/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MPLS_INTERNAL_H
#define MPLS_INTERNAL_H
#include <net/mpls.h>

/* put a reasonable limit on the number of labels
 * we will accept from userspace
 */
#define MAX_NEW_LABELS 30

struct mpls_entry_decoded {
        u32 label;
        u8 ttl;
        u8 tc;
        u8 bos;
};

struct mpls_pcpu_stats {
        struct mpls_link_stats stats;
        struct u64_stats_sync syncp;
};

struct mpls_dev {
        int input_enabled;
        struct net_device *dev;
        struct mpls_pcpu_stats __percpu *stats;

        struct ctl_table_header *sysctl;
        struct rcu_head rcu;
};

#if BITS_PER_LONG == 32

#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \
do { \
        __typeof__(*(mdev)->stats) *ptr = \
                raw_cpu_ptr((mdev)->stats); \
        local_bh_disable(); \
        u64_stats_update_begin(&ptr->syncp); \
        ptr->stats.pkts_field++; \
        ptr->stats.bytes_field += (len); \
        u64_stats_update_end(&ptr->syncp); \
        local_bh_enable(); \
} while (0)

#define MPLS_INC_STATS(mdev, field) \
do { \
        __typeof__(*(mdev)->stats) *ptr = \
                raw_cpu_ptr((mdev)->stats); \
        local_bh_disable(); \
        u64_stats_update_begin(&ptr->syncp); \
        ptr->stats.field++; \
        u64_stats_update_end(&ptr->syncp); \
        local_bh_enable(); \
} while (0)

#else

#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \
do { \
        this_cpu_inc((mdev)->stats->stats.pkts_field); \
        this_cpu_add((mdev)->stats->stats.bytes_field, (len)); \
} while (0)

#define MPLS_INC_STATS(mdev, field) \
        this_cpu_inc((mdev)->stats->stats.field)

#endif
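
/* Illustrative usage, assuming a caller that already has an RCU-protected
 * struct mpls_dev (e.g. from mpls_dev_get()) and a struct sk_buff "skb";
 * the counter names are fields of struct mpls_link_stats:
 *
 *      MPLS_INC_STATS_LEN(mdev, skb->len, tx_packets, tx_bytes);
 *      MPLS_INC_STATS(mdev, rx_errors);
 *
 * On 32-bit hosts the u64_stats_sync/local_bh_disable() sequence above keeps
 * the 64-bit counters consistent; on 64-bit hosts plain this_cpu_* operations
 * are sufficient.
 */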

struct sk_buff;

#define LABEL_NOT_SPECIFIED (1 << 20)

/* This maximum hardware address length is copied from the definition of
 * struct neighbour.
 */
#define VIA_ALEN_ALIGN sizeof(unsigned long)
#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, VIA_ALEN_ALIGN))

enum mpls_payload_type {
        MPT_UNSPEC, /* IPv4 or IPv6 */
        MPT_IPV4 = 4,
        MPT_IPV6 = 6,

        /* Other types not implemented:
         * - Pseudo-wire with or without control word (RFC4385)
         * - GAL (RFC5586)
         */
};

struct mpls_nh { /* next hop label forwarding entry */
        struct net_device *nh_dev;

        /* nh_flags is accessed under RCU in the packet path; it is
         * modified while handling netdev events, with the rtnl lock held
         */
        unsigned int nh_flags;
        u8 nh_labels;
        u8 nh_via_alen;
        u8 nh_via_table;
        u8 nh_reserved1;

        u32 nh_label[];
};

/* offset of via from beginning of mpls_nh */
#define MPLS_NH_VIA_OFF(num_labels) \
        ALIGN(sizeof(struct mpls_nh) + (num_labels) * sizeof(u32), \
              VIA_ALEN_ALIGN)

/* all nexthops within a route have the same size based on the
 * max number of labels and max via length across all nexthops
 */
#define MPLS_NH_SIZE(num_labels, max_via_alen) \
        (MPLS_NH_VIA_OFF((num_labels)) + \
         ALIGN((max_via_alen), VIA_ALEN_ALIGN))
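
/* Worked example (illustrative): on a 64-bit build sizeof(struct mpls_nh)
 * is typically 16 bytes and VIA_ALEN_ALIGN is 8, so:
 *
 *      MPLS_NH_VIA_OFF(1) = ALIGN(16 + 1 * 4, 8) = 24
 *      MPLS_NH_SIZE(1, 6) = 24 + ALIGN(6, 8)     = 32
 *
 * i.e. a nexthop carrying one label and an Ethernet-length via occupies
 * 32 bytes, including 4 bytes of padding after the odd-sized label array
 * (see the layout diagram below).
 */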

enum mpls_ttl_propagation {
        MPLS_TTL_PROP_DEFAULT,
        MPLS_TTL_PROP_ENABLED,
        MPLS_TTL_PROP_DISABLED,
};

/* The route, nexthops and vias are stored together in the same memory
 * block:
 *
 * +----------------------+
 * | mpls_route           |
 * +----------------------+
 * | mpls_nh 0            |
 * +----------------------+
 * | alignment padding    |   4 bytes for odd number of labels
 * +----------------------+
 * | via[rt_max_alen] 0   |
 * +----------------------+
 * | alignment padding    |   via's aligned on sizeof(unsigned long)
 * +----------------------+
 * | ...                  |
 * +----------------------+
 * | mpls_nh n-1          |
 * +----------------------+
 * | via[rt_max_alen] n-1 |
 * +----------------------+
 */
struct mpls_route { /* next hop label forwarding entry */
        struct rcu_head rt_rcu;
        u8 rt_protocol;
        u8 rt_payload_type;
        u8 rt_max_alen;
        u8 rt_ttl_propagate;
        u8 rt_nhn;
        /* rt_nhn_alive is accessed under RCU in the packet path; it
         * is modified while handling netdev events, with the rtnl lock held
         */
        u8 rt_nhn_alive;
        u8 rt_nh_size;
        u8 rt_via_offset;
        u8 rt_reserved1;
        struct mpls_nh rt_nh[];
};
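
/* Given the layout above, a nexthop's via address sits at a fixed offset
 * from the nexthop itself. A lookup helper could be sketched roughly as
 * follows (__rt_via() is a hypothetical name, not part of this header):
 *
 *      static inline u8 *__rt_via(struct mpls_route *rt, struct mpls_nh *nh)
 *      {
 *              return (u8 *)nh + rt->rt_via_offset;
 *      }
 *
 * while nexthop n starts at (u8 *)rt->rt_nh + n * rt->rt_nh_size.
 */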

#define for_nexthops(rt) { \
        int nhsel; struct mpls_nh *nh; u8 *__nh; \
        for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh); \
             nhsel < (rt)->rt_nhn; \
             __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)

#define change_nexthops(rt) { \
        int nhsel; struct mpls_nh *nh; u8 *__nh; \
        for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh), \
                        __nh = (u8 *)((rt)->rt_nh); \
             nhsel < (rt)->rt_nhn; \
             __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)

#define endfor_nexthops(rt) }
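
/* Typical iteration, as a sketch; nhsel and nh are declared by the macro
 * itself, and do_something() is a hypothetical callee:
 *
 *      for_nexthops(rt) {
 *              if (nh->nh_dev == dev)
 *                      do_something(nh, nhsel);
 *      } endfor_nexthops(rt);
 *
 * change_nexthops() performs the same walk and is intended for the write
 * side, i.e. code that modifies nexthop state under the rtnl lock.
 */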

static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *hdr)
{
        struct mpls_entry_decoded result;
        unsigned entry = be32_to_cpu(hdr->label_stack_entry);

        result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
        result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
        result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
        result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;

        return result;
}
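
/* The decode above follows the RFC 3032 label stack entry layout: 20 bits
 * of label, 3 bits of traffic class (TC), a 1-bit bottom-of-stack flag (S)
 * and an 8-bit TTL, packed big-endian into the 32-bit shim word. For
 * example, a shim word of 0x00064140 decodes to label 100, TC 0, S = 1,
 * TTL = 64.
 */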

static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
{
        return rcu_dereference_rtnl(dev->mpls_ptr);
}
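
/* mpls_dev_get() may be called either from the packet path under
 * rcu_read_lock() or from configuration paths under the rtnl lock;
 * rcu_dereference_rtnl() accepts both. It returns NULL for devices that
 * have no MPLS state attached.
 */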

int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
                   const u32 label[]);
int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
                   u32 label[], struct netlink_ext_ack *extack);
bool mpls_output_possible(const struct net_device *dev);
unsigned int mpls_dev_mtu(const struct net_device *dev);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
void mpls_stats_inc_outucastpkts(struct net_device *dev,
                                 const struct sk_buff *skb);

#endif /* MPLS_INTERNAL_H */