// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	6LoWPAN next header compression
 *
 *	Authors:
 *	Alexander Aring <aar@pengutronix.de>
 */

#include <linux/netdevice.h>

#include <net/ipv6.h>

#include "nhc.h"

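/* Registered compression handlers are kept in two views: an rb-tree keyed
 * by the nhc id/mask for decompression lookups on received frames, and an
 * array indexed by the IPv6 next header value for compression lookups.
 */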
static struct rb_root rb_root = RB_ROOT;
static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
static DEFINE_SPINLOCK(lowpan_nhc_lock);

static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
{
	struct rb_node **new = &rb_root.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct lowpan_nhc *this = rb_entry(*new, struct lowpan_nhc,
						   node);
		int result, len_dif, len;

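		/* The tree is ordered by the nhc id bytes over the shorter
		 * of the two id lengths; ties are broken by id length, so
		 * only a byte-identical id of the same length matches.
		 */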
		len_dif = nhc->idlen - this->idlen;

		if (nhc->idlen < this->idlen)
			len = nhc->idlen;
		else
			len = this->idlen;

		result = memcmp(nhc->id, this->id, len);
		if (!result)
			result = len_dif;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&nhc->node, parent, new);
	rb_insert_color(&nhc->node, &rb_root);

	return 0;
}

static void lowpan_nhc_remove(struct lowpan_nhc *nhc)
{
	rb_erase(&nhc->node, &rb_root);
}

static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
{
	struct rb_node *node = rb_root.rb_node;
	const u8 *nhcid_skb_ptr = skb->data;

	while (node) {
		struct lowpan_nhc *nhc = rb_entry(node, struct lowpan_nhc,
						  node);
		u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN];
		int result, i;

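		/* Bail out if the candidate id would read past the end of
		 * the skb data.
		 */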
		if (nhcid_skb_ptr + nhc->idlen > skb->data + skb->len)
			return NULL;

		/* copy the nhc id value from the skb and mask it afterwards */
		memcpy(nhcid_skb_ptr_masked, nhcid_skb_ptr, nhc->idlen);
		for (i = 0; i < nhc->idlen; i++)
			nhcid_skb_ptr_masked[i] &= nhc->idmask[i];

		result = memcmp(nhcid_skb_ptr_masked, nhc->id, nhc->idlen);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return nhc;
	}

	return NULL;
}

int lowpan_nhc_check_compression(struct sk_buff *skb,
				 const struct ipv6hdr *hdr, u8 **hc_ptr)
{
	struct lowpan_nhc *nhc;
	int ret = 0;

	spin_lock_bh(&lowpan_nhc_lock);

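	/* Only check whether a compression handler is registered for this
	 * next header value; the actual compression is done later in
	 * lowpan_nhc_do_compression.
	 */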
	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
	if (!(nhc && nhc->compress))
		ret = -ENOENT;

	spin_unlock_bh(&lowpan_nhc_lock);

	return ret;
}

int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
			      u8 **hc_ptr)
{
	int ret;
	struct lowpan_nhc *nhc;

	spin_lock_bh(&lowpan_nhc_lock);

	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
	/* Check whether the nhc module was removed while the lock was not
	 * held. TODO: this is a workaround; we should prevent unloading of
	 * nhc modules during the unlocked window. Hitting this case always
	 * drops the lowpan packet, but it is very unlikely.
	 *
	 * A clean solution is not easy because the decision whether to
	 * compress is already made in lowpan_nhc_check_compression, and the
	 * inline data added to the skb depends on it, so this handling
	 * cannot simply be moved there.
	 */
	if (unlikely(!nhc || !nhc->compress)) {
		ret = -EINVAL;
		goto out;
	}

	/* In the case of RAW sockets the transport header is not set by
	 * the ip6 stack so we must set it ourselves
	 */
	if (skb->transport_header == skb->network_header)
		skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	ret = nhc->compress(skb, hc_ptr);
	if (ret < 0)
		goto out;

	/* skip the transport header */
	skb_pull(skb, nhc->nexthdrlen);

out:
	spin_unlock_bh(&lowpan_nhc_lock);

	return ret;
}

int lowpan_nhc_do_uncompression(struct sk_buff *skb,
				const struct net_device *dev,
				struct ipv6hdr *hdr)
{
	struct lowpan_nhc *nhc;
	int ret;

	spin_lock_bh(&lowpan_nhc_lock);

	nhc = lowpan_nhc_by_nhcid(skb);
	if (nhc) {
		if (nhc->uncompress) {
			ret = nhc->uncompress(skb, sizeof(struct ipv6hdr) +
					      nhc->nexthdrlen);
			if (ret < 0) {
				spin_unlock_bh(&lowpan_nhc_lock);
				return ret;
			}
		} else {
			spin_unlock_bh(&lowpan_nhc_lock);
			netdev_warn(dev, "received nhc id for %s which is not implemented.\n",
				    nhc->name);
			return -ENOTSUPP;
		}
	} else {
		spin_unlock_bh(&lowpan_nhc_lock);
		netdev_warn(dev, "received unknown nhc id which was not found.\n");
		return -ENOENT;
	}

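	/* Store the uncompressed next header value and let the transport
	 * header point at the header that was just uncompressed.
	 */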
	hdr->nexthdr = nhc->nexthdr;
	skb_reset_transport_header(skb);
	raw_dump_table(__func__, "raw transport header dump",
		       skb_transport_header(skb), nhc->nexthdrlen);

	spin_unlock_bh(&lowpan_nhc_lock);

	return 0;
}

int lowpan_nhc_add(struct lowpan_nhc *nhc)
{
	int ret;

	if (!nhc->idlen || !nhc->idsetup)
		return -EINVAL;

	WARN_ONCE(nhc->idlen > LOWPAN_NHC_MAX_ID_LEN,
		  "LOWPAN_NHC_MAX_ID_LEN should be updated to %zd.\n",
		  nhc->idlen);

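	/* Let the handler fill in its nhc id and id mask before the entry
	 * is inserted into the lookup structures.
	 */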
	nhc->idsetup(nhc);

	spin_lock_bh(&lowpan_nhc_lock);

	if (lowpan_nexthdr_nhcs[nhc->nexthdr]) {
		ret = -EEXIST;
		goto out;
	}

	ret = lowpan_nhc_insert(nhc);
	if (ret < 0)
		goto out;

	lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc;
out:
	spin_unlock_bh(&lowpan_nhc_lock);
	return ret;
}
EXPORT_SYMBOL(lowpan_nhc_add);

void lowpan_nhc_del(struct lowpan_nhc *nhc)
{
	spin_lock_bh(&lowpan_nhc_lock);

	lowpan_nhc_remove(nhc);
	lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL;

	spin_unlock_bh(&lowpan_nhc_lock);

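	/* Wait for any packet processing that may still be using the nhc to
	 * finish before the caller (typically the nhc module) goes away.
	 */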
	synchronize_net();
}
EXPORT_SYMBOL(lowpan_nhc_del);