1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * UDP over IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 *
9 * Based on linux/ipv4/udp.c
10 *
11 * Fixes:
12 * Hideaki YOSHIFUJI : sin6_scope_id support
13 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
14 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
15 * a single port at the same time.
16 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
17 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
18 */
19
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/socket.h>
23 #include <linux/sockios.h>
24 #include <linux/net.h>
25 #include <linux/in6.h>
26 #include <linux/netdevice.h>
27 #include <linux/if_arp.h>
28 #include <linux/ipv6.h>
29 #include <linux/icmpv6.h>
30 #include <linux/init.h>
31 #include <linux/module.h>
32 #include <linux/skbuff.h>
33 #include <linux/slab.h>
34 #include <linux/uaccess.h>
35 #include <linux/indirect_call_wrapper.h>
36
37 #include <net/addrconf.h>
38 #include <net/ndisc.h>
39 #include <net/protocol.h>
40 #include <net/transp_v6.h>
41 #include <net/ip6_route.h>
42 #include <net/raw.h>
43 #include <net/seg6.h>
44 #include <net/tcp_states.h>
45 #include <net/ip6_checksum.h>
46 #include <net/ip6_tunnel.h>
47 #include <net/xfrm.h>
48 #include <net/inet_hashtables.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/busy_poll.h>
51 #include <net/sock_reuseport.h>
52
53 #include <linux/proc_fs.h>
54 #include <linux/seq_file.h>
55 #include <trace/events/skb.h>
56 #include "udp_impl.h"
57
static u32 udp6_ehashfn(const struct net *net,
59 const struct in6_addr *laddr,
60 const u16 lport,
61 const struct in6_addr *faddr,
62 const __be16 fport)
63 {
64 static u32 udp6_ehash_secret __read_mostly;
65 static u32 udp_ipv6_hash_secret __read_mostly;
66
67 u32 lhash, fhash;
68
69 net_get_random_once(&udp6_ehash_secret,
70 sizeof(udp6_ehash_secret));
71 net_get_random_once(&udp_ipv6_hash_secret,
72 sizeof(udp_ipv6_hash_secret));
73
74 lhash = (__force u32)laddr->s6_addr32[3];
75 fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
76
77 return __inet6_ehashfn(lhash, lport, fhash, fport,
78 udp_ipv6_hash_secret + net_hash_mix(net));
79 }
80
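/* Precompute the secondary (address, port) hashes for both the wildcard
 * address and the socket's bound address, so that udp_lib_get_port() can
 * run its bind-conflict search on the shorter hash2 chains.
 */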
int udp_v6_get_port(struct sock *sk, unsigned short snum)
82 {
83 unsigned int hash2_nulladdr =
84 ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
85 unsigned int hash2_partial =
86 ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
87
88 /* precompute partial secondary hash */
89 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
90 return udp_lib_get_port(sk, snum, hash2_nulladdr);
91 }
92
void udp_v6_rehash(struct sock *sk)
94 {
95 u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
96 &sk->sk_v6_rcv_saddr,
97 inet_sk(sk)->inet_num);
98
99 udp_lib_rehash(sk, new_hash);
100 }
101
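/* Score a candidate socket for an incoming packet: return -1 if it cannot
 * receive the packet at all, otherwise a score that grows with each exactly
 * matching attribute (connected peer port/address, bound device, incoming
 * CPU). udp6_lib_lookup2() keeps the highest-scoring socket.
 */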
static int compute_score(struct sock *sk, struct net *net,
103 const struct in6_addr *saddr, __be16 sport,
104 const struct in6_addr *daddr, unsigned short hnum,
105 int dif, int sdif)
106 {
107 int score;
108 struct inet_sock *inet;
109 bool dev_match;
110
111 if (!net_eq(sock_net(sk), net) ||
112 udp_sk(sk)->udp_port_hash != hnum ||
113 sk->sk_family != PF_INET6)
114 return -1;
115
116 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
117 return -1;
118
119 score = 0;
120 inet = inet_sk(sk);
121
122 if (inet->inet_dport) {
123 if (inet->inet_dport != sport)
124 return -1;
125 score++;
126 }
127
128 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
129 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
130 return -1;
131 score++;
132 }
133
134 dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
135 if (!dev_match)
136 return -1;
137 if (sk->sk_bound_dev_if)
138 score++;
139
140 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
141 score++;
142
143 return score;
144 }
145
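/* If the matched socket is part of a SO_REUSEPORT group (and not already
 * connected), hash the 4-tuple and let reuseport_select_sock() pick the
 * group member that should receive this packet.
 */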
static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
147 struct sk_buff *skb,
148 const struct in6_addr *saddr,
149 __be16 sport,
150 const struct in6_addr *daddr,
151 unsigned int hnum)
152 {
153 struct sock *reuse_sk = NULL;
154 u32 hash;
155
156 if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
157 hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
158 reuse_sk = reuseport_select_sock(sk, hash, skb,
159 sizeof(struct udphdr));
160 }
161 return reuse_sk;
162 }
163
164 /* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
166 const struct in6_addr *saddr, __be16 sport,
167 const struct in6_addr *daddr, unsigned int hnum,
168 int dif, int sdif, struct udp_hslot *hslot2,
169 struct sk_buff *skb)
170 {
171 struct sock *sk, *result;
172 int score, badness;
173
174 result = NULL;
175 badness = -1;
176 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
177 score = compute_score(sk, net, saddr, sport,
178 daddr, hnum, dif, sdif);
179 if (score > badness) {
180 result = lookup_reuseport(net, sk, skb,
181 saddr, sport, daddr, hnum);
182 /* Fall back to scoring if group has connections */
183 if (result && !reuseport_has_conns(sk, false))
184 return result;
185
186 result = result ? : sk;
187 badness = score;
188 }
189 }
190 return result;
191 }
192
static inline struct sock *udp6_lookup_run_bpf(struct net *net,
194 struct udp_table *udptable,
195 struct sk_buff *skb,
196 const struct in6_addr *saddr,
197 __be16 sport,
198 const struct in6_addr *daddr,
199 u16 hnum)
200 {
201 struct sock *sk, *reuse_sk;
202 bool no_reuseport;
203
204 if (udptable != &udp_table)
205 return NULL; /* only UDP is supported */
206
207 no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
208 saddr, sport, daddr, hnum, &sk);
209 if (no_reuseport || IS_ERR_OR_NULL(sk))
210 return sk;
211
212 reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
213 if (reuse_sk)
214 sk = reuse_sk;
215 return sk;
216 }
217
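/* Socket lookup for an incoming packet, in three steps: first the
 * destination (address, port) hash2 bucket for connected/specific sockets,
 * then an optional BPF sk_lookup redirect, and finally the wildcard
 * (in6addr_any, port) bucket.
 */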
218 /* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
220 const struct in6_addr *saddr, __be16 sport,
221 const struct in6_addr *daddr, __be16 dport,
222 int dif, int sdif, struct udp_table *udptable,
223 struct sk_buff *skb)
224 {
225 unsigned short hnum = ntohs(dport);
226 unsigned int hash2, slot2;
227 struct udp_hslot *hslot2;
228 struct sock *result, *sk;
229
230 hash2 = ipv6_portaddr_hash(net, daddr, hnum);
231 slot2 = hash2 & udptable->mask;
232 hslot2 = &udptable->hash2[slot2];
233
234 /* Lookup connected or non-wildcard sockets */
235 result = udp6_lib_lookup2(net, saddr, sport,
236 daddr, hnum, dif, sdif,
237 hslot2, skb);
238 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
239 goto done;
240
241 /* Lookup redirect from BPF */
242 if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
243 sk = udp6_lookup_run_bpf(net, udptable, skb,
244 saddr, sport, daddr, hnum);
245 if (sk) {
246 result = sk;
247 goto done;
248 }
249 }
250
251 /* Got non-wildcard socket or error on first lookup */
252 if (result)
253 goto done;
254
255 /* Lookup wildcard sockets */
256 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
257 slot2 = hash2 & udptable->mask;
258 hslot2 = &udptable->hash2[slot2];
259
260 result = udp6_lib_lookup2(net, saddr, sport,
261 &in6addr_any, hnum, dif, sdif,
262 hslot2, skb);
263 done:
264 if (IS_ERR(result))
265 return NULL;
266 return result;
267 }
268 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
269
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
271 __be16 sport, __be16 dport,
272 struct udp_table *udptable)
273 {
274 const struct ipv6hdr *iph = ipv6_hdr(skb);
275
276 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
277 &iph->daddr, dport, inet6_iif(skb),
278 inet6_sdif(skb), udptable, skb);
279 }
280
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
282 __be16 sport, __be16 dport)
283 {
284 const struct ipv6hdr *iph = ipv6_hdr(skb);
285
286 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
287 &iph->daddr, dport, inet6_iif(skb),
288 inet6_sdif(skb), &udp_table, NULL);
289 }
290
291 /* Must be called under rcu_read_lock().
292 * Does increment socket refcount.
293 */
294 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
296 const struct in6_addr *daddr, __be16 dport, int dif)
297 {
298 struct sock *sk;
299
300 sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
301 dif, 0, &udp_table, NULL);
302 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
303 sk = NULL;
304 return sk;
305 }
306 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
307 #endif
308
/* Do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space. Note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap.
 */
static int udp6_skb_len(struct sk_buff *skb)
314 {
315 return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
316 }
317
/*
 * This should be easy: if there is something there, we
 * return it; otherwise we block.
 */
322
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
324 int noblock, int flags, int *addr_len)
325 {
326 struct ipv6_pinfo *np = inet6_sk(sk);
327 struct inet_sock *inet = inet_sk(sk);
328 struct sk_buff *skb;
329 unsigned int ulen, copied;
330 int off, err, peeking = flags & MSG_PEEK;
331 int is_udplite = IS_UDPLITE(sk);
332 struct udp_mib __percpu *mib;
333 bool checksum_valid = false;
334 int is_udp4;
335
336 if (flags & MSG_ERRQUEUE)
337 return ipv6_recv_error(sk, msg, len, addr_len);
338
339 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
340 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
341
342 try_again:
343 off = sk_peek_offset(sk, flags);
344 skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
345 if (!skb)
346 return err;
347
348 ulen = udp6_skb_len(skb);
349 copied = len;
350 if (copied > ulen - off)
351 copied = ulen - off;
352 else if (copied < ulen)
353 msg->msg_flags |= MSG_TRUNC;
354
355 is_udp4 = (skb->protocol == htons(ETH_P_IP));
356 mib = __UDPX_MIB(sk, is_udp4);
357
358 /*
359 * If checksum is needed at all, try to do it while copying the
360 * data. If the data is truncated, or if we only want a partial
361 * coverage checksum (UDP-Lite), do it before the copy.
362 */
363
364 if (copied < ulen || peeking ||
365 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
366 checksum_valid = udp_skb_csum_unnecessary(skb) ||
367 !__udp_lib_checksum_complete(skb);
368 if (!checksum_valid)
369 goto csum_copy_err;
370 }
371
372 if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
373 if (udp_skb_is_linear(skb))
374 err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
375 else
376 err = skb_copy_datagram_msg(skb, off, msg, copied);
377 } else {
378 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
379 if (err == -EINVAL)
380 goto csum_copy_err;
381 }
382 if (unlikely(err)) {
383 if (!peeking) {
384 atomic_inc(&sk->sk_drops);
385 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
386 }
387 kfree_skb(skb);
388 return err;
389 }
390 if (!peeking)
391 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
392
393 sock_recv_ts_and_drops(msg, sk, skb);
394
395 /* Copy the address. */
396 if (msg->msg_name) {
397 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
398 sin6->sin6_family = AF_INET6;
399 sin6->sin6_port = udp_hdr(skb)->source;
400 sin6->sin6_flowinfo = 0;
401
402 if (is_udp4) {
403 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
404 &sin6->sin6_addr);
405 sin6->sin6_scope_id = 0;
406 } else {
407 sin6->sin6_addr = ipv6_hdr(skb)->saddr;
408 sin6->sin6_scope_id =
409 ipv6_iface_scope_id(&sin6->sin6_addr,
410 inet6_iif(skb));
411 }
412 *addr_len = sizeof(*sin6);
413
414 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
415 (struct sockaddr *)sin6);
416 }
417
418 if (udp_sk(sk)->gro_enabled)
419 udp_cmsg_recv(msg, sk, skb);
420
421 if (np->rxopt.all)
422 ip6_datagram_recv_common_ctl(sk, msg, skb);
423
424 if (is_udp4) {
425 if (inet->cmsg_flags)
426 ip_cmsg_recv_offset(msg, sk, skb,
427 sizeof(struct udphdr), off);
428 } else {
429 if (np->rxopt.all)
430 ip6_datagram_recv_specific_ctl(sk, msg, skb);
431 }
432
433 err = copied;
434 if (flags & MSG_TRUNC)
435 err = ulen;
436
437 skb_consume_udp(sk, skb, peeking ? -err : err);
438 return err;
439
440 csum_copy_err:
441 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
442 udp_skb_destructor)) {
443 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
444 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
445 }
446 kfree_skb(skb);
447
448 /* starting over for a new packet, but check if we need to yield */
449 cond_resched();
450 msg->msg_flags &= ~MSG_TRUNC;
451 goto try_again;
452 }
453
454 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
456 {
457 static_branch_inc(&udpv6_encap_needed_key);
458 }
459 EXPORT_SYMBOL(udpv6_encap_enable);
460
461 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
462 * through error handlers in encapsulations looking for a match.
463 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
465 struct inet6_skb_parm *opt,
466 u8 type, u8 code, int offset, __be32 info)
467 {
468 int i;
469
470 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
471 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
472 u8 type, u8 code, int offset, __be32 info);
473 const struct ip6_tnl_encap_ops *encap;
474
475 encap = rcu_dereference(ip6tun_encaps[i]);
476 if (!encap)
477 continue;
478 handler = encap->err_handler;
479 if (handler && !handler(skb, opt, type, code, offset, info))
480 return 0;
481 }
482
483 return -ENOENT;
484 }
485
486 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
487 * reversing source and destination port: this will match tunnels that force the
488 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
489 * lwtunnels might actually break this assumption by being configured with
490 * different destination ports on endpoints, in this case we won't be able to
491 * trace ICMP messages back to them.
492 *
493 * If this doesn't match any socket, probe tunnels with arbitrary destination
494 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
495 * we've sent packets to won't necessarily match the local destination port.
496 *
497 * Then ask the tunnel implementation to match the error against a valid
498 * association.
499 *
500 * Return an error if we can't find a match, the socket if we need further
501 * processing, zero otherwise.
502 */
static struct sock *__udp6_lib_err_encap(struct net *net,
504 const struct ipv6hdr *hdr, int offset,
505 struct udphdr *uh,
506 struct udp_table *udptable,
507 struct sock *sk,
508 struct sk_buff *skb,
509 struct inet6_skb_parm *opt,
510 u8 type, u8 code, __be32 info)
511 {
512 int (*lookup)(struct sock *sk, struct sk_buff *skb);
513 int network_offset, transport_offset;
514 struct udp_sock *up;
515
516 network_offset = skb_network_offset(skb);
517 transport_offset = skb_transport_offset(skb);
518
519 /* Network header needs to point to the outer IPv6 header inside ICMP */
520 skb_reset_network_header(skb);
521
522 /* Transport header needs to point to the UDP header */
523 skb_set_transport_header(skb, offset);
524
525 if (sk) {
526 up = udp_sk(sk);
527
528 lookup = READ_ONCE(up->encap_err_lookup);
529 if (lookup && lookup(sk, skb))
530 sk = NULL;
531
532 goto out;
533 }
534
535 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
536 &hdr->saddr, uh->dest,
537 inet6_iif(skb), 0, udptable, skb);
538 if (sk) {
539 up = udp_sk(sk);
540
541 lookup = READ_ONCE(up->encap_err_lookup);
542 if (!lookup || lookup(sk, skb))
543 sk = NULL;
544 }
545
546 out:
547 if (!sk) {
548 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
549 offset, info));
550 }
551
552 skb_set_transport_header(skb, transport_offset);
553 skb_set_network_header(skb, network_offset);
554
555 return sk;
556 }
557
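/* ICMPv6 error handler shared by UDP and UDP-Lite: look up the socket that
 * sent the offending datagram, give encapsulation (tunnel) sockets a chance
 * to claim the error, handle PMTU updates and redirects, and report the
 * error to the application when IPV6_RECVERR is enabled.
 */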
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
559 u8 type, u8 code, int offset, __be32 info,
560 struct udp_table *udptable)
561 {
562 struct ipv6_pinfo *np;
563 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
564 const struct in6_addr *saddr = &hdr->saddr;
565 const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
566 struct udphdr *uh = (struct udphdr *)(skb->data+offset);
567 bool tunnel = false;
568 struct sock *sk;
569 int harderr;
570 int err;
571 struct net *net = dev_net(skb->dev);
572
573 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
574 inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
575
576 if (!sk || udp_sk(sk)->encap_type) {
577 /* No socket for error: try tunnels before discarding */
578 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
579 sk = __udp6_lib_err_encap(net, hdr, offset, uh,
580 udptable, sk, skb,
581 opt, type, code, info);
582 if (!sk)
583 return 0;
584 } else
585 sk = ERR_PTR(-ENOENT);
586
587 if (IS_ERR(sk)) {
588 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
589 ICMP6_MIB_INERRORS);
590 return PTR_ERR(sk);
591 }
592
593 tunnel = true;
594 }
595
596 harderr = icmpv6_err_convert(type, code, &err);
597 np = inet6_sk(sk);
598
599 if (type == ICMPV6_PKT_TOOBIG) {
600 if (!ip6_sk_accept_pmtu(sk))
601 goto out;
602 ip6_sk_update_pmtu(skb, sk, info);
603 if (np->pmtudisc != IPV6_PMTUDISC_DONT)
604 harderr = 1;
605 }
606 if (type == NDISC_REDIRECT) {
607 if (tunnel) {
608 ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
609 sk->sk_mark, sk->sk_uid);
610 } else {
611 ip6_sk_redirect(skb, sk);
612 }
613 goto out;
614 }
615
616 /* Tunnels don't have an application socket: don't pass errors back */
617 if (tunnel)
618 goto out;
619
620 if (!np->recverr) {
621 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
622 goto out;
623 } else {
624 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
625 }
626
627 sk->sk_err = err;
628 sk_error_report(sk);
629 out:
630 return 0;
631 }
632
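/* Final enqueue step: connected sockets also get their RPS rxhash, NAPI id
 * and incoming CPU refreshed; unconnected sockets only record the NAPI id
 * once. Enqueue failures are accounted as RCVBUFERRORS or MEMERRORS plus
 * INERRORS.
 */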
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
634 {
635 int rc;
636
637 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
638 sock_rps_save_rxhash(sk, skb);
639 sk_mark_napi_id(sk, skb);
640 sk_incoming_cpu_update(sk);
641 } else {
642 sk_mark_napi_id_once(sk, skb);
643 }
644
645 rc = __udp_enqueue_schedule_skb(sk, skb);
646 if (rc < 0) {
647 int is_udplite = IS_UDPLITE(sk);
648
649 /* Note that an ENOMEM error is charged twice */
650 if (rc == -ENOMEM)
651 UDP6_INC_STATS(sock_net(sk),
652 UDP_MIB_RCVBUFERRORS, is_udplite);
653 else
654 UDP6_INC_STATS(sock_net(sk),
655 UDP_MIB_MEMERRORS, is_udplite);
656 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
657 kfree_skb(skb);
658 return -1;
659 }
660
661 return 0;
662 }
663
static __inline__ int udpv6_err(struct sk_buff *skb,
665 struct inet6_skb_parm *opt, u8 type,
666 u8 code, int offset, __be32 info)
667 {
668 return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
669 }
670
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
672 {
673 struct udp_sock *up = udp_sk(sk);
674 int is_udplite = IS_UDPLITE(sk);
675
676 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
677 goto drop;
678
679 if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
680 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
681
682 /*
683 * This is an encapsulation socket so pass the skb to
684 * the socket's udp_encap_rcv() hook. Otherwise, just
685 * fall through and pass this up the UDP socket.
686 * up->encap_rcv() returns the following value:
687 * =0 if skb was successfully passed to the encap
688 * handler or was discarded by it.
689 * >0 if skb should be passed on to UDP.
690 * <0 if skb should be resubmitted as proto -N
691 */
692
693 /* if we're overly short, let UDP handle it */
694 encap_rcv = READ_ONCE(up->encap_rcv);
695 if (encap_rcv) {
696 int ret;
697
698 /* Verify checksum before giving to encap */
699 if (udp_lib_checksum_complete(skb))
700 goto csum_error;
701
702 ret = encap_rcv(sk, skb);
703 if (ret <= 0) {
704 __UDP6_INC_STATS(sock_net(sk),
705 UDP_MIB_INDATAGRAMS,
706 is_udplite);
707 return -ret;
708 }
709 }
710
711 /* FALLTHROUGH -- it's a UDP Packet */
712 }
713
714 /*
715 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
716 */
717 if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
718
719 if (up->pcrlen == 0) { /* full coverage was set */
720 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
721 UDP_SKB_CB(skb)->cscov, skb->len);
722 goto drop;
723 }
724 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
725 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
726 UDP_SKB_CB(skb)->cscov, up->pcrlen);
727 goto drop;
728 }
729 }
730
731 prefetch(&sk->sk_rmem_alloc);
732 if (rcu_access_pointer(sk->sk_filter) &&
733 udp_lib_checksum_complete(skb))
734 goto csum_error;
735
736 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
737 goto drop;
738
739 udp_csum_pull_header(skb);
740
741 skb_dst_drop(skb);
742
743 return __udpv6_queue_rcv_skb(sk, skb);
744
745 csum_error:
746 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
747 drop:
748 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
749 atomic_inc(&sk->sk_drops);
750 kfree_skb(skb);
751 return -1;
752 }
753
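/* Per-socket receive entry point: plain packets go straight to
 * udpv6_queue_rcv_one_skb(); unexpected GSO packets (e.g. a GRO'd batch on
 * a socket that cannot accept it) are first segmented with
 * udp_rcv_segment() and each segment is queued individually.
 */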
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
755 {
756 struct sk_buff *next, *segs;
757 int ret;
758
759 if (likely(!udp_unexpected_gso(sk, skb)))
760 return udpv6_queue_rcv_one_skb(sk, skb);
761
762 __skb_push(skb, -skb_mac_offset(skb));
763 segs = udp_rcv_segment(sk, skb, false);
764 skb_list_walk_safe(segs, skb, next) {
765 __skb_pull(skb, skb_transport_offset(skb));
766
767 udp_post_segment_fix_csum(skb);
768 ret = udpv6_queue_rcv_one_skb(sk, skb);
769 if (ret > 0)
770 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
771 true);
772 }
773 return 0;
774 }
775
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
777 __be16 loc_port, const struct in6_addr *loc_addr,
778 __be16 rmt_port, const struct in6_addr *rmt_addr,
779 int dif, int sdif, unsigned short hnum)
780 {
781 struct inet_sock *inet = inet_sk(sk);
782
783 if (!net_eq(sock_net(sk), net))
784 return false;
785
786 if (udp_sk(sk)->udp_port_hash != hnum ||
787 sk->sk_family != PF_INET6 ||
788 (inet->inet_dport && inet->inet_dport != rmt_port) ||
789 (!ipv6_addr_any(&sk->sk_v6_daddr) &&
790 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
791 !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
792 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
793 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
794 return false;
795 if (!inet6_mc_check(sk, loc_addr, rmt_addr))
796 return false;
797 return true;
798 }
799
static void udp6_csum_zero_error(struct sk_buff *skb)
801 {
802 /* RFC 2460 section 8.1 says that we SHOULD log
803 * this error. Well, it is reasonable.
804 */
805 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
806 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
807 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
808 }
809
810 /*
811 * Note: called only from the BH handler context,
812 * so we don't need to lock the hashes.
813 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
815 const struct in6_addr *saddr, const struct in6_addr *daddr,
816 struct udp_table *udptable, int proto)
817 {
818 struct sock *sk, *first = NULL;
819 const struct udphdr *uh = udp_hdr(skb);
820 unsigned short hnum = ntohs(uh->dest);
821 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
822 unsigned int offset = offsetof(typeof(*sk), sk_node);
823 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
824 int dif = inet6_iif(skb);
825 int sdif = inet6_sdif(skb);
826 struct hlist_node *node;
827 struct sk_buff *nskb;
828
829 if (use_hash2) {
830 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
831 udptable->mask;
832 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
833 start_lookup:
834 hslot = &udptable->hash2[hash2];
835 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
836 }
837
838 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
839 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
840 uh->source, saddr, dif, sdif,
841 hnum))
842 continue;
843 /* If zero checksum and no_check is not on for
844 * the socket then skip it.
845 */
846 if (!uh->check && !udp_sk(sk)->no_check6_rx)
847 continue;
848 if (!first) {
849 first = sk;
850 continue;
851 }
852 nskb = skb_clone(skb, GFP_ATOMIC);
853 if (unlikely(!nskb)) {
854 atomic_inc(&sk->sk_drops);
855 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
856 IS_UDPLITE(sk));
857 __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
858 IS_UDPLITE(sk));
859 continue;
860 }
861
862 if (udpv6_queue_rcv_skb(sk, nskb) > 0)
863 consume_skb(nskb);
864 }
865
866 /* Also lookup *:port if we are using hash2 and haven't done so yet. */
867 if (use_hash2 && hash2 != hash2_any) {
868 hash2 = hash2_any;
869 goto start_lookup;
870 }
871
872 if (first) {
873 if (udpv6_queue_rcv_skb(first, skb) > 0)
874 consume_skb(skb);
875 } else {
876 kfree_skb(skb);
877 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
878 proto == IPPROTO_UDPLITE);
879 }
880 return 0;
881 }
882
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
884 {
885 if (udp_sk_rx_dst_set(sk, dst)) {
886 const struct rt6_info *rt = (const struct rt6_info *)dst;
887
888 sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
889 }
890 }
891
/* Wrapper for udp_queue_rcv_skb() taking care of csum conversion and
 * return code conversion for IP layer consumption.
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
896 struct udphdr *uh)
897 {
898 int ret;
899
900 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
901 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
902
903 ret = udpv6_queue_rcv_skb(sk, skb);
904
905 /* a return value > 0 means to resubmit the input */
906 if (ret > 0)
907 return ret;
908 return 0;
909 }
910
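/* Common IPv6 receive path for UDP and UDP-Lite: validate and trim the
 * datagram length, initialise the checksum, prefer an early-demuxed socket
 * if one is attached to the skb, deliver multicasts to every matching
 * socket, otherwise do a unicast lookup and finally answer unmatched
 * packets with an ICMPv6 port unreachable.
 */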
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
912 int proto)
913 {
914 const struct in6_addr *saddr, *daddr;
915 struct net *net = dev_net(skb->dev);
916 struct udphdr *uh;
917 struct sock *sk;
918 bool refcounted;
919 u32 ulen = 0;
920
921 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
922 goto discard;
923
924 saddr = &ipv6_hdr(skb)->saddr;
925 daddr = &ipv6_hdr(skb)->daddr;
926 uh = udp_hdr(skb);
927
928 ulen = ntohs(uh->len);
929 if (ulen > skb->len)
930 goto short_packet;
931
932 if (proto == IPPROTO_UDP) {
933 /* UDP validates ulen. */
934
935 /* Check for jumbo payload */
936 if (ulen == 0)
937 ulen = skb->len;
938
939 if (ulen < sizeof(*uh))
940 goto short_packet;
941
942 if (ulen < skb->len) {
943 if (pskb_trim_rcsum(skb, ulen))
944 goto short_packet;
945 saddr = &ipv6_hdr(skb)->saddr;
946 daddr = &ipv6_hdr(skb)->daddr;
947 uh = udp_hdr(skb);
948 }
949 }
950
951 if (udp6_csum_init(skb, uh, proto))
952 goto csum_error;
953
954 /* Check if the socket is already available, e.g. due to early demux */
955 sk = skb_steal_sock(skb, &refcounted);
956 if (sk) {
957 struct dst_entry *dst = skb_dst(skb);
958 int ret;
959
960 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
961 udp6_sk_rx_dst_set(sk, dst);
962
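		/* A zero UDP checksum is only acceptable here if the socket
		 * explicitly opted in via UDP_NO_CHECK6_RX (zero-checksum
		 * tunnels, RFC 6936); otherwise report it as a checksum error.
		 */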
963 if (!uh->check && !udp_sk(sk)->no_check6_rx) {
964 if (refcounted)
965 sock_put(sk);
966 goto report_csum_error;
967 }
968
969 ret = udp6_unicast_rcv_skb(sk, skb, uh);
970 if (refcounted)
971 sock_put(sk);
972 return ret;
973 }
974
975 /*
976 * Multicast receive code
977 */
978 if (ipv6_addr_is_multicast(daddr))
979 return __udp6_lib_mcast_deliver(net, skb,
980 saddr, daddr, udptable, proto);
981
982 /* Unicast */
983 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
984 if (sk) {
985 if (!uh->check && !udp_sk(sk)->no_check6_rx)
986 goto report_csum_error;
987 return udp6_unicast_rcv_skb(sk, skb, uh);
988 }
989
990 if (!uh->check)
991 goto report_csum_error;
992
993 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
994 goto discard;
995
996 if (udp_lib_checksum_complete(skb))
997 goto csum_error;
998
999 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1000 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1001
1002 kfree_skb(skb);
1003 return 0;
1004
1005 short_packet:
1006 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1007 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1008 saddr, ntohs(uh->source),
1009 ulen, skb->len,
1010 daddr, ntohs(uh->dest));
1011 goto discard;
1012
1013 report_csum_error:
1014 udp6_csum_zero_error(skb);
1015 csum_error:
1016 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1017 discard:
1018 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1019 kfree_skb(skb);
1020 return 0;
1021 }
1022
1023
static struct sock *__udp6_lib_demux_lookup(struct net *net,
1025 __be16 loc_port, const struct in6_addr *loc_addr,
1026 __be16 rmt_port, const struct in6_addr *rmt_addr,
1027 int dif, int sdif)
1028 {
1029 unsigned short hnum = ntohs(loc_port);
1030 unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1031 unsigned int slot2 = hash2 & udp_table.mask;
1032 struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1033 const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1034 struct sock *sk;
1035
1036 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1037 if (sk->sk_state == TCP_ESTABLISHED &&
1038 INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
1039 return sk;
1040 /* Only check first socket in chain */
1041 break;
1042 }
1043 return NULL;
1044 }
1045
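/* Early demux: before the full IPv6 receive path runs, try to attach a
 * connected (TCP_ESTABLISHED) unicast UDP socket and its cached, validated
 * dst to the skb. Only the first socket in the hash2 chain is considered.
 */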
INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
1047 {
1048 struct net *net = dev_net(skb->dev);
1049 const struct udphdr *uh;
1050 struct sock *sk;
1051 struct dst_entry *dst;
1052 int dif = skb->dev->ifindex;
1053 int sdif = inet6_sdif(skb);
1054
1055 if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1056 sizeof(struct udphdr)))
1057 return;
1058
1059 uh = udp_hdr(skb);
1060
1061 if (skb->pkt_type == PACKET_HOST)
1062 sk = __udp6_lib_demux_lookup(net, uh->dest,
1063 &ipv6_hdr(skb)->daddr,
1064 uh->source, &ipv6_hdr(skb)->saddr,
1065 dif, sdif);
1066 else
1067 return;
1068
1069 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1070 return;
1071
1072 skb->sk = sk;
1073 skb->destructor = sock_efree;
1074 dst = rcu_dereference(sk->sk_rx_dst);
1075
1076 if (dst)
1077 dst = dst_check(dst, sk->sk_rx_dst_cookie);
1078 if (dst) {
1079 /* set noref for now.
1080 * any place which wants to hold dst has to call
1081 * dst_hold_safe()
1082 */
1083 skb_dst_set_noref(skb, dst);
1084 }
1085 }
1086
INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1088 {
1089 return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1090 }
1091
1092 /*
1093 * Throw away all pending data and cancel the corking. Socket is locked.
1094 */
static void udp_v6_flush_pending_frames(struct sock *sk)
1096 {
1097 struct udp_sock *up = udp_sk(sk);
1098
1099 if (up->pending == AF_INET)
1100 udp_flush_pending_frames(sk);
1101 else if (up->pending) {
1102 up->len = 0;
1103 up->pending = 0;
1104 ip6_flush_pending_frames(sk);
1105 }
1106 }
1107
static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1109 int addr_len)
1110 {
1111 if (addr_len < offsetofend(struct sockaddr, sa_family))
1112 return -EINVAL;
1113 /* The following checks are replicated from __ip6_datagram_connect()
1114 * and intended to prevent BPF program called below from accessing
1115 * bytes that are out of the bound specified by user in addr_len.
1116 */
1117 if (uaddr->sa_family == AF_INET) {
1118 if (__ipv6_only_sock(sk))
1119 return -EAFNOSUPPORT;
1120 return udp_pre_connect(sk, uaddr, addr_len);
1121 }
1122
1123 if (addr_len < SIN6_LEN_RFC2133)
1124 return -EINVAL;
1125
1126 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1127 }
1128
1129 /**
1130 * udp6_hwcsum_outgoing - handle outgoing HW checksumming
1131 * @sk: socket we are sending on
1132 * @skb: sk_buff containing the filled-in UDP header
1133 * (checksum field must be zeroed out)
1134 * @saddr: source address
1135 * @daddr: destination address
1136 * @len: length of packet
1137 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1139 const struct in6_addr *saddr,
1140 const struct in6_addr *daddr, int len)
1141 {
1142 unsigned int offset;
1143 struct udphdr *uh = udp_hdr(skb);
1144 struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1145 __wsum csum = 0;
1146
1147 if (!frags) {
1148 /* Only one fragment on the socket. */
1149 skb->csum_start = skb_transport_header(skb) - skb->head;
1150 skb->csum_offset = offsetof(struct udphdr, check);
1151 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1152 } else {
/*
 * HW checksum won't work since there are two or more
 * fragments on the socket, so the checksums of all
 * sk_buffs must be folded together in software.
 */
1158 offset = skb_transport_offset(skb);
1159 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1160 csum = skb->csum;
1161
1162 skb->ip_summed = CHECKSUM_NONE;
1163
1164 do {
1165 csum = csum_add(csum, frags->csum);
1166 } while ((frags = frags->next));
1167
1168 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1169 csum);
1170 if (uh->check == 0)
1171 uh->check = CSUM_MANGLED_0;
1172 }
1173 }
1174
1175 /*
1176 * Sending
1177 */
1178
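/* Fill in the UDP header of a fully built skb, set up GSO state when a
 * segment size was requested (UDP_SEGMENT), choose between UDP-Lite,
 * disabled, hardware-offloaded or software checksumming, and hand the
 * packet to ip6_send_skb().
 */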
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1180 struct inet_cork *cork)
1181 {
1182 struct sock *sk = skb->sk;
1183 struct udphdr *uh;
1184 int err = 0;
1185 int is_udplite = IS_UDPLITE(sk);
1186 __wsum csum = 0;
1187 int offset = skb_transport_offset(skb);
1188 int len = skb->len - offset;
1189 int datalen = len - sizeof(*uh);
1190
1191 /*
1192 * Create a UDP header
1193 */
1194 uh = udp_hdr(skb);
1195 uh->source = fl6->fl6_sport;
1196 uh->dest = fl6->fl6_dport;
1197 uh->len = htons(len);
1198 uh->check = 0;
1199
1200 if (cork->gso_size) {
1201 const int hlen = skb_network_header_len(skb) +
1202 sizeof(struct udphdr);
1203
1204 if (hlen + cork->gso_size > cork->fragsize) {
1205 kfree_skb(skb);
1206 return -EINVAL;
1207 }
1208 if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1209 kfree_skb(skb);
1210 return -EINVAL;
1211 }
1212 if (udp_sk(sk)->no_check6_tx) {
1213 kfree_skb(skb);
1214 return -EINVAL;
1215 }
1216 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1217 dst_xfrm(skb_dst(skb))) {
1218 kfree_skb(skb);
1219 return -EIO;
1220 }
1221
1222 if (datalen > cork->gso_size) {
1223 skb_shinfo(skb)->gso_size = cork->gso_size;
1224 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1225 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1226 cork->gso_size);
1227 }
1228 goto csum_partial;
1229 }
1230
1231 if (is_udplite)
1232 csum = udplite_csum(skb);
1233 else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
1234 skb->ip_summed = CHECKSUM_NONE;
1235 goto send;
1236 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1237 csum_partial:
1238 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1239 goto send;
1240 } else
1241 csum = udp_csum(skb);
1242
1243 /* add protocol-dependent pseudo-header */
1244 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1245 len, fl6->flowi6_proto, csum);
1246 if (uh->check == 0)
1247 uh->check = CSUM_MANGLED_0;
1248
1249 send:
1250 err = ip6_send_skb(skb);
1251 if (err) {
1252 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1253 UDP6_INC_STATS(sock_net(sk),
1254 UDP_MIB_SNDBUFERRORS, is_udplite);
1255 err = 0;
1256 }
1257 } else {
1258 UDP6_INC_STATS(sock_net(sk),
1259 UDP_MIB_OUTDATAGRAMS, is_udplite);
1260 }
1261 return err;
1262 }
1263
static int udp_v6_push_pending_frames(struct sock *sk)
1265 {
1266 struct sk_buff *skb;
1267 struct udp_sock *up = udp_sk(sk);
1268 struct flowi6 fl6;
1269 int err = 0;
1270
1271 if (up->pending == AF_INET)
1272 return udp_push_pending_frames(sk);
1273
1274 /* ip6_finish_skb will release the cork, so make a copy of
1275 * fl6 here.
1276 */
1277 fl6 = inet_sk(sk)->cork.fl.u.ip6;
1278
1279 skb = ip6_finish_skb(sk);
1280 if (!skb)
1281 goto out;
1282
1283 err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);
1284
1285 out:
1286 up->len = 0;
1287 up->pending = 0;
1288 return err;
1289 }
1290
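/* sendmsg() for UDPv6. Destinations that are IPv4-mapped are handed over to
 * udp_sendmsg(). If the socket is corked, data is appended to the pending
 * frame under the socket lock; otherwise the lockless fast path builds the
 * whole datagram with ip6_make_skb() and sends it immediately.
 */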
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1292 {
1293 struct ipv6_txoptions opt_space;
1294 struct udp_sock *up = udp_sk(sk);
1295 struct inet_sock *inet = inet_sk(sk);
1296 struct ipv6_pinfo *np = inet6_sk(sk);
1297 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1298 struct in6_addr *daddr, *final_p, final;
1299 struct ipv6_txoptions *opt = NULL;
1300 struct ipv6_txoptions *opt_to_free = NULL;
1301 struct ip6_flowlabel *flowlabel = NULL;
1302 struct flowi6 fl6;
1303 struct dst_entry *dst;
1304 struct ipcm6_cookie ipc6;
1305 int addr_len = msg->msg_namelen;
1306 bool connected = false;
1307 int ulen = len;
1308 int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
1309 int err;
1310 int is_udplite = IS_UDPLITE(sk);
1311 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1312
1313 ipcm6_init(&ipc6);
1314 ipc6.gso_size = READ_ONCE(up->gso_size);
1315 ipc6.sockc.tsflags = sk->sk_tsflags;
1316 ipc6.sockc.mark = sk->sk_mark;
1317
1318 /* destination address check */
1319 if (sin6) {
1320 if (addr_len < offsetof(struct sockaddr, sa_data))
1321 return -EINVAL;
1322
1323 switch (sin6->sin6_family) {
1324 case AF_INET6:
1325 if (addr_len < SIN6_LEN_RFC2133)
1326 return -EINVAL;
1327 daddr = &sin6->sin6_addr;
1328 if (ipv6_addr_any(daddr) &&
1329 ipv6_addr_v4mapped(&np->saddr))
1330 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1331 daddr);
1332 break;
1333 case AF_INET:
1334 goto do_udp_sendmsg;
1335 case AF_UNSPEC:
1336 msg->msg_name = sin6 = NULL;
1337 msg->msg_namelen = addr_len = 0;
1338 daddr = NULL;
1339 break;
1340 default:
1341 return -EINVAL;
1342 }
1343 } else if (!up->pending) {
1344 if (sk->sk_state != TCP_ESTABLISHED)
1345 return -EDESTADDRREQ;
1346 daddr = &sk->sk_v6_daddr;
1347 } else
1348 daddr = NULL;
1349
1350 if (daddr) {
1351 if (ipv6_addr_v4mapped(daddr)) {
1352 struct sockaddr_in sin;
1353 sin.sin_family = AF_INET;
1354 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1355 sin.sin_addr.s_addr = daddr->s6_addr32[3];
1356 msg->msg_name = &sin;
1357 msg->msg_namelen = sizeof(sin);
1358 do_udp_sendmsg:
1359 if (__ipv6_only_sock(sk))
1360 return -ENETUNREACH;
1361 return udp_sendmsg(sk, msg, len);
1362 }
1363 }
1364
1365 if (up->pending == AF_INET)
1366 return udp_sendmsg(sk, msg, len);
1367
/* Rough check on arithmetic overflow;
 * a better check is made in ip6_append_data().
 */
1371 if (len > INT_MAX - sizeof(struct udphdr))
1372 return -EMSGSIZE;
1373
1374 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1375 if (up->pending) {
1376 /*
1377 * There are pending frames.
1378 * The socket lock must be held while it's corked.
1379 */
1380 lock_sock(sk);
1381 if (likely(up->pending)) {
1382 if (unlikely(up->pending != AF_INET6)) {
1383 release_sock(sk);
1384 return -EAFNOSUPPORT;
1385 }
1386 dst = NULL;
1387 goto do_append_data;
1388 }
1389 release_sock(sk);
1390 }
1391 ulen += sizeof(struct udphdr);
1392
1393 memset(&fl6, 0, sizeof(fl6));
1394
1395 if (sin6) {
1396 if (sin6->sin6_port == 0)
1397 return -EINVAL;
1398
1399 fl6.fl6_dport = sin6->sin6_port;
1400 daddr = &sin6->sin6_addr;
1401
1402 if (np->sndflow) {
1403 fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1404 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1405 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1406 if (IS_ERR(flowlabel))
1407 return -EINVAL;
1408 }
1409 }
1410
1411 /*
1412 * Otherwise it will be difficult to maintain
1413 * sk->sk_dst_cache.
1414 */
1415 if (sk->sk_state == TCP_ESTABLISHED &&
1416 ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1417 daddr = &sk->sk_v6_daddr;
1418
1419 if (addr_len >= sizeof(struct sockaddr_in6) &&
1420 sin6->sin6_scope_id &&
1421 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1422 fl6.flowi6_oif = sin6->sin6_scope_id;
1423 } else {
1424 if (sk->sk_state != TCP_ESTABLISHED)
1425 return -EDESTADDRREQ;
1426
1427 fl6.fl6_dport = inet->inet_dport;
1428 daddr = &sk->sk_v6_daddr;
1429 fl6.flowlabel = np->flow_label;
1430 connected = true;
1431 }
1432
1433 if (!fl6.flowi6_oif)
1434 fl6.flowi6_oif = sk->sk_bound_dev_if;
1435
1436 if (!fl6.flowi6_oif)
1437 fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1438
1439 fl6.flowi6_uid = sk->sk_uid;
1440
1441 if (msg->msg_controllen) {
1442 opt = &opt_space;
1443 memset(opt, 0, sizeof(struct ipv6_txoptions));
1444 opt->tot_len = sizeof(*opt);
1445 ipc6.opt = opt;
1446
1447 err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1448 if (err > 0)
1449 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
1450 &ipc6);
1451 if (err < 0) {
1452 fl6_sock_release(flowlabel);
1453 return err;
1454 }
1455 if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1456 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1457 if (IS_ERR(flowlabel))
1458 return -EINVAL;
1459 }
1460 if (!(opt->opt_nflen|opt->opt_flen))
1461 opt = NULL;
1462 connected = false;
1463 }
1464 if (!opt) {
1465 opt = txopt_get(np);
1466 opt_to_free = opt;
1467 }
1468 if (flowlabel)
1469 opt = fl6_merge_options(&opt_space, flowlabel, opt);
1470 opt = ipv6_fixup_options(&opt_space, opt);
1471 ipc6.opt = opt;
1472
1473 fl6.flowi6_proto = sk->sk_protocol;
1474 fl6.flowi6_mark = ipc6.sockc.mark;
1475 fl6.daddr = *daddr;
1476 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1477 fl6.saddr = np->saddr;
1478 fl6.fl6_sport = inet->inet_sport;
1479
1480 if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1481 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1482 (struct sockaddr *)sin6, &fl6.saddr);
1483 if (err)
1484 goto out_no_dst;
1485 if (sin6) {
1486 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1487 /* BPF program rewrote IPv6-only by IPv4-mapped
1488 * IPv6. It's currently unsupported.
1489 */
1490 err = -ENOTSUPP;
1491 goto out_no_dst;
1492 }
1493 if (sin6->sin6_port == 0) {
1494 /* BPF program set invalid port. Reject it. */
1495 err = -EINVAL;
1496 goto out_no_dst;
1497 }
1498 fl6.fl6_dport = sin6->sin6_port;
1499 fl6.daddr = sin6->sin6_addr;
1500 }
1501 }
1502
1503 if (ipv6_addr_any(&fl6.daddr))
1504 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1505
1506 final_p = fl6_update_dst(&fl6, opt, &final);
1507 if (final_p)
1508 connected = false;
1509
1510 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1511 fl6.flowi6_oif = np->mcast_oif;
1512 connected = false;
1513 } else if (!fl6.flowi6_oif)
1514 fl6.flowi6_oif = np->ucast_oif;
1515
1516 security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
1517
1518 if (ipc6.tclass < 0)
1519 ipc6.tclass = np->tclass;
1520
1521 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1522
1523 dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
1524 if (IS_ERR(dst)) {
1525 err = PTR_ERR(dst);
1526 dst = NULL;
1527 goto out;
1528 }
1529
1530 if (ipc6.hlimit < 0)
1531 ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1532
1533 if (msg->msg_flags&MSG_CONFIRM)
1534 goto do_confirm;
1535 back_from_confirm:
1536
1537 /* Lockless fast path for the non-corking case */
1538 if (!corkreq) {
1539 struct inet_cork_full cork;
1540 struct sk_buff *skb;
1541
1542 skb = ip6_make_skb(sk, getfrag, msg, ulen,
1543 sizeof(struct udphdr), &ipc6,
1544 &fl6, (struct rt6_info *)dst,
1545 msg->msg_flags, &cork);
1546 err = PTR_ERR(skb);
1547 if (!IS_ERR_OR_NULL(skb))
1548 err = udp_v6_send_skb(skb, &fl6, &cork.base);
1549 goto out;
1550 }
1551
1552 lock_sock(sk);
1553 if (unlikely(up->pending)) {
1554 /* The socket is already corked while preparing it. */
1555 /* ... which is an evident application bug. --ANK */
1556 release_sock(sk);
1557
1558 net_dbg_ratelimited("udp cork app bug 2\n");
1559 err = -EINVAL;
1560 goto out;
1561 }
1562
1563 up->pending = AF_INET6;
1564
1565 do_append_data:
1566 if (ipc6.dontfrag < 0)
1567 ipc6.dontfrag = np->dontfrag;
1568 up->len += ulen;
1569 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1570 &ipc6, &fl6, (struct rt6_info *)dst,
1571 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1572 if (err)
1573 udp_v6_flush_pending_frames(sk);
1574 else if (!corkreq)
1575 err = udp_v6_push_pending_frames(sk);
1576 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1577 up->pending = 0;
1578
1579 if (err > 0)
1580 err = np->recverr ? net_xmit_errno(err) : 0;
1581 release_sock(sk);
1582
1583 out:
1584 dst_release(dst);
1585 out_no_dst:
1586 fl6_sock_release(flowlabel);
1587 txopt_put(opt_to_free);
1588 if (!err)
1589 return len;
1590 /*
1591 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
1592 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1593 * we don't have a good statistic (IpOutDiscards but it can be too many
1594 * things). We could add another new stat but at least for now that
1595 * seems like overkill.
1596 */
1597 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1598 UDP6_INC_STATS(sock_net(sk),
1599 UDP_MIB_SNDBUFERRORS, is_udplite);
1600 }
1601 return err;
1602
1603 do_confirm:
1604 if (msg->msg_flags & MSG_PROBE)
1605 dst_confirm_neigh(dst, &fl6.daddr);
1606 if (!(msg->msg_flags&MSG_PROBE) || len)
1607 goto back_from_confirm;
1608 err = 0;
1609 goto out;
1610 }
1611
void udpv6_destroy_sock(struct sock *sk)
1613 {
1614 struct udp_sock *up = udp_sk(sk);
1615 lock_sock(sk);
1616
1617 /* protects from races with udp_abort() */
1618 sock_set_flag(sk, SOCK_DEAD);
1619 udp_v6_flush_pending_frames(sk);
1620 release_sock(sk);
1621
1622 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1623 if (up->encap_type) {
1624 void (*encap_destroy)(struct sock *sk);
1625 encap_destroy = READ_ONCE(up->encap_destroy);
1626 if (encap_destroy)
1627 encap_destroy(sk);
1628 }
1629 if (up->encap_enabled) {
1630 static_branch_dec(&udpv6_encap_needed_key);
1631 udp_encap_disable();
1632 }
1633 }
1634
1635 inet6_destroy_sock(sk);
1636 }
1637
1638 /*
1639 * Socket option code for UDP
1640 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1642 unsigned int optlen)
1643 {
1644 if (level == SOL_UDP || level == SOL_UDPLITE)
1645 return udp_lib_setsockopt(sk, level, optname,
1646 optval, optlen,
1647 udp_v6_push_pending_frames);
1648 return ipv6_setsockopt(sk, level, optname, optval, optlen);
1649 }
1650
int udpv6_getsockopt(struct sock *sk, int level, int optname,
1652 char __user *optval, int __user *optlen)
1653 {
1654 if (level == SOL_UDP || level == SOL_UDPLITE)
1655 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1656 return ipv6_getsockopt(sk, level, optname, optval, optlen);
1657 }
1658
1659 /* thinking of making this const? Don't.
1660 * early_demux can change based on sysctl.
1661 */
1662 static struct inet6_protocol udpv6_protocol = {
1663 .early_demux = udp_v6_early_demux,
1664 .early_demux_handler = udp_v6_early_demux,
1665 .handler = udpv6_rcv,
1666 .err_handler = udpv6_err,
1667 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1668 };
1669
1670 /* ------------------------------------------------------------------------ */
1671 #ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
1673 {
1674 if (v == SEQ_START_TOKEN) {
1675 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1676 } else {
1677 int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1678 struct inet_sock *inet = inet_sk(v);
1679 __u16 srcp = ntohs(inet->inet_sport);
1680 __u16 destp = ntohs(inet->inet_dport);
1681 __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1682 udp_rqueue_get(v), bucket);
1683 }
1684 return 0;
1685 }
1686
1687 const struct seq_operations udp6_seq_ops = {
1688 .start = udp_seq_start,
1689 .next = udp_seq_next,
1690 .stop = udp_seq_stop,
1691 .show = udp6_seq_show,
1692 };
1693 EXPORT_SYMBOL(udp6_seq_ops);
1694
1695 static struct udp_seq_afinfo udp6_seq_afinfo = {
1696 .family = AF_INET6,
1697 .udp_table = &udp_table,
1698 };
1699
int __net_init udp6_proc_init(struct net *net)
1701 {
1702 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1703 sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1704 return -ENOMEM;
1705 return 0;
1706 }
1707
void udp6_proc_exit(struct net *net)
1709 {
1710 remove_proc_entry("udp6", net->proc_net);
1711 }
1712 #endif /* CONFIG_PROC_FS */
1713
1714 /* ------------------------------------------------------------------------ */
1715
1716 struct proto udpv6_prot = {
1717 .name = "UDPv6",
1718 .owner = THIS_MODULE,
1719 .close = udp_lib_close,
1720 .pre_connect = udpv6_pre_connect,
1721 .connect = ip6_datagram_connect,
1722 .disconnect = udp_disconnect,
1723 .ioctl = udp_ioctl,
1724 .init = udp_init_sock,
1725 .destroy = udpv6_destroy_sock,
1726 .setsockopt = udpv6_setsockopt,
1727 .getsockopt = udpv6_getsockopt,
1728 .sendmsg = udpv6_sendmsg,
1729 .recvmsg = udpv6_recvmsg,
1730 .release_cb = ip6_datagram_release_cb,
1731 .hash = udp_lib_hash,
1732 .unhash = udp_lib_unhash,
1733 .rehash = udp_v6_rehash,
1734 .get_port = udp_v6_get_port,
1735 #ifdef CONFIG_BPF_SYSCALL
1736 .psock_update_sk_prot = udp_bpf_update_proto,
1737 #endif
1738 .memory_allocated = &udp_memory_allocated,
1739 .sysctl_mem = sysctl_udp_mem,
1740 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1741 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1742 .obj_size = sizeof(struct udp6_sock),
1743 .h.udp_table = &udp_table,
1744 .diag_destroy = udp_abort,
1745 };
1746
1747 static struct inet_protosw udpv6_protosw = {
1748 .type = SOCK_DGRAM,
1749 .protocol = IPPROTO_UDP,
1750 .prot = &udpv6_prot,
1751 .ops = &inet6_dgram_ops,
1752 .flags = INET_PROTOSW_PERMANENT,
1753 };
1754
int __init udpv6_init(void)
1756 {
1757 int ret;
1758
1759 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1760 if (ret)
1761 goto out;
1762
1763 ret = inet6_register_protosw(&udpv6_protosw);
1764 if (ret)
1765 goto out_udpv6_protocol;
1766 out:
1767 return ret;
1768
1769 out_udpv6_protocol:
1770 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1771 goto out;
1772 }
1773
void udpv6_exit(void)
1775 {
1776 inet6_unregister_protosw(&udpv6_protosw);
1777 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1778 }
1779