/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>
#include <linux/static_key.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options */
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
}
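
/*
 * Rough usage sketch (not part of this header's API): a datagram-style
 * sendmsg() path typically seeds the cookie from the socket and then lets
 * control messages override individual fields, e.g.:
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipcm_init_sk(&ipc, inet_sk(sk));
 *	if (msg->msg_controllen) {
 *		err = ip_cmsg_send(sk, msg, &ipc, false);
 *		if (unlikely(err < 0))
 *			return err;
 *	}
 *
 * Fields still holding their ipcm_init() defaults (e.g. tos == -1) mean
 * "use the socket's own setting"; see get_rttos() below.
 */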

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field and are then processed
   like local packets; but only if someone wants them! Otherwise a
   router that is not running rsvpd will kill RSVP.

   What user level does with them is its own problem. How it would
   masquerade or NAT them is anyone's guess (that is a joke, joke :-)),
   but the receiver should be clever enough e.g. to forward mtrace
   requests sent to a multicast group so that they reach the destination's
   designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};
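
/*
 * Illustrative only: sockets are placed on (and removed from) this chain
 * through ip_ra_control(), declared further down, which the IP_ROUTER_ALERT
 * socket option uses for raw sockets, roughly:
 *
 *	err = ip_ra_control(sk, 1, NULL);	// register
 *	...
 *	err = ip_ra_control(sk, 0, NULL);	// unregister
 *
 * ip_call_ra_chain() then delivers matching packets to every registered
 * socket on input.
 */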

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime		*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
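
/*
 * Rough sketch of how a caller such as ip_do_fragment() can drive the
 * fraglist iterator when the skb already carries a suitable frag list
 * (illustrative, error handling trimmed):
 *
 *	struct ip_fraglist_iter iter;
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */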

struct ip_frag_state {
	bool		DF;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);
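
/*
 * Rough sketch of the corresponding slow path, where each fragment is
 * allocated and emitted in turn (illustrative only):
 *
 *	struct ip_frag_state state;
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);
 *	while (state.left > 0) {
 *		struct sk_buff *skb2 = ip_frag_next(skb, &state);
 *
 *		if (IS_ERR(skb2)) {
 *			err = PTR_ERR(skb2);
 *			break;
 *		}
 *		err = output(net, sk, skb2);
 *		if (err)
 *			break;
 *	}
 */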

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);
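
/*
 * Rough usage sketch for the corked-output helpers above (illustrative,
 * based on how a UDP-style sender is typically written): data is appended
 * to the socket's write queue and then either pushed out or discarded as
 * a unit.
 *
 *	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, ulen,
 *			     sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, fl4);
 */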

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val)	__SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
{
	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
}

unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing a wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina a year ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}
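
/*
 * In effect this is an incremental (RFC 1624 style) checksum update:
 * decrementing the TTL lowers the 16-bit TTL/protocol word of the header
 * by 0x0100, so the stored checksum is adjusted by adding 0x0100 and
 * folding the carry back in (the "check >= 0xFFFF" term), instead of
 * recomputing the whole header checksum.
 */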

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding) {
		mtu = rt->rt_pmtu;
		if (mtu && time_before(jiffies, rt->dst.expires))
			goto out;
	}

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
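
/*
 * This is the RFC 1112 mapping: 01:00:5e followed by the low 23 bits of the
 * group address.  For example, 224.1.2.3 maps to 01:00:5e:01:02:03; note
 * that 225.129.2.3 maps to the same MAC, since only the low 23 bits of the
 * IP address survive the mapping.
 */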

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr >>= 8;
	buf[18] = addr & 0xff;
	addr >>= 8;
	buf[17] = addr & 0xff;
	addr >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}
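
/*
 * For example (assuming the conntrack convention of encoding the zone id
 * into the defrag user, i.e. IP_DEFRAG_CONNTRACK_IN + zone), a check such as
 *
 *	ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				  __IP_DEFRAG_CONNTRACK_IN_END)
 *
 * matches conntrack-originated defragmentation in any zone.
 */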

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */