1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the AF_INET socket handler.
8 *
9 * Version: @(#)sock.h 1.0.4 05/13/93
10 *
11 * Authors: Ross Biro
12 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche <flla@stud.uni-sb.de>
15 *
16 * Fixes:
17 * Alan Cox : Volatiles in skbuff pointers. See
18 * skbuff comments. May be overdone,
19 * better to prove they can be removed
20 * than the reverse.
21 * Alan Cox : Added a zapped field for tcp to note
22 * a socket is reset and must stay shut up
23 * Alan Cox : New fields for options
24 * Pauline Middelink : identd support
25 * Alan Cox : Eliminate low level recv/recvfrom
26 * David S. Miller : New socket lookup architecture.
27 * Steve Whitehouse: Default routines for sock_ops
28 * Arnaldo C. Melo : removed net_pinfo, tp_pinfo and made
29 * protinfo be just a void pointer, as the
30 * protocol specific parts were moved to
31 * respective headers and ipv4/v6, etc now
32 * use private slabcaches for its socks
33 * Pedro Hortas : New flags field for socket options
34 */
35 #ifndef _SOCK_H
36 #define _SOCK_H
37
38 #include <linux/hardirq.h>
39 #include <linux/kernel.h>
40 #include <linux/list.h>
41 #include <linux/list_nulls.h>
42 #include <linux/timer.h>
43 #include <linux/cache.h>
44 #include <linux/bitops.h>
45 #include <linux/lockdep.h>
46 #include <linux/netdevice.h>
47 #include <linux/skbuff.h> /* struct sk_buff */
48 #include <linux/mm.h>
49 #include <linux/security.h>
50 #include <linux/slab.h>
51 #include <linux/uaccess.h>
52 #include <linux/page_counter.h>
53 #include <linux/memcontrol.h>
54 #include <linux/static_key.h>
55 #include <linux/sched.h>
56 #include <linux/wait.h>
57 #include <linux/cgroup-defs.h>
58 #include <linux/rbtree.h>
59 #include <linux/filter.h>
60 #include <linux/rculist_nulls.h>
61 #include <linux/poll.h>
62 #include <linux/sockptr.h>
63 #include <linux/indirect_call_wrapper.h>
64 #include <linux/atomic.h>
65 #include <linux/refcount.h>
66 #include <net/dst.h>
67 #include <net/checksum.h>
68 #include <net/tcp_states.h>
69 #include <linux/net_tstamp.h>
70 #include <net/l3mdev.h>
71 #include <uapi/linux/socket.h>
72
73 /*
74 * This structure really needs to be cleaned up.
75 * Most of it is for TCP, and not used by any of
76 * the other protocols.
77 */
78
79 /* Define this to get the SOCK_DBG debugging facility. */
80 #define SOCK_DEBUGGING
81 #ifdef SOCK_DEBUGGING
82 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
83 printk(KERN_DEBUG msg); } while (0)
84 #else
85 /* Validate arguments and do nothing */
86 static inline __printf(2, 3)
87 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
88 {
89 }
90 #endif
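/*
 * Illustrative use (sketch, not a call site in this header; "old" and "new"
 * are hypothetical locals): SOCK_DEBUG() only emits output when the socket
 * has SOCK_DBG set (via SO_DEBUG), e.g.
 *
 *	SOCK_DEBUG(sk, "state change %d -> %d\n", old, new);
 *
 * With SOCK_DEBUGGING undefined, the empty inline still lets the compiler
 * type-check the format string (__printf(2, 3)) while generating no code.
 */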
91
92 /* This is the per-socket lock. The spinlock provides a synchronization
93 * between user contexts and software interrupt processing, whereas the
94 * mini-semaphore synchronizes multiple users amongst themselves.
95 */
96 typedef struct {
97 spinlock_t slock;
98 int owned;
99 wait_queue_head_t wq;
100 /*
101 * We express the mutex-alike socket_lock semantics
102 * to the lock validator by explicitly managing
103 * the slock as a lock variant (in addition to
104 * the slock itself):
105 */
106 #ifdef CONFIG_DEBUG_LOCK_ALLOC
107 struct lockdep_map dep_map;
108 #endif
109 } socket_lock_t;
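/*
 * Usage sketch (illustrative): process context takes the mutex-like "owned"
 * half via lock_sock()/release_sock() (declared further down), while
 * softirq/BH context only takes the embedded spinlock via bh_lock_sock()/
 * bh_unlock_sock() and defers work to the backlog if the socket is owned:
 *
 *	lock_sock(sk);		// user context, may sleep
 *	...			// update protocol state
 *	release_sock(sk);	// also processes the accumulated backlog
 */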
110
111 struct sock;
112 struct proto;
113 struct net;
114
115 typedef __u32 __bitwise __portpair;
116 typedef __u64 __bitwise __addrpair;
117
118 /**
119 * struct sock_common - minimal network layer representation of sockets
120 * @skc_daddr: Foreign IPv4 addr
121 * @skc_rcv_saddr: Bound local IPv4 addr
122 * @skc_addrpair: 8-byte-aligned __u64 union of @skc_daddr & @skc_rcv_saddr
123 * @skc_hash: hash value used with various protocol lookup tables
124 * @skc_u16hashes: two u16 hash values used by UDP lookup tables
125 * @skc_dport: placeholder for inet_dport/tw_dport
126 * @skc_num: placeholder for inet_num/tw_num
127 * @skc_portpair: __u32 union of @skc_dport & @skc_num
128 * @skc_family: network address family
129 * @skc_state: Connection state
130 * @skc_reuse: %SO_REUSEADDR setting
131 * @skc_reuseport: %SO_REUSEPORT setting
132 * @skc_ipv6only: socket is IPV6 only
133 * @skc_net_refcnt: socket is using net ref counting
134 * @skc_bound_dev_if: bound device index if != 0
135 * @skc_bind_node: bind hash linkage for various protocol lookup tables
136 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
137 * @skc_prot: protocol handlers inside a network family
138 * @skc_net: reference to the network namespace of this socket
139 * @skc_v6_daddr: IPV6 destination address
140 * @skc_v6_rcv_saddr: IPV6 source address
141 * @skc_cookie: socket's cookie value
142 * @skc_node: main hash linkage for various protocol lookup tables
143 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
144 * @skc_tx_queue_mapping: tx queue number for this connection
145 * @skc_rx_queue_mapping: rx queue number for this connection
146 * @skc_flags: place holder for sk_flags
147 * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
148 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
149 * @skc_listener: connection request listener socket (aka rsk_listener)
150 * [union with @skc_flags]
151 * @skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row
152 * [union with @skc_flags]
153 * @skc_incoming_cpu: record/match cpu processing incoming packets
154 * @skc_rcv_wnd: (aka rsk_rcv_wnd) TCP receive window size (possibly scaled)
155 * [union with @skc_incoming_cpu]
156 * @skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number
157 * [union with @skc_incoming_cpu]
158 * @skc_refcnt: reference count
159 *
160 * This is the minimal network layer representation of sockets, the header
161 * for struct sock and struct inet_timewait_sock.
162 */
163 struct sock_common {
164 	/* skc_daddr and skc_rcv_saddr must be grouped on an 8-byte-aligned
165 * address on 64bit arches : cf INET_MATCH()
166 */
167 union {
168 __addrpair skc_addrpair;
169 struct {
170 __be32 skc_daddr;
171 __be32 skc_rcv_saddr;
172 };
173 };
174 union {
175 unsigned int skc_hash;
176 __u16 skc_u16hashes[2];
177 };
178 /* skc_dport && skc_num must be grouped as well */
179 union {
180 __portpair skc_portpair;
181 struct {
182 __be16 skc_dport;
183 __u16 skc_num;
184 };
185 };
186
187 unsigned short skc_family;
188 volatile unsigned char skc_state;
189 unsigned char skc_reuse:4;
190 unsigned char skc_reuseport:1;
191 unsigned char skc_ipv6only:1;
192 unsigned char skc_net_refcnt:1;
193 int skc_bound_dev_if;
194 union {
195 struct hlist_node skc_bind_node;
196 struct hlist_node skc_portaddr_node;
197 };
198 struct proto *skc_prot;
199 possible_net_t skc_net;
200
201 #if IS_ENABLED(CONFIG_IPV6)
202 struct in6_addr skc_v6_daddr;
203 struct in6_addr skc_v6_rcv_saddr;
204 #endif
205
206 atomic64_t skc_cookie;
207
208 /* following fields are padding to force
209 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
210 * assuming IPV6 is enabled. We use this padding differently
211 	 * for different kinds of 'sockets'
212 */
213 union {
214 unsigned long skc_flags;
215 struct sock *skc_listener; /* request_sock */
216 struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
217 };
218 /*
219 * fields between dontcopy_begin/dontcopy_end
220 * are not copied in sock_copy()
221 */
222 /* private: */
223 int skc_dontcopy_begin[0];
224 /* public: */
225 union {
226 struct hlist_node skc_node;
227 struct hlist_nulls_node skc_nulls_node;
228 };
229 unsigned short skc_tx_queue_mapping;
230 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
231 unsigned short skc_rx_queue_mapping;
232 #endif
233 union {
234 int skc_incoming_cpu;
235 u32 skc_rcv_wnd;
236 u32 skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
237 };
238
239 refcount_t skc_refcnt;
240 /* private: */
241 int skc_dontcopy_end[0];
242 union {
243 u32 skc_rxhash;
244 u32 skc_window_clamp;
245 u32 skc_tw_snd_nxt; /* struct tcp_timewait_sock */
246 };
247 /* public: */
248 };
249
250 struct bpf_local_storage;
251
252 /**
253 * struct sock - network layer representation of sockets
254 * @__sk_common: shared layout with inet_timewait_sock
255 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
256 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
257 * @sk_lock: synchronizer
258 * @sk_kern_sock: True if sock is using kernel lock classes
259 * @sk_rcvbuf: size of receive buffer in bytes
260 * @sk_wq: sock wait queue and async head
261 * @sk_rx_dst: receive input route used by early demux
262 * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
263 * @sk_rx_dst_cookie: cookie for @sk_rx_dst
264 * @sk_dst_cache: destination cache
265 * @sk_dst_pending_confirm: need to confirm neighbour
266 * @sk_policy: flow policy
267 * @sk_receive_queue: incoming packets
268 * @sk_wmem_alloc: transmit queue bytes committed
269 * @sk_tsq_flags: TCP Small Queues flags
270 * @sk_write_queue: Packet sending queue
271 * @sk_omem_alloc: "o" is "option" or "other"
272 * @sk_wmem_queued: persistent queue size
273 * @sk_forward_alloc: space allocated forward
274 * @sk_reserved_mem: space reserved and non-reclaimable for the socket
275 * @sk_napi_id: id of the last napi context to receive data for sk
276 * @sk_ll_usec: usecs to busypoll when there is no data
277 * @sk_allocation: allocation mode
278 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
279 * @sk_pacing_status: Pacing status (requested, handled by sch_fq)
280 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
281 * @sk_sndbuf: size of send buffer in bytes
282 * @__sk_flags_offset: empty field used to determine location of bitfield
283 * @sk_padding: unused element for alignment
284 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
285 * @sk_no_check_rx: allow zero checksum in RX packets
286 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
287 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
288 * @sk_route_forced_caps: static, forced route capabilities
289 * (set in tcp_init_sock())
290 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
291 * @sk_gso_max_size: Maximum GSO segment size to build
292 * @sk_gso_max_segs: Maximum number of GSO segments
293 * @sk_pacing_shift: scaling factor for TCP Small Queues
294 * @sk_lingertime: %SO_LINGER l_linger setting
295 * @sk_backlog: always used with the per-socket spinlock held
296 * @sk_callback_lock: used with the callbacks in the end of this struct
297 * @sk_error_queue: rarely used
298 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
299 * IPV6_ADDRFORM for instance)
300 * @sk_err: last error
301 * @sk_err_soft: errors that don't cause failure but are the cause of a
302 * persistent failure not just 'timed out'
303 * @sk_drops: raw/udp drops counter
304 * @sk_ack_backlog: current listen backlog
305 * @sk_max_ack_backlog: listen backlog set in listen()
306 * @sk_uid: user id of owner
307 * @sk_prefer_busy_poll: prefer busypolling over softirq processing
308 * @sk_busy_poll_budget: napi processing budget when busypolling
309 * @sk_priority: %SO_PRIORITY setting
310 * @sk_type: socket type (%SOCK_STREAM, etc)
311 * @sk_protocol: which protocol this socket belongs in this network family
312 * @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred
313 * @sk_peer_pid: &struct pid for this socket's peer
314 * @sk_peer_cred: %SO_PEERCRED setting
315 * @sk_rcvlowat: %SO_RCVLOWAT setting
316 * @sk_rcvtimeo: %SO_RCVTIMEO setting
317 * @sk_sndtimeo: %SO_SNDTIMEO setting
318 * @sk_txhash: computed flow hash for use on transmit
319 * @sk_filter: socket filtering instructions
320 * @sk_timer: sock cleanup timer
321 * @sk_stamp: time stamp of last packet received
322 * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
323 * @sk_tsflags: SO_TIMESTAMPING flags
324 * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock
325 * for timestamping
326 * @sk_tskey: counter to disambiguate concurrent tstamp requests
327 * @sk_zckey: counter to order MSG_ZEROCOPY notifications
328 * @sk_socket: Identd and reporting IO signals
329 * @sk_user_data: RPC layer private data
330 * @sk_frag: cached page frag
331 * @sk_peek_off: current peek_offset value
332 * @sk_send_head: front of stuff to transmit
333 * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
334 * @sk_security: used by security modules
335 * @sk_mark: generic packet mark
336 * @sk_cgrp_data: cgroup data for this cgroup
337 * @sk_memcg: this socket's memory cgroup association
338 * @sk_write_pending: a write to stream socket waits to start
339 * @sk_state_change: callback to indicate change in the state of the sock
340 * @sk_data_ready: callback to indicate there is data to be processed
341 * @sk_write_space: callback to indicate there is bf sending space available
342 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
343 * @sk_backlog_rcv: callback to process the backlog
344 * @sk_validate_xmit_skb: ptr to an optional validate function
345 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
346 * @sk_reuseport_cb: reuseport group container
347 * @sk_bpf_storage: ptr to cache and control for bpf_sk_storage
348 * @sk_rcu: used during RCU grace period
349 * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
350 * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
351 * @sk_txtime_report_errors: set report errors mode for SO_TXTIME
352 * @sk_txtime_unused: unused txtime flags
353 */
354 struct sock {
355 /*
356 * Now struct inet_timewait_sock also uses sock_common, so please just
357 	 * don't add anything before this first member (__sk_common) --acme
358 */
359 struct sock_common __sk_common;
360 #define sk_node __sk_common.skc_node
361 #define sk_nulls_node __sk_common.skc_nulls_node
362 #define sk_refcnt __sk_common.skc_refcnt
363 #define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
364 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
365 #define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
366 #endif
367
368 #define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
369 #define sk_dontcopy_end __sk_common.skc_dontcopy_end
370 #define sk_hash __sk_common.skc_hash
371 #define sk_portpair __sk_common.skc_portpair
372 #define sk_num __sk_common.skc_num
373 #define sk_dport __sk_common.skc_dport
374 #define sk_addrpair __sk_common.skc_addrpair
375 #define sk_daddr __sk_common.skc_daddr
376 #define sk_rcv_saddr __sk_common.skc_rcv_saddr
377 #define sk_family __sk_common.skc_family
378 #define sk_state __sk_common.skc_state
379 #define sk_reuse __sk_common.skc_reuse
380 #define sk_reuseport __sk_common.skc_reuseport
381 #define sk_ipv6only __sk_common.skc_ipv6only
382 #define sk_net_refcnt __sk_common.skc_net_refcnt
383 #define sk_bound_dev_if __sk_common.skc_bound_dev_if
384 #define sk_bind_node __sk_common.skc_bind_node
385 #define sk_prot __sk_common.skc_prot
386 #define sk_net __sk_common.skc_net
387 #define sk_v6_daddr __sk_common.skc_v6_daddr
388 #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
389 #define sk_cookie __sk_common.skc_cookie
390 #define sk_incoming_cpu __sk_common.skc_incoming_cpu
391 #define sk_flags __sk_common.skc_flags
392 #define sk_rxhash __sk_common.skc_rxhash
393
394 socket_lock_t sk_lock;
395 atomic_t sk_drops;
396 int sk_rcvlowat;
397 struct sk_buff_head sk_error_queue;
398 struct sk_buff_head sk_receive_queue;
399 /*
400 * The backlog queue is special, it is always used with
401 * the per-socket spinlock held and requires low latency
402 	 * access. Therefore we special case its implementation.
403 	 * Note : rmem_alloc is in this structure to fill a hole
404 	 * on 64bit arches, not because it's logically part of
405 * backlog.
406 */
407 struct {
408 atomic_t rmem_alloc;
409 int len;
410 struct sk_buff *head;
411 struct sk_buff *tail;
412 } sk_backlog;
413 #define sk_rmem_alloc sk_backlog.rmem_alloc
414
415 int sk_forward_alloc;
416 u32 sk_reserved_mem;
417 #ifdef CONFIG_NET_RX_BUSY_POLL
418 unsigned int sk_ll_usec;
419 /* ===== mostly read cache line ===== */
420 unsigned int sk_napi_id;
421 #endif
422 int sk_rcvbuf;
423
424 struct sk_filter __rcu *sk_filter;
425 union {
426 struct socket_wq __rcu *sk_wq;
427 /* private: */
428 struct socket_wq *sk_wq_raw;
429 /* public: */
430 };
431 #ifdef CONFIG_XFRM
432 struct xfrm_policy __rcu *sk_policy[2];
433 #endif
434 struct dst_entry __rcu *sk_rx_dst;
435 int sk_rx_dst_ifindex;
436 u32 sk_rx_dst_cookie;
437
438 struct dst_entry __rcu *sk_dst_cache;
439 atomic_t sk_omem_alloc;
440 int sk_sndbuf;
441
442 /* ===== cache line for TX ===== */
443 int sk_wmem_queued;
444 refcount_t sk_wmem_alloc;
445 unsigned long sk_tsq_flags;
446 union {
447 struct sk_buff *sk_send_head;
448 struct rb_root tcp_rtx_queue;
449 };
450 struct sk_buff_head sk_write_queue;
451 __s32 sk_peek_off;
452 int sk_write_pending;
453 __u32 sk_dst_pending_confirm;
454 u32 sk_pacing_status; /* see enum sk_pacing */
455 long sk_sndtimeo;
456 struct timer_list sk_timer;
457 __u32 sk_priority;
458 __u32 sk_mark;
459 unsigned long sk_pacing_rate; /* bytes per second */
460 unsigned long sk_max_pacing_rate;
461 struct page_frag sk_frag;
462 netdev_features_t sk_route_caps;
463 netdev_features_t sk_route_nocaps;
464 netdev_features_t sk_route_forced_caps;
465 int sk_gso_type;
466 unsigned int sk_gso_max_size;
467 gfp_t sk_allocation;
468 __u32 sk_txhash;
469
470 /*
471 * Because of non atomicity rules, all
472 * changes are protected by socket lock.
473 */
474 u8 sk_padding : 1,
475 sk_kern_sock : 1,
476 sk_no_check_tx : 1,
477 sk_no_check_rx : 1,
478 sk_userlocks : 4;
479 u8 sk_pacing_shift;
480 u16 sk_type;
481 u16 sk_protocol;
482 u16 sk_gso_max_segs;
483 unsigned long sk_lingertime;
484 struct proto *sk_prot_creator;
485 rwlock_t sk_callback_lock;
486 int sk_err,
487 sk_err_soft;
488 u32 sk_ack_backlog;
489 u32 sk_max_ack_backlog;
490 kuid_t sk_uid;
491 #ifdef CONFIG_NET_RX_BUSY_POLL
492 u8 sk_prefer_busy_poll;
493 u16 sk_busy_poll_budget;
494 #endif
495 spinlock_t sk_peer_lock;
496 struct pid *sk_peer_pid;
497 const struct cred *sk_peer_cred;
498
499 long sk_rcvtimeo;
500 ktime_t sk_stamp;
501 #if BITS_PER_LONG==32
502 seqlock_t sk_stamp_seq;
503 #endif
504 u16 sk_tsflags;
505 int sk_bind_phc;
506 u8 sk_shutdown;
507 u32 sk_tskey;
508 atomic_t sk_zckey;
509
510 u8 sk_clockid;
511 u8 sk_txtime_deadline_mode : 1,
512 sk_txtime_report_errors : 1,
513 sk_txtime_unused : 6;
514
515 struct socket *sk_socket;
516 void *sk_user_data;
517 #ifdef CONFIG_SECURITY
518 void *sk_security;
519 #endif
520 struct sock_cgroup_data sk_cgrp_data;
521 struct mem_cgroup *sk_memcg;
522 void (*sk_state_change)(struct sock *sk);
523 void (*sk_data_ready)(struct sock *sk);
524 void (*sk_write_space)(struct sock *sk);
525 void (*sk_error_report)(struct sock *sk);
526 int (*sk_backlog_rcv)(struct sock *sk,
527 struct sk_buff *skb);
528 #ifdef CONFIG_SOCK_VALIDATE_XMIT
529 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
530 struct net_device *dev,
531 struct sk_buff *skb);
532 #endif
533 void (*sk_destruct)(struct sock *sk);
534 struct sock_reuseport __rcu *sk_reuseport_cb;
535 #ifdef CONFIG_BPF_SYSCALL
536 struct bpf_local_storage __rcu *sk_bpf_storage;
537 #endif
538 struct rcu_head sk_rcu;
539 };
540
541 enum sk_pacing {
542 SK_PACING_NONE = 0,
543 SK_PACING_NEEDED = 1,
544 SK_PACING_FQ = 2,
545 };
546
547 /* Pointer stored in sk_user_data might not be suitable for copying
548 * when cloning the socket. For instance, it can point to a reference
549 * counted object. sk_user_data bottom bit is set if pointer must not
550 * be copied.
551 */
552 #define SK_USER_DATA_NOCOPY 1UL
553 #define SK_USER_DATA_BPF 2UL /* Managed by BPF */
554 #define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
555
556 /**
557 * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
558 * @sk: socket
559 */
560 static inline bool sk_user_data_is_nocopy(const struct sock *sk)
561 {
562 return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
563 }
564
565 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
566
567 #define rcu_dereference_sk_user_data(sk) \
568 ({ \
569 void *__tmp = rcu_dereference(__sk_user_data((sk))); \
570 (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \
571 })
572 #define rcu_assign_sk_user_data(sk, ptr) \
573 ({ \
574 uintptr_t __tmp = (uintptr_t)(ptr); \
575 WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
576 rcu_assign_pointer(__sk_user_data((sk)), __tmp); \
577 })
578 #define rcu_assign_sk_user_data_nocopy(sk, ptr) \
579 ({ \
580 uintptr_t __tmp = (uintptr_t)(ptr); \
581 WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
582 rcu_assign_pointer(__sk_user_data((sk)), \
583 __tmp | SK_USER_DATA_NOCOPY); \
584 })
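/*
 * Illustrative sketch (the caller and "my_ctx" are hypothetical): because
 * the low bits of sk_user_data carry the NOCOPY/BPF tags, stored pointers
 * must be at least 4-byte aligned.
 *
 *	rcu_assign_sk_user_data_nocopy(sk, my_ctx);	// not copied on clone
 *	...
 *	rcu_read_lock();
 *	ctx = rcu_dereference_sk_user_data(sk);		// tag bits masked off
 *	rcu_read_unlock();
 */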
585
586 /*
587  * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not
588  * its port may be reused by someone else. SK_FORCE_REUSE
589 * on a socket means that the socket will reuse everybody else's port
590 * without looking at the other's sk_reuse value.
591 */
592
593 #define SK_NO_REUSE 0
594 #define SK_CAN_REUSE 1
595 #define SK_FORCE_REUSE 2
596
597 int sk_set_peek_off(struct sock *sk, int val);
598
599 static inline int sk_peek_offset(struct sock *sk, int flags)
600 {
601 if (unlikely(flags & MSG_PEEK)) {
602 return READ_ONCE(sk->sk_peek_off);
603 }
604
605 return 0;
606 }
607
608 static inline void sk_peek_offset_bwd(struct sock *sk, int val)
609 {
610 s32 off = READ_ONCE(sk->sk_peek_off);
611
612 if (unlikely(off >= 0)) {
613 off = max_t(s32, off - val, 0);
614 WRITE_ONCE(sk->sk_peek_off, off);
615 }
616 }
617
618 static inline void sk_peek_offset_fwd(struct sock *sk, int val)
619 {
620 sk_peek_offset_bwd(sk, -val);
621 }
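/*
 * Example (illustrative): with SO_PEEK_OFF enabled, a MSG_PEEK read of n
 * bytes is followed by sk_peek_offset_fwd(sk, n) so the next peek starts
 * after the already-seen data, while a read that actually consumes n bytes
 * calls sk_peek_offset_bwd(sk, n) to rewind the offset accordingly.
 */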
622
623 /*
624 * Hashed lists helper routines
625 */
626 static inline struct sock *sk_entry(const struct hlist_node *node)
627 {
628 return hlist_entry(node, struct sock, sk_node);
629 }
630
631 static inline struct sock *__sk_head(const struct hlist_head *head)
632 {
633 return hlist_entry(head->first, struct sock, sk_node);
634 }
635
636 static inline struct sock *sk_head(const struct hlist_head *head)
637 {
638 return hlist_empty(head) ? NULL : __sk_head(head);
639 }
640
641 static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
642 {
643 return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
644 }
645
646 static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
647 {
648 return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
649 }
650
651 static inline struct sock *sk_next(const struct sock *sk)
652 {
653 return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
654 }
655
656 static inline struct sock *sk_nulls_next(const struct sock *sk)
657 {
658 return (!is_a_nulls(sk->sk_nulls_node.next)) ?
659 hlist_nulls_entry(sk->sk_nulls_node.next,
660 struct sock, sk_nulls_node) :
661 NULL;
662 }
663
664 static inline bool sk_unhashed(const struct sock *sk)
665 {
666 return hlist_unhashed(&sk->sk_node);
667 }
668
669 static inline bool sk_hashed(const struct sock *sk)
670 {
671 return !sk_unhashed(sk);
672 }
673
674 static inline void sk_node_init(struct hlist_node *node)
675 {
676 node->pprev = NULL;
677 }
678
679 static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
680 {
681 node->pprev = NULL;
682 }
683
684 static inline void __sk_del_node(struct sock *sk)
685 {
686 __hlist_del(&sk->sk_node);
687 }
688
689 /* NB: equivalent to hlist_del_init_rcu */
690 static inline bool __sk_del_node_init(struct sock *sk)
691 {
692 if (sk_hashed(sk)) {
693 __sk_del_node(sk);
694 sk_node_init(&sk->sk_node);
695 return true;
696 }
697 return false;
698 }
699
700 /* Grab socket reference count. This operation is valid only
701    when sk is ALREADY grabbed, e.g. it is found in a hash table
702 or a list and the lookup is made under lock preventing hash table
703 modifications.
704 */
705
706 static __always_inline void sock_hold(struct sock *sk)
707 {
708 refcount_inc(&sk->sk_refcnt);
709 }
710
711 /* Ungrab socket in the context, which assumes that socket refcnt
712    cannot hit zero, e.g. it is true in the context of any socketcall.
713 */
714 static __always_inline void __sock_put(struct sock *sk)
715 {
716 refcount_dec(&sk->sk_refcnt);
717 }
718
719 static inline bool sk_del_node_init(struct sock *sk)
720 {
721 bool rc = __sk_del_node_init(sk);
722
723 if (rc) {
724 /* paranoid for a while -acme */
725 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
726 __sock_put(sk);
727 }
728 return rc;
729 }
730 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
731
732 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
733 {
734 if (sk_hashed(sk)) {
735 hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
736 return true;
737 }
738 return false;
739 }
740
741 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
742 {
743 bool rc = __sk_nulls_del_node_init_rcu(sk);
744
745 if (rc) {
746 /* paranoid for a while -acme */
747 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
748 __sock_put(sk);
749 }
750 return rc;
751 }
752
753 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
754 {
755 hlist_add_head(&sk->sk_node, list);
756 }
757
758 static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
759 {
760 sock_hold(sk);
761 __sk_add_node(sk, list);
762 }
763
764 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
765 {
766 sock_hold(sk);
767 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
768 sk->sk_family == AF_INET6)
769 hlist_add_tail_rcu(&sk->sk_node, list);
770 else
771 hlist_add_head_rcu(&sk->sk_node, list);
772 }
773
774 static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
775 {
776 sock_hold(sk);
777 hlist_add_tail_rcu(&sk->sk_node, list);
778 }
779
780 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
781 {
782 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
783 }
784
785 static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
786 {
787 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
788 }
789
790 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
791 {
792 sock_hold(sk);
793 __sk_nulls_add_node_rcu(sk, list);
794 }
795
796 static inline void __sk_del_bind_node(struct sock *sk)
797 {
798 __hlist_del(&sk->sk_bind_node);
799 }
800
801 static inline void sk_add_bind_node(struct sock *sk,
802 struct hlist_head *list)
803 {
804 hlist_add_head(&sk->sk_bind_node, list);
805 }
806
807 #define sk_for_each(__sk, list) \
808 hlist_for_each_entry(__sk, list, sk_node)
809 #define sk_for_each_rcu(__sk, list) \
810 hlist_for_each_entry_rcu(__sk, list, sk_node)
811 #define sk_nulls_for_each(__sk, node, list) \
812 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
813 #define sk_nulls_for_each_rcu(__sk, node, list) \
814 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
815 #define sk_for_each_from(__sk) \
816 hlist_for_each_entry_from(__sk, sk_node)
817 #define sk_nulls_for_each_from(__sk, node) \
818 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
819 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
820 #define sk_for_each_safe(__sk, tmp, list) \
821 hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
822 #define sk_for_each_bound(__sk, list) \
823 hlist_for_each_entry(__sk, list, sk_bind_node)
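/*
 * Iteration sketch (illustrative; "bucket" and "hnum" are hypothetical):
 * walking one hash chain while holding its bucket lock:
 *
 *	spin_lock(&bucket->lock);
 *	sk_for_each(sk, &bucket->head)
 *		if (sk->sk_num == hnum)
 *			break;
 *	spin_unlock(&bucket->lock);
 */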
824
825 /**
826 * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
827 * @tpos: the type * to use as a loop cursor.
828 * @pos: the &struct hlist_node to use as a loop cursor.
829 * @head: the head for your list.
830 * @offset: offset of hlist_node within the struct.
831 *
832 */
833 #define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \
834 for (pos = rcu_dereference(hlist_first_rcu(head)); \
835 pos != NULL && \
836 ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
837 pos = rcu_dereference(hlist_next_rcu(pos)))
838
839 static inline struct user_namespace *sk_user_ns(struct sock *sk)
840 {
841 /* Careful only use this in a context where these parameters
842 * can not change and must all be valid, such as recvmsg from
843 * userspace.
844 */
845 return sk->sk_socket->file->f_cred->user_ns;
846 }
847
848 /* Sock flags */
849 enum sock_flags {
850 SOCK_DEAD,
851 SOCK_DONE,
852 SOCK_URGINLINE,
853 SOCK_KEEPOPEN,
854 SOCK_LINGER,
855 SOCK_DESTROY,
856 SOCK_BROADCAST,
857 SOCK_TIMESTAMP,
858 SOCK_ZAPPED,
859 SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
860 SOCK_DBG, /* %SO_DEBUG setting */
861 SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
862 SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
863 SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
864 SOCK_MEMALLOC, /* VM depends on this socket for swapping */
865 SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
866 SOCK_FASYNC, /* fasync() active */
867 SOCK_RXQ_OVFL,
868 SOCK_ZEROCOPY, /* buffers from userspace */
869 SOCK_WIFI_STATUS, /* push wifi status to userspace */
870 SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
871 * Will use last 4 bytes of packet sent from
872 * user-space instead.
873 */
874 SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
875 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
876 SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
877 SOCK_TXTIME,
878 SOCK_XDP, /* XDP is attached */
879 SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
880 };
881
882 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
883
884 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
885 {
886 nsk->sk_flags = osk->sk_flags;
887 }
888
889 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
890 {
891 __set_bit(flag, &sk->sk_flags);
892 }
893
894 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
895 {
896 __clear_bit(flag, &sk->sk_flags);
897 }
898
899 static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
900 int valbool)
901 {
902 if (valbool)
903 sock_set_flag(sk, bit);
904 else
905 sock_reset_flag(sk, bit);
906 }
907
908 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
909 {
910 return test_bit(flag, &sk->sk_flags);
911 }
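/*
 * Example (illustrative): the flag helpers are the usual way to test and
 * toggle per-socket boolean state, e.g. mirroring SO_KEEPALIVE:
 *
 *	sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
 *	if (sock_flag(sk, SOCK_KEEPOPEN))
 *		...
 *
 * The setters use non-atomic __set_bit()/__clear_bit(), so callers are
 * normally expected to own the socket (e.g. hold the socket lock).
 */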
912
913 #ifdef CONFIG_NET
914 DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
915 static inline int sk_memalloc_socks(void)
916 {
917 return static_branch_unlikely(&memalloc_socks_key);
918 }
919
920 void __receive_sock(struct file *file);
921 #else
922
923 static inline int sk_memalloc_socks(void)
924 {
925 return 0;
926 }
927
928 static inline void __receive_sock(struct file *file)
929 { }
930 #endif
931
932 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
933 {
934 return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
935 }
936
937 static inline void sk_acceptq_removed(struct sock *sk)
938 {
939 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
940 }
941
942 static inline void sk_acceptq_added(struct sock *sk)
943 {
944 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
945 }
946
947 /* Note: If you think the test should be:
948 * return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
949 * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
950 */
951 static inline bool sk_acceptq_is_full(const struct sock *sk)
952 {
953 return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
954 }
955
956 /*
957 * Compute minimal free write space needed to queue new packets.
958 */
959 static inline int sk_stream_min_wspace(const struct sock *sk)
960 {
961 return READ_ONCE(sk->sk_wmem_queued) >> 1;
962 }
963
964 static inline int sk_stream_wspace(const struct sock *sk)
965 {
966 return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
967 }
968
969 static inline void sk_wmem_queued_add(struct sock *sk, int val)
970 {
971 WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
972 }
973
974 void sk_stream_write_space(struct sock *sk);
975
976 /* OOB backlog add */
977 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
978 {
979 	/* don't leave the skb dst unrefcounted; we are going to leave the rcu lock */
980 skb_dst_force(skb);
981
982 if (!sk->sk_backlog.tail)
983 WRITE_ONCE(sk->sk_backlog.head, skb);
984 else
985 sk->sk_backlog.tail->next = skb;
986
987 WRITE_ONCE(sk->sk_backlog.tail, skb);
988 skb->next = NULL;
989 }
990
991 /*
992 * Take into account size of receive queue and backlog queue
993 * Do not take into account this skb truesize,
994 * to allow even a single big packet to come.
995 */
996 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
997 {
998 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
999
1000 return qsize > limit;
1001 }
1002
1003 /* The per-socket spinlock must be held here. */
1004 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
1005 unsigned int limit)
1006 {
1007 if (sk_rcvqueues_full(sk, limit))
1008 return -ENOBUFS;
1009
1010 /*
1011 * If the skb was allocated from pfmemalloc reserves, only
1012 * allow SOCK_MEMALLOC sockets to use it as this socket is
1013 * helping free memory
1014 */
1015 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
1016 return -ENOMEM;
1017
1018 __sk_add_backlog(sk, skb);
1019 sk->sk_backlog.len += skb->truesize;
1020 return 0;
1021 }
1022
1023 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1024
1025 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1026 {
1027 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
1028 return __sk_backlog_rcv(sk, skb);
1029
1030 return sk->sk_backlog_rcv(sk, skb);
1031 }
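/*
 * Typical receive-path pattern (sketch; "ret" and "limit" are local to the
 * caller): protocol input handlers take the BH spinlock, process directly
 * when the socket is not owned by a process, and otherwise queue to the
 * backlog, which __release_sock() drains when the owner releases the lock:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb, limit))
 *		kfree_skb(skb);			// backlog full, drop
 *	bh_unlock_sock(sk);
 */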
1032
1033 static inline void sk_incoming_cpu_update(struct sock *sk)
1034 {
1035 int cpu = raw_smp_processor_id();
1036
1037 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
1038 WRITE_ONCE(sk->sk_incoming_cpu, cpu);
1039 }
1040
1041 static inline void sock_rps_record_flow_hash(__u32 hash)
1042 {
1043 #ifdef CONFIG_RPS
1044 struct rps_sock_flow_table *sock_flow_table;
1045
1046 rcu_read_lock();
1047 sock_flow_table = rcu_dereference(rps_sock_flow_table);
1048 rps_record_sock_flow(sock_flow_table, hash);
1049 rcu_read_unlock();
1050 #endif
1051 }
1052
1053 static inline void sock_rps_record_flow(const struct sock *sk)
1054 {
1055 #ifdef CONFIG_RPS
1056 if (static_branch_unlikely(&rfs_needed)) {
1057 /* Reading sk->sk_rxhash might incur an expensive cache line
1058 * miss.
1059 *
1060 * TCP_ESTABLISHED does cover almost all states where RFS
1061 * might be useful, and is cheaper [1] than testing :
1062 * IPv4: inet_sk(sk)->inet_daddr
1063 * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
1064 * OR an additional socket flag
1065 * [1] : sk_state and sk_prot are in the same cache line.
1066 */
1067 if (sk->sk_state == TCP_ESTABLISHED)
1068 sock_rps_record_flow_hash(sk->sk_rxhash);
1069 }
1070 #endif
1071 }
1072
1073 static inline void sock_rps_save_rxhash(struct sock *sk,
1074 const struct sk_buff *skb)
1075 {
1076 #ifdef CONFIG_RPS
1077 if (unlikely(sk->sk_rxhash != skb->hash))
1078 sk->sk_rxhash = skb->hash;
1079 #endif
1080 }
1081
1082 static inline void sock_rps_reset_rxhash(struct sock *sk)
1083 {
1084 #ifdef CONFIG_RPS
1085 sk->sk_rxhash = 0;
1086 #endif
1087 }
1088
1089 #define sk_wait_event(__sk, __timeo, __condition, __wait) \
1090 ({ int __rc; \
1091 release_sock(__sk); \
1092 __rc = __condition; \
1093 if (!__rc) { \
1094 *(__timeo) = wait_woken(__wait, \
1095 TASK_INTERRUPTIBLE, \
1096 *(__timeo)); \
1097 } \
1098 sched_annotate_sleep(); \
1099 lock_sock(__sk); \
1100 __rc = __condition; \
1101 __rc; \
1102 })
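/*
 * Usage sketch (illustrative): callers hold the socket lock, register on
 * the socket's wait queue (sk_sleep(), defined further down this header)
 * and sleep until the condition holds or the timeout elapses:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(sk_sleep(sk), &wait);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue), &wait);
 *	remove_wait_queue(sk_sleep(sk), &wait);
 */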
1103
1104 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1105 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1106 void sk_stream_wait_close(struct sock *sk, long timeo_p);
1107 int sk_stream_error(struct sock *sk, int flags, int err);
1108 void sk_stream_kill_queues(struct sock *sk);
1109 void sk_set_memalloc(struct sock *sk);
1110 void sk_clear_memalloc(struct sock *sk);
1111
1112 void __sk_flush_backlog(struct sock *sk);
1113
1114 static inline bool sk_flush_backlog(struct sock *sk)
1115 {
1116 if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
1117 __sk_flush_backlog(sk);
1118 return true;
1119 }
1120 return false;
1121 }
1122
1123 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
1124
1125 struct request_sock_ops;
1126 struct timewait_sock_ops;
1127 struct inet_hashinfo;
1128 struct raw_hashinfo;
1129 struct smc_hashinfo;
1130 struct module;
1131 struct sk_psock;
1132
1133 /*
1134  * caches using SLAB_TYPESAFE_BY_RCU should leave the .next pointer of nulls nodes
1135  * unmodified. Special care is taken when initializing the object to zero.
1136 */
1137 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1138 {
1139 if (offsetof(struct sock, sk_node.next) != 0)
1140 memset(sk, 0, offsetof(struct sock, sk_node.next));
1141 memset(&sk->sk_node.pprev, 0,
1142 size - offsetof(struct sock, sk_node.pprev));
1143 }
1144
1145 /* Networking protocol blocks we attach to sockets.
1146 * socket layer -> transport layer interface
1147 */
1148 struct proto {
1149 void (*close)(struct sock *sk,
1150 long timeout);
1151 int (*pre_connect)(struct sock *sk,
1152 struct sockaddr *uaddr,
1153 int addr_len);
1154 int (*connect)(struct sock *sk,
1155 struct sockaddr *uaddr,
1156 int addr_len);
1157 int (*disconnect)(struct sock *sk, int flags);
1158
1159 struct sock * (*accept)(struct sock *sk, int flags, int *err,
1160 bool kern);
1161
1162 int (*ioctl)(struct sock *sk, int cmd,
1163 unsigned long arg);
1164 int (*init)(struct sock *sk);
1165 void (*destroy)(struct sock *sk);
1166 void (*shutdown)(struct sock *sk, int how);
1167 int (*setsockopt)(struct sock *sk, int level,
1168 int optname, sockptr_t optval,
1169 unsigned int optlen);
1170 int (*getsockopt)(struct sock *sk, int level,
1171 int optname, char __user *optval,
1172 int __user *option);
1173 void (*keepalive)(struct sock *sk, int valbool);
1174 #ifdef CONFIG_COMPAT
1175 int (*compat_ioctl)(struct sock *sk,
1176 unsigned int cmd, unsigned long arg);
1177 #endif
1178 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
1179 size_t len);
1180 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
1181 size_t len, int noblock, int flags,
1182 int *addr_len);
1183 int (*sendpage)(struct sock *sk, struct page *page,
1184 int offset, size_t size, int flags);
1185 int (*bind)(struct sock *sk,
1186 struct sockaddr *addr, int addr_len);
1187 int (*bind_add)(struct sock *sk,
1188 struct sockaddr *addr, int addr_len);
1189
1190 int (*backlog_rcv) (struct sock *sk,
1191 struct sk_buff *skb);
1192 bool (*bpf_bypass_getsockopt)(int level,
1193 int optname);
1194
1195 void (*release_cb)(struct sock *sk);
1196
1197 /* Keeping track of sk's, looking them up, and port selection methods. */
1198 int (*hash)(struct sock *sk);
1199 void (*unhash)(struct sock *sk);
1200 void (*rehash)(struct sock *sk);
1201 int (*get_port)(struct sock *sk, unsigned short snum);
1202 #ifdef CONFIG_BPF_SYSCALL
1203 int (*psock_update_sk_prot)(struct sock *sk,
1204 struct sk_psock *psock,
1205 bool restore);
1206 #endif
1207
1208 /* Keeping track of sockets in use */
1209 #ifdef CONFIG_PROC_FS
1210 unsigned int inuse_idx;
1211 #endif
1212
1213 int (*forward_alloc_get)(const struct sock *sk);
1214
1215 bool (*stream_memory_free)(const struct sock *sk, int wake);
1216 bool (*sock_is_readable)(struct sock *sk);
1217 /* Memory pressure */
1218 void (*enter_memory_pressure)(struct sock *sk);
1219 void (*leave_memory_pressure)(struct sock *sk);
1220 atomic_long_t *memory_allocated; /* Current allocated memory. */
1221 struct percpu_counter *sockets_allocated; /* Current number of sockets. */
1222
1223 /*
1224 * Pressure flag: try to collapse.
1225 * Technical note: it is used by multiple contexts non atomically.
1226 * All the __sk_mem_schedule() is of this nature: accounting
1227 * is strict, actions are advisory and have some latency.
1228 */
1229 unsigned long *memory_pressure;
1230 long *sysctl_mem;
1231
1232 int *sysctl_wmem;
1233 int *sysctl_rmem;
1234 u32 sysctl_wmem_offset;
1235 u32 sysctl_rmem_offset;
1236
1237 int max_header;
1238 bool no_autobind;
1239
1240 struct kmem_cache *slab;
1241 unsigned int obj_size;
1242 slab_flags_t slab_flags;
1243 unsigned int useroffset; /* Usercopy region offset */
1244 unsigned int usersize; /* Usercopy region size */
1245
1246 unsigned int __percpu *orphan_count;
1247
1248 struct request_sock_ops *rsk_prot;
1249 struct timewait_sock_ops *twsk_prot;
1250
1251 union {
1252 struct inet_hashinfo *hashinfo;
1253 struct udp_table *udp_table;
1254 struct raw_hashinfo *raw_hash;
1255 struct smc_hashinfo *smc_hash;
1256 } h;
1257
1258 struct module *owner;
1259
1260 char name[32];
1261
1262 struct list_head node;
1263 #ifdef SOCK_REFCNT_DEBUG
1264 atomic_t socks;
1265 #endif
1266 int (*diag_destroy)(struct sock *sk, int err);
1267 } __randomize_layout;
1268
1269 int proto_register(struct proto *prot, int alloc_slab);
1270 void proto_unregister(struct proto *prot);
1271 int sock_load_diag_module(int family, int protocol);
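/*
 * Registration sketch (illustrative; "my_proto" and "struct my_sock" are
 * hypothetical):
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);	// 1 = allocate a slab cache
 *	...
 *	proto_unregister(&my_proto);
 */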
1272
1273 #ifdef SOCK_REFCNT_DEBUG
1274 static inline void sk_refcnt_debug_inc(struct sock *sk)
1275 {
1276 atomic_inc(&sk->sk_prot->socks);
1277 }
1278
1279 static inline void sk_refcnt_debug_dec(struct sock *sk)
1280 {
1281 atomic_dec(&sk->sk_prot->socks);
1282 printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
1283 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
1284 }
1285
1286 static inline void sk_refcnt_debug_release(const struct sock *sk)
1287 {
1288 if (refcount_read(&sk->sk_refcnt) != 1)
1289 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
1290 sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
1291 }
1292 #else /* SOCK_REFCNT_DEBUG */
1293 #define sk_refcnt_debug_inc(sk) do { } while (0)
1294 #define sk_refcnt_debug_dec(sk) do { } while (0)
1295 #define sk_refcnt_debug_release(sk) do { } while (0)
1296 #endif /* SOCK_REFCNT_DEBUG */
1297
1298 INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
1299
1300 static inline int sk_forward_alloc_get(const struct sock *sk)
1301 {
1302 if (!sk->sk_prot->forward_alloc_get)
1303 return sk->sk_forward_alloc;
1304
1305 return sk->sk_prot->forward_alloc_get(sk);
1306 }
1307
1308 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1309 {
1310 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
1311 return false;
1312
1313 return sk->sk_prot->stream_memory_free ?
1314 INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
1315 tcp_stream_memory_free, sk, wake) : true;
1316 }
1317
1318 static inline bool sk_stream_memory_free(const struct sock *sk)
1319 {
1320 return __sk_stream_memory_free(sk, 0);
1321 }
1322
1323 static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
1324 {
1325 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
1326 __sk_stream_memory_free(sk, wake);
1327 }
1328
1329 static inline bool sk_stream_is_writeable(const struct sock *sk)
1330 {
1331 return __sk_stream_is_writeable(sk, 0);
1332 }
1333
1334 static inline int sk_under_cgroup_hierarchy(struct sock *sk,
1335 struct cgroup *ancestor)
1336 {
1337 #ifdef CONFIG_SOCK_CGROUP_DATA
1338 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
1339 ancestor);
1340 #else
1341 return -ENOTSUPP;
1342 #endif
1343 }
1344
1345 static inline bool sk_has_memory_pressure(const struct sock *sk)
1346 {
1347 return sk->sk_prot->memory_pressure != NULL;
1348 }
1349
1350 static inline bool sk_under_memory_pressure(const struct sock *sk)
1351 {
1352 if (!sk->sk_prot->memory_pressure)
1353 return false;
1354
1355 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
1356 mem_cgroup_under_socket_pressure(sk->sk_memcg))
1357 return true;
1358
1359 return !!*sk->sk_prot->memory_pressure;
1360 }
1361
1362 static inline long
1363 sk_memory_allocated(const struct sock *sk)
1364 {
1365 return atomic_long_read(sk->sk_prot->memory_allocated);
1366 }
1367
1368 static inline long
1369 sk_memory_allocated_add(struct sock *sk, int amt)
1370 {
1371 return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
1372 }
1373
1374 static inline void
1375 sk_memory_allocated_sub(struct sock *sk, int amt)
1376 {
1377 atomic_long_sub(amt, sk->sk_prot->memory_allocated);
1378 }
1379
1380 #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
1381
1382 static inline void sk_sockets_allocated_dec(struct sock *sk)
1383 {
1384 percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1,
1385 SK_ALLOC_PERCPU_COUNTER_BATCH);
1386 }
1387
1388 static inline void sk_sockets_allocated_inc(struct sock *sk)
1389 {
1390 percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1,
1391 SK_ALLOC_PERCPU_COUNTER_BATCH);
1392 }
1393
1394 static inline u64
1395 sk_sockets_allocated_read_positive(struct sock *sk)
1396 {
1397 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
1398 }
1399
1400 static inline int
1401 proto_sockets_allocated_sum_positive(struct proto *prot)
1402 {
1403 return percpu_counter_sum_positive(prot->sockets_allocated);
1404 }
1405
1406 static inline long
1407 proto_memory_allocated(struct proto *prot)
1408 {
1409 return atomic_long_read(prot->memory_allocated);
1410 }
1411
1412 static inline bool
1413 proto_memory_pressure(struct proto *prot)
1414 {
1415 if (!prot->memory_pressure)
1416 return false;
1417 return !!*prot->memory_pressure;
1418 }
1419
1420
1421 #ifdef CONFIG_PROC_FS
1422 /* Called with local bh disabled */
1423 void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
1424 int sock_prot_inuse_get(struct net *net, struct proto *proto);
1425 int sock_inuse_get(struct net *net);
1426 #else
1427 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
1428 int inc)
1429 {
1430 }
1431 #endif
1432
1433
1434 /* With per-bucket locks this operation is not-atomic, so that
1435 * this version is not worse.
1436 */
1437 static inline int __sk_prot_rehash(struct sock *sk)
1438 {
1439 sk->sk_prot->unhash(sk);
1440 return sk->sk_prot->hash(sk);
1441 }
1442
1443 /* About 10 seconds */
1444 #define SOCK_DESTROY_TIME (10*HZ)
1445
1446 /* Sockets 0-1023 can't be bound to unless you are superuser */
1447 #define PROT_SOCK 1024
1448
1449 #define SHUTDOWN_MASK 3
1450 #define RCV_SHUTDOWN 1
1451 #define SEND_SHUTDOWN 2
1452
1453 #define SOCK_BINDADDR_LOCK 4
1454 #define SOCK_BINDPORT_LOCK 8
1455
1456 struct socket_alloc {
1457 struct socket socket;
1458 struct inode vfs_inode;
1459 };
1460
1461 static inline struct socket *SOCKET_I(struct inode *inode)
1462 {
1463 return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
1464 }
1465
1466 static inline struct inode *SOCK_INODE(struct socket *socket)
1467 {
1468 return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
1469 }
1470
1471 /*
1472 * Functions for memory accounting
1473 */
1474 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1475 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1476 void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1477 void __sk_mem_reclaim(struct sock *sk, int amount);
1478
1479 /* We used to have PAGE_SIZE here, but systems with 64KB pages
1480  * do not necessarily have 16x more memory than 4KB ones.
1481 */
1482 #define SK_MEM_QUANTUM 4096
1483 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
1484 #define SK_MEM_SEND 0
1485 #define SK_MEM_RECV 1
1486
1487 /* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */
1488 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
1489 {
1490 long val = sk->sk_prot->sysctl_mem[index];
1491
1492 #if PAGE_SIZE > SK_MEM_QUANTUM
1493 val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
1494 #elif PAGE_SIZE < SK_MEM_QUANTUM
1495 val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
1496 #endif
1497 return val;
1498 }
1499
1500 static inline int sk_mem_pages(int amt)
1501 {
1502 return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
1503 }
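/*
 * Worked example (illustrative): with SK_MEM_QUANTUM = 4096, accounting
 * for 9000 bytes needs sk_mem_pages(9000) = (9000 + 4095) >> 12 = 3
 * quanta, i.e. 12288 bytes of forward allocation.
 */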
1504
1505 static inline bool sk_has_account(struct sock *sk)
1506 {
1507 /* return true if protocol supports memory accounting */
1508 return !!sk->sk_prot->memory_allocated;
1509 }
1510
1511 static inline bool sk_wmem_schedule(struct sock *sk, int size)
1512 {
1513 if (!sk_has_account(sk))
1514 return true;
1515 return size <= sk->sk_forward_alloc ||
1516 __sk_mem_schedule(sk, size, SK_MEM_SEND);
1517 }
1518
1519 static inline bool
1520 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
1521 {
1522 if (!sk_has_account(sk))
1523 return true;
1524 return size <= sk->sk_forward_alloc ||
1525 __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
1526 skb_pfmemalloc(skb);
1527 }
1528
1529 static inline int sk_unused_reserved_mem(const struct sock *sk)
1530 {
1531 int unused_mem;
1532
1533 if (likely(!sk->sk_reserved_mem))
1534 return 0;
1535
1536 unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
1537 atomic_read(&sk->sk_rmem_alloc);
1538
1539 return unused_mem > 0 ? unused_mem : 0;
1540 }
1541
1542 static inline void sk_mem_reclaim(struct sock *sk)
1543 {
1544 int reclaimable;
1545
1546 if (!sk_has_account(sk))
1547 return;
1548
1549 reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
1550
1551 if (reclaimable >= SK_MEM_QUANTUM)
1552 __sk_mem_reclaim(sk, reclaimable);
1553 }
1554
1555 static inline void sk_mem_reclaim_final(struct sock *sk)
1556 {
1557 sk->sk_reserved_mem = 0;
1558 sk_mem_reclaim(sk);
1559 }
1560
1561 static inline void sk_mem_reclaim_partial(struct sock *sk)
1562 {
1563 int reclaimable;
1564
1565 if (!sk_has_account(sk))
1566 return;
1567
1568 reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
1569
1570 if (reclaimable > SK_MEM_QUANTUM)
1571 __sk_mem_reclaim(sk, reclaimable - 1);
1572 }
1573
1574 static inline void sk_mem_charge(struct sock *sk, int size)
1575 {
1576 if (!sk_has_account(sk))
1577 return;
1578 sk->sk_forward_alloc -= size;
1579 }
1580
1581 /* the following macros control memory reclaiming in sk_mem_uncharge()
1582 */
1583 #define SK_RECLAIM_THRESHOLD (1 << 21)
1584 #define SK_RECLAIM_CHUNK (1 << 20)
1585
1586 static inline void sk_mem_uncharge(struct sock *sk, int size)
1587 {
1588 int reclaimable;
1589
1590 if (!sk_has_account(sk))
1591 return;
1592 sk->sk_forward_alloc += size;
1593 reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
1594
1595 /* Avoid a possible overflow.
1596 * TCP send queues can make this happen, if sk_mem_reclaim()
1597 * is not called and more than 2 GBytes are released at once.
1598 *
1599 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
1600 * no need to hold that much forward allocation anyway.
1601 */
1602 if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
1603 __sk_mem_reclaim(sk, SK_RECLAIM_CHUNK);
1604 }
1605
1606 static inline void sock_release_ownership(struct sock *sk)
1607 {
1608 if (sk->sk_lock.owned) {
1609 sk->sk_lock.owned = 0;
1610
1611 /* The sk_lock has mutex_unlock() semantics: */
1612 mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
1613 }
1614 }
1615
1616 /*
1617 * Macro so as to not evaluate some arguments when
1618 * lockdep is not enabled.
1619 *
1620 * Mark both the sk_lock and the sk_lock.slock as a
1621 * per-address-family lock class.
1622 */
1623 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
1624 do { \
1625 sk->sk_lock.owned = 0; \
1626 init_waitqueue_head(&sk->sk_lock.wq); \
1627 spin_lock_init(&(sk)->sk_lock.slock); \
1628 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1629 sizeof((sk)->sk_lock)); \
1630 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1631 (skey), (sname)); \
1632 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1633 } while (0)
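/*
 * Usage sketch (illustrative; the AF_FOO names and keys are hypothetical):
 *
 *	static struct lock_class_key foo_slock_key;
 *	static struct lock_class_key foo_lock_key;
 *
 *	sock_lock_init_class_and_name(sk,
 *				      "slock-AF_FOO", &foo_slock_key,
 *				      "sk_lock-AF_FOO", &foo_lock_key);
 */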
1634
1635 static inline bool lockdep_sock_is_held(const struct sock *sk)
1636 {
1637 return lockdep_is_held(&sk->sk_lock) ||
1638 lockdep_is_held(&sk->sk_lock.slock);
1639 }
1640
1641 void lock_sock_nested(struct sock *sk, int subclass);
1642
1643 static inline void lock_sock(struct sock *sk)
1644 {
1645 lock_sock_nested(sk, 0);
1646 }
1647
1648 void __lock_sock(struct sock *sk);
1649 void __release_sock(struct sock *sk);
1650 void release_sock(struct sock *sk);
1651
1652 /* BH context may only use the following locking interface. */
1653 #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
1654 #define bh_lock_sock_nested(__sk) \
1655 spin_lock_nested(&((__sk)->sk_lock.slock), \
1656 SINGLE_DEPTH_NESTING)
1657 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
1658
1659 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
1660
1661 /**
1662 * lock_sock_fast - fast version of lock_sock
1663 * @sk: socket
1664 *
1665 * This version should be used for very small sections, where the process won't block.
1666 * Return false if the fast path is taken:
1667 *
1668 * sk_lock.slock locked, owned = 0, BH disabled
1669 *
1670 * Return true if the slow path is taken:
1671 *
1672 * sk_lock.slock unlocked, owned = 1, BH enabled
1673 */
lock_sock_fast(struct sock * sk)1674 static inline bool lock_sock_fast(struct sock *sk)
1675 {
1676 /* The sk_lock has mutex_lock() semantics here. */
1677 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
1678
1679 return __lock_sock_fast(sk);
1680 }
1681
1682 /* fast socket lock variant for caller already holding a [different] socket lock */
lock_sock_fast_nested(struct sock * sk)1683 static inline bool lock_sock_fast_nested(struct sock *sk)
1684 {
1685 mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
1686
1687 return __lock_sock_fast(sk);
1688 }
1689
1690 /**
1691 * unlock_sock_fast - complement of lock_sock_fast
1692 * @sk: socket
1693 * @slow: slow mode
1694 *
1695 * Fast unlock of the socket, for user context.
1696 * If slow mode is on, we call the regular release_sock().
1697 */
unlock_sock_fast(struct sock * sk,bool slow)1698 static inline void unlock_sock_fast(struct sock *sk, bool slow)
1699 __releases(&sk->sk_lock.slock)
1700 {
1701 if (slow) {
1702 release_sock(sk);
1703 __release(&sk->sk_lock.slock);
1704 } else {
1705 mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
1706 spin_unlock_bh(&sk->sk_lock.slock);
1707 }
1708 }
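
/* Illustrative sketch (not part of this header): the intended pairing of
 * lock_sock_fast() and unlock_sock_fast() around a short, non-sleeping
 * critical section.
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... very short, non-sleeping work on the socket ...
 *
 *	unlock_sock_fast(sk, slow);
 */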
1709
1710 /* Used by processes to "lock" a socket state, so that
1711 * interrupts and bottom half handlers won't change it
1712 * from under us. It essentially blocks any incoming
1713 * packets, so that we won't get any new data or any
1714 * packets that change the state of the socket.
1715 *
1716 * While locked, BH processing will add new packets to
1717 * the backlog queue. This queue is processed by the
1718 * owner of the socket lock right before it is released.
1719 *
1720 * Since ~2.3.5 it has also been an exclusive sleep lock serializing
1721 * accesses from user process context.
1722 */
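
/* Illustrative sketch (not part of this header): the usual process-context
 * pattern around the socket lock. Packets arriving while the lock is owned
 * are queued on the backlog and processed by release_sock().
 *
 *	lock_sock(sk);
 *
 *	... examine or update socket state, possibly sleeping ...
 *
 *	release_sock(sk);
 */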
1723
sock_owned_by_me(const struct sock * sk)1724 static inline void sock_owned_by_me(const struct sock *sk)
1725 {
1726 #ifdef CONFIG_LOCKDEP
1727 WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
1728 #endif
1729 }
1730
sock_owned_by_user(const struct sock * sk)1731 static inline bool sock_owned_by_user(const struct sock *sk)
1732 {
1733 sock_owned_by_me(sk);
1734 return sk->sk_lock.owned;
1735 }
1736
sock_owned_by_user_nocheck(const struct sock * sk)1737 static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1738 {
1739 return sk->sk_lock.owned;
1740 }
1741
1742 /* no reclassification while locks are held */
sock_allow_reclassification(const struct sock * csk)1743 static inline bool sock_allow_reclassification(const struct sock *csk)
1744 {
1745 struct sock *sk = (struct sock *)csk;
1746
1747 return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
1748 }
1749
1750 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1751 struct proto *prot, int kern);
1752 void sk_free(struct sock *sk);
1753 void sk_destruct(struct sock *sk);
1754 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1755 void sk_free_unlock_clone(struct sock *sk);
1756
1757 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1758 gfp_t priority);
1759 void __sock_wfree(struct sk_buff *skb);
1760 void sock_wfree(struct sk_buff *skb);
1761 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1762 gfp_t priority);
1763 void skb_orphan_partial(struct sk_buff *skb);
1764 void sock_rfree(struct sk_buff *skb);
1765 void sock_efree(struct sk_buff *skb);
1766 #ifdef CONFIG_INET
1767 void sock_edemux(struct sk_buff *skb);
1768 void sock_pfree(struct sk_buff *skb);
1769 #else
1770 #define sock_edemux sock_efree
1771 #endif
1772
1773 int sock_setsockopt(struct socket *sock, int level, int op,
1774 sockptr_t optval, unsigned int optlen);
1775
1776 int sock_getsockopt(struct socket *sock, int level, int op,
1777 char __user *optval, int __user *optlen);
1778 int sock_gettstamp(struct socket *sock, void __user *userstamp,
1779 bool timeval, bool time32);
1780 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1781 int noblock, int *errcode);
1782 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1783 unsigned long data_len, int noblock,
1784 int *errcode, int max_page_order);
1785 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1786 void sock_kfree_s(struct sock *sk, void *mem, int size);
1787 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1788 void sk_send_sigurg(struct sock *sk);
1789
1790 struct sockcm_cookie {
1791 u64 transmit_time;
1792 u32 mark;
1793 u16 tsflags;
1794 };
1795
sockcm_init(struct sockcm_cookie * sockc,const struct sock * sk)1796 static inline void sockcm_init(struct sockcm_cookie *sockc,
1797 const struct sock *sk)
1798 {
1799 *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
1800 }
1801
1802 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1803 struct sockcm_cookie *sockc);
1804 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1805 struct sockcm_cookie *sockc);
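
/* Illustrative sketch (not part of this header): a sendmsg() implementation
 * typically seeds a sockcm_cookie from the socket defaults and then lets any
 * control messages override it; error handling is elided.
 *
 *	struct sockcm_cookie sockc;
 *	int err;
 *
 *	sockcm_init(&sockc, sk);
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (err)
 *			return err;
 *	}
 *	... sockc now holds the socket's default tsflags plus any
 *	    per-message overrides (mark, transmit_time, ...) ...
 */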
1806
1807 /*
1808 * Functions to fill in entries in struct proto_ops when a protocol
1809 * does not implement a particular function.
1810 */
1811 int sock_no_bind(struct socket *, struct sockaddr *, int);
1812 int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1813 int sock_no_socketpair(struct socket *, struct socket *);
1814 int sock_no_accept(struct socket *, struct socket *, int, bool);
1815 int sock_no_getname(struct socket *, struct sockaddr *, int);
1816 int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
1817 int sock_no_listen(struct socket *, int);
1818 int sock_no_shutdown(struct socket *, int);
1819 int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
1820 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1821 int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
1822 int sock_no_mmap(struct file *file, struct socket *sock,
1823 struct vm_area_struct *vma);
1824 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
1825 size_t size, int flags);
1826 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
1827 int offset, size_t size, int flags);
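
/* Illustrative sketch (not part of this header): a minimal proto_ops table
 * for a hypothetical datagram-style family, using the sock_no_* stubs for
 * operations the protocol does not support. All foo_* names (and PF_FOO)
 * are examples only; remaining members are elided.
 *
 *	static const struct proto_ops foo_proto_ops = {
 *		.family		= PF_FOO,
 *		.owner		= THIS_MODULE,
 *		.release	= foo_release,
 *		.bind		= foo_bind,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.sendmsg	= foo_sendmsg,
 *		.recvmsg	= foo_recvmsg,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */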
1828
1829 /*
1830 * Functions to fill in entries in struct proto_ops when a protocol
1831 * uses the inet style.
1832 */
1833 int sock_common_getsockopt(struct socket *sock, int level, int optname,
1834 char __user *optval, int __user *optlen);
1835 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1836 int flags);
1837 int sock_common_setsockopt(struct socket *sock, int level, int optname,
1838 sockptr_t optval, unsigned int optlen);
1839
1840 void sk_common_release(struct sock *sk);
1841
1842 /*
1843 * Default socket callbacks and setup code
1844 */
1845
1846 /* Initialise core socket variables */
1847 void sock_init_data(struct socket *sock, struct sock *sk);
1848
1849 /*
1850 * Socket reference counting postulates.
1851 *
1852 * * Each user of socket SHOULD hold a reference count.
1853 * * Each access point to a socket (a hash table bucket, a reference from a list,
1854 * a running timer, an skb in flight) MUST hold a reference count.
1855 * * When the reference count hits 0, it will never increase again.
1856 * * When the reference count hits 0, no references from outside
1857 * exist to this socket; the current process on the current CPU
1858 * is the last user and may/should destroy this socket.
1859 * * sk_free is called from any context: process, BH, IRQ. When
1860 * it is called, the socket has no references from outside -> sk_free
1861 * may release descendant resources allocated by the socket, but
1862 * by the time it is called, the socket is NOT referenced by any
1863 * hash tables, lists etc.
1864 * * Packets delivered from outside (from the network or from another process)
1865 * and enqueued on receive/error queues SHOULD NOT grab a reference count
1866 * while they sit in a queue. Otherwise, packets will leak into a hole when
1867 * the socket is looked up by one CPU and unhashing is done by another CPU.
1868 * This is true for udp/raw, netlink (leak to receive and error queues) and tcp
1869 * (leak to backlog). Packet sockets do all their processing inside
1870 * BR_NETPROTO_LOCK, so they do not have this race condition. UNIX sockets
1871 * use a separate SMP lock, so they are not prone to it either.
1872 */
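
/* Illustrative sketch (not part of this header): the basic pattern implied by
 * the postulates above. A new access point (timer, list entry, deferred work)
 * takes its own reference with sock_hold() and drops it with sock_put() once
 * the socket can no longer be reached through it; foo_work is an example only.
 *
 *	sock_hold(sk);			(new access point starts here)
 *	queue_work(system_wq, &foo_work);
 *
 *	... later, in the work handler, once done with the socket ...
 *
 *	sock_put(sk);			(may free the socket)
 */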
1873
1874 /* Ungrab socket and destroy it, if it was the last reference. */
sock_put(struct sock * sk)1875 static inline void sock_put(struct sock *sk)
1876 {
1877 if (refcount_dec_and_test(&sk->sk_refcnt))
1878 sk_free(sk);
1879 }
1880 /* Generic version of sock_put(), dealing with all sockets
1881 * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
1882 */
1883 void sock_gen_put(struct sock *sk);
1884
1885 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1886 unsigned int trim_cap, bool refcounted);
sk_receive_skb(struct sock * sk,struct sk_buff * skb,const int nested)1887 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1888 const int nested)
1889 {
1890 return __sk_receive_skb(sk, skb, nested, 1, true);
1891 }
1892
sk_tx_queue_set(struct sock * sk,int tx_queue)1893 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1894 {
1895 /* sk_tx_queue_mapping accepts only up to a 16-bit value */
1896 if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
1897 return;
1898 sk->sk_tx_queue_mapping = tx_queue;
1899 }
1900
1901 #define NO_QUEUE_MAPPING USHRT_MAX
1902
sk_tx_queue_clear(struct sock * sk)1903 static inline void sk_tx_queue_clear(struct sock *sk)
1904 {
1905 sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
1906 }
1907
sk_tx_queue_get(const struct sock * sk)1908 static inline int sk_tx_queue_get(const struct sock *sk)
1909 {
1910 if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
1911 return sk->sk_tx_queue_mapping;
1912
1913 return -1;
1914 }
1915
__sk_rx_queue_set(struct sock * sk,const struct sk_buff * skb,bool force_set)1916 static inline void __sk_rx_queue_set(struct sock *sk,
1917 const struct sk_buff *skb,
1918 bool force_set)
1919 {
1920 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
1921 if (skb_rx_queue_recorded(skb)) {
1922 u16 rx_queue = skb_get_rx_queue(skb);
1923
1924 if (force_set ||
1925 unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
1926 WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
1927 }
1928 #endif
1929 }
1930
sk_rx_queue_set(struct sock * sk,const struct sk_buff * skb)1931 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
1932 {
1933 __sk_rx_queue_set(sk, skb, true);
1934 }
1935
sk_rx_queue_update(struct sock * sk,const struct sk_buff * skb)1936 static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
1937 {
1938 __sk_rx_queue_set(sk, skb, false);
1939 }
1940
sk_rx_queue_clear(struct sock * sk)1941 static inline void sk_rx_queue_clear(struct sock *sk)
1942 {
1943 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
1944 WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
1945 #endif
1946 }
1947
sk_rx_queue_get(const struct sock * sk)1948 static inline int sk_rx_queue_get(const struct sock *sk)
1949 {
1950 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
1951 if (sk) {
1952 int res = READ_ONCE(sk->sk_rx_queue_mapping);
1953
1954 if (res != NO_QUEUE_MAPPING)
1955 return res;
1956 }
1957 #endif
1958
1959 return -1;
1960 }
1961
sk_set_socket(struct sock * sk,struct socket * sock)1962 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1963 {
1964 sk->sk_socket = sock;
1965 }
1966
sk_sleep(struct sock * sk)1967 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1968 {
1969 BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
1970 return &rcu_dereference_raw(sk->sk_wq)->wait;
1971 }
1972 /* Detach socket from process context.
1973 * Announce socket dead, detach it from wait queue and inode.
1974 * Note that the parent inode held a reference count on this struct sock;
1975 * we do not release it in this function, because the protocol
1976 * probably wants some additional cleanups or even to continue
1977 * working with this socket (TCP).
1978 */
sock_orphan(struct sock * sk)1979 static inline void sock_orphan(struct sock *sk)
1980 {
1981 write_lock_bh(&sk->sk_callback_lock);
1982 sock_set_flag(sk, SOCK_DEAD);
1983 sk_set_socket(sk, NULL);
1984 sk->sk_wq = NULL;
1985 write_unlock_bh(&sk->sk_callback_lock);
1986 }
1987
sock_graft(struct sock * sk,struct socket * parent)1988 static inline void sock_graft(struct sock *sk, struct socket *parent)
1989 {
1990 WARN_ON(parent->sk);
1991 write_lock_bh(&sk->sk_callback_lock);
1992 rcu_assign_pointer(sk->sk_wq, &parent->wq);
1993 parent->sk = sk;
1994 sk_set_socket(sk, parent);
1995 sk->sk_uid = SOCK_INODE(parent)->i_uid;
1996 security_sock_graft(sk, parent);
1997 write_unlock_bh(&sk->sk_callback_lock);
1998 }
1999
2000 kuid_t sock_i_uid(struct sock *sk);
2001 unsigned long sock_i_ino(struct sock *sk);
2002
sock_net_uid(const struct net * net,const struct sock * sk)2003 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
2004 {
2005 return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
2006 }
2007
net_tx_rndhash(void)2008 static inline u32 net_tx_rndhash(void)
2009 {
2010 u32 v = prandom_u32();
2011
2012 return v ?: 1;
2013 }
2014
sk_set_txhash(struct sock * sk)2015 static inline void sk_set_txhash(struct sock *sk)
2016 {
2017 /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
2018 WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
2019 }
2020
sk_rethink_txhash(struct sock * sk)2021 static inline bool sk_rethink_txhash(struct sock *sk)
2022 {
2023 if (sk->sk_txhash) {
2024 sk_set_txhash(sk);
2025 return true;
2026 }
2027 return false;
2028 }
2029
2030 static inline struct dst_entry *
__sk_dst_get(struct sock * sk)2031 __sk_dst_get(struct sock *sk)
2032 {
2033 return rcu_dereference_check(sk->sk_dst_cache,
2034 lockdep_sock_is_held(sk));
2035 }
2036
2037 static inline struct dst_entry *
sk_dst_get(struct sock * sk)2038 sk_dst_get(struct sock *sk)
2039 {
2040 struct dst_entry *dst;
2041
2042 rcu_read_lock();
2043 dst = rcu_dereference(sk->sk_dst_cache);
2044 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
2045 dst = NULL;
2046 rcu_read_unlock();
2047 return dst;
2048 }
2049
__dst_negative_advice(struct sock * sk)2050 static inline void __dst_negative_advice(struct sock *sk)
2051 {
2052 struct dst_entry *ndst, *dst = __sk_dst_get(sk);
2053
2054 if (dst && dst->ops->negative_advice) {
2055 ndst = dst->ops->negative_advice(dst);
2056
2057 if (ndst != dst) {
2058 rcu_assign_pointer(sk->sk_dst_cache, ndst);
2059 sk_tx_queue_clear(sk);
2060 sk->sk_dst_pending_confirm = 0;
2061 }
2062 }
2063 }
2064
dst_negative_advice(struct sock * sk)2065 static inline void dst_negative_advice(struct sock *sk)
2066 {
2067 sk_rethink_txhash(sk);
2068 __dst_negative_advice(sk);
2069 }
2070
2071 static inline void
__sk_dst_set(struct sock * sk,struct dst_entry * dst)2072 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
2073 {
2074 struct dst_entry *old_dst;
2075
2076 sk_tx_queue_clear(sk);
2077 sk->sk_dst_pending_confirm = 0;
2078 old_dst = rcu_dereference_protected(sk->sk_dst_cache,
2079 lockdep_sock_is_held(sk));
2080 rcu_assign_pointer(sk->sk_dst_cache, dst);
2081 dst_release(old_dst);
2082 }
2083
2084 static inline void
sk_dst_set(struct sock * sk,struct dst_entry * dst)2085 sk_dst_set(struct sock *sk, struct dst_entry *dst)
2086 {
2087 struct dst_entry *old_dst;
2088
2089 sk_tx_queue_clear(sk);
2090 sk->sk_dst_pending_confirm = 0;
2091 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
2092 dst_release(old_dst);
2093 }
2094
2095 static inline void
__sk_dst_reset(struct sock * sk)2096 __sk_dst_reset(struct sock *sk)
2097 {
2098 __sk_dst_set(sk, NULL);
2099 }
2100
2101 static inline void
sk_dst_reset(struct sock * sk)2102 sk_dst_reset(struct sock *sk)
2103 {
2104 sk_dst_set(sk, NULL);
2105 }
2106
2107 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2108
2109 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
2110
sk_dst_confirm(struct sock * sk)2111 static inline void sk_dst_confirm(struct sock *sk)
2112 {
2113 if (!READ_ONCE(sk->sk_dst_pending_confirm))
2114 WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
2115 }
2116
sock_confirm_neigh(struct sk_buff * skb,struct neighbour * n)2117 static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
2118 {
2119 if (skb_get_dst_pending_confirm(skb)) {
2120 struct sock *sk = skb->sk;
2121 unsigned long now = jiffies;
2122
2123 /* avoid dirtying neighbour */
2124 if (READ_ONCE(n->confirmed) != now)
2125 WRITE_ONCE(n->confirmed, now);
2126 if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
2127 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
2128 }
2129 }
2130
2131 bool sk_mc_loop(struct sock *sk);
2132
sk_can_gso(const struct sock * sk)2133 static inline bool sk_can_gso(const struct sock *sk)
2134 {
2135 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
2136 }
2137
2138 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2139
sk_nocaps_add(struct sock * sk,netdev_features_t flags)2140 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
2141 {
2142 sk->sk_route_nocaps |= flags;
2143 sk->sk_route_caps &= ~flags;
2144 }
2145
skb_do_copy_data_nocache(struct sock * sk,struct sk_buff * skb,struct iov_iter * from,char * to,int copy,int offset)2146 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
2147 struct iov_iter *from, char *to,
2148 int copy, int offset)
2149 {
2150 if (skb->ip_summed == CHECKSUM_NONE) {
2151 __wsum csum = 0;
2152 if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
2153 return -EFAULT;
2154 skb->csum = csum_block_add(skb->csum, csum, offset);
2155 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
2156 if (!copy_from_iter_full_nocache(to, copy, from))
2157 return -EFAULT;
2158 } else if (!copy_from_iter_full(to, copy, from))
2159 return -EFAULT;
2160
2161 return 0;
2162 }
2163
skb_add_data_nocache(struct sock * sk,struct sk_buff * skb,struct iov_iter * from,int copy)2164 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
2165 struct iov_iter *from, int copy)
2166 {
2167 int err, offset = skb->len;
2168
2169 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
2170 copy, offset);
2171 if (err)
2172 __skb_trim(skb, offset);
2173
2174 return err;
2175 }
2176
skb_copy_to_page_nocache(struct sock * sk,struct iov_iter * from,struct sk_buff * skb,struct page * page,int off,int copy)2177 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
2178 struct sk_buff *skb,
2179 struct page *page,
2180 int off, int copy)
2181 {
2182 int err;
2183
2184 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
2185 copy, skb->len);
2186 if (err)
2187 return err;
2188
2189 skb->len += copy;
2190 skb->data_len += copy;
2191 skb->truesize += copy;
2192 sk_wmem_queued_add(sk, copy);
2193 sk_mem_charge(sk, copy);
2194 return 0;
2195 }
2196
2197 /**
2198 * sk_wmem_alloc_get - returns write allocations
2199 * @sk: socket
2200 *
2201 * Return: sk_wmem_alloc minus initial offset of one
2202 */
sk_wmem_alloc_get(const struct sock * sk)2203 static inline int sk_wmem_alloc_get(const struct sock *sk)
2204 {
2205 return refcount_read(&sk->sk_wmem_alloc) - 1;
2206 }
2207
2208 /**
2209 * sk_rmem_alloc_get - returns read allocations
2210 * @sk: socket
2211 *
2212 * Return: sk_rmem_alloc
2213 */
sk_rmem_alloc_get(const struct sock * sk)2214 static inline int sk_rmem_alloc_get(const struct sock *sk)
2215 {
2216 return atomic_read(&sk->sk_rmem_alloc);
2217 }
2218
2219 /**
2220 * sk_has_allocations - check if allocations are outstanding
2221 * @sk: socket
2222 *
2223 * Return: true if socket has write or read allocations
2224 */
sk_has_allocations(const struct sock * sk)2225 static inline bool sk_has_allocations(const struct sock *sk)
2226 {
2227 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
2228 }
2229
2230 /**
2231 * skwq_has_sleeper - check if there are any waiting processes
2232 * @wq: struct socket_wq
2233 *
2234 * Return: true if socket_wq has waiting processes
2235 *
2236 * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
2237 * barrier call. They were added due to the race found within the tcp code.
2238 *
2239 * Consider following tcp code paths::
2240 *
2241 * CPU1 CPU2
2242 * sys_select receive packet
2243 * ... ...
2244 * __add_wait_queue update tp->rcv_nxt
2245 * ... ...
2246 * tp->rcv_nxt check sock_def_readable
2247 * ... {
2248 * schedule rcu_read_lock();
2249 * wq = rcu_dereference(sk->sk_wq);
2250 * if (wq && waitqueue_active(&wq->wait))
2251 * wake_up_interruptible(&wq->wait)
2252 * ...
2253 * }
2254 *
2255 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
2256 * in its cache, and so does the tp->rcv_nxt update on the CPU2 side. CPU1
2257 * could then end up calling schedule and sleeping forever if there is no more
2258 * data on the socket.
2259 *
2260 */
skwq_has_sleeper(struct socket_wq * wq)2261 static inline bool skwq_has_sleeper(struct socket_wq *wq)
2262 {
2263 return wq && wq_has_sleeper(&wq->wait);
2264 }
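
/* Illustrative sketch (not part of this header): the wakeup side of the
 * pattern described above, as done by default callbacks such as
 * sock_def_readable(). skwq_has_sleeper() supplies the barrier that pairs
 * with the one in sock_poll_wait().
 *
 *	rcu_read_lock();
 *	wq = rcu_dereference(sk->sk_wq);
 *	if (skwq_has_sleeper(wq))
 *		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN);
 *	rcu_read_unlock();
 */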
2265
2266 /**
2267 * sock_poll_wait - place memory barrier behind the poll_wait call.
2268 * @filp: file
2269 * @sock: socket to wait on
2270 * @p: poll_table
2271 *
2272 * See the comments in the wq_has_sleeper function.
2273 */
sock_poll_wait(struct file * filp,struct socket * sock,poll_table * p)2274 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
2275 poll_table *p)
2276 {
2277 if (!poll_does_not_wait(p)) {
2278 poll_wait(filp, &sock->wq.wait, p);
2279 /* We need to be sure we are in sync with the
2280 * socket flags modification.
2281 *
2282 * This memory barrier is paired in the wq_has_sleeper.
2283 */
2284 smp_mb();
2285 }
2286 }
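
/* Illustrative sketch (not part of this header): a poll() implementation
 * calls sock_poll_wait() first and only then derives the event mask, so the
 * barrier above orders queueing on the waitqueue against reading socket
 * state. foo_poll() is an example name only.
 *
 *	static __poll_t foo_poll(struct file *file, struct socket *sock,
 *				 poll_table *wait)
 *	{
 *		struct sock *sk = sock->sk;
 *		__poll_t mask = 0;
 *
 *		sock_poll_wait(file, sock, wait);
 *		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 *			mask |= EPOLLIN | EPOLLRDNORM;
 *		if (sock_writeable(sk))
 *			mask |= EPOLLOUT | EPOLLWRNORM;
 *		return mask;
 *	}
 */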
2287
skb_set_hash_from_sk(struct sk_buff * skb,struct sock * sk)2288 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
2289 {
2290 /* This pairs with WRITE_ONCE() in sk_set_txhash() */
2291 u32 txhash = READ_ONCE(sk->sk_txhash);
2292
2293 if (txhash) {
2294 skb->l4_hash = 1;
2295 skb->hash = txhash;
2296 }
2297 }
2298
2299 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2300
2301 /*
2302 * Queue a received datagram if it will fit. Stream and sequenced
2303 * protocols can't normally use this as they need to fit buffers in
2304 * and play with them.
2305 *
2306 * Inlined as it's very short and called for pretty much every
2307 * packet ever received.
2308 */
skb_set_owner_r(struct sk_buff * skb,struct sock * sk)2309 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
2310 {
2311 skb_orphan(skb);
2312 skb->sk = sk;
2313 skb->destructor = sock_rfree;
2314 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2315 sk_mem_charge(sk, skb->truesize);
2316 }
2317
skb_set_owner_sk_safe(struct sk_buff * skb,struct sock * sk)2318 static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
2319 {
2320 if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
2321 skb_orphan(skb);
2322 skb->destructor = sock_efree;
2323 skb->sk = sk;
2324 return true;
2325 }
2326 return false;
2327 }
2328
skb_prepare_for_gro(struct sk_buff * skb)2329 static inline void skb_prepare_for_gro(struct sk_buff *skb)
2330 {
2331 if (skb->destructor != sock_wfree) {
2332 skb_orphan(skb);
2333 return;
2334 }
2335 skb->slow_gro = 1;
2336 }
2337
2338 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2339 unsigned long expires);
2340
2341 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2342
2343 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
2344
2345 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2346 struct sk_buff *skb, unsigned int flags,
2347 void (*destructor)(struct sock *sk,
2348 struct sk_buff *skb));
2349 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2350 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2351
2352 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2353 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2354
2355 /*
2356 * Recover an error report and clear atomically
2357 */
2358
sock_error(struct sock * sk)2359 static inline int sock_error(struct sock *sk)
2360 {
2361 int err;
2362
2363 /* Avoid an atomic operation for the common case.
2364 * This is racy since another cpu/thread can change sk_err under us.
2365 */
2366 if (likely(data_race(!sk->sk_err)))
2367 return 0;
2368
2369 err = xchg(&sk->sk_err, 0);
2370 return -err;
2371 }
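
/* Illustrative sketch (not part of this header): receive paths typically
 * check sock_error() before sleeping, so a pending asynchronous error is
 * reported (and cleared) instead of blocking the caller.
 *
 *	err = sock_error(sk);
 *	if (err)
 *		goto out;	(err already holds a negative errno)
 */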
2372
2373 void sk_error_report(struct sock *sk);
2374
sock_wspace(struct sock * sk)2375 static inline unsigned long sock_wspace(struct sock *sk)
2376 {
2377 int amt = 0;
2378
2379 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
2380 amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
2381 if (amt < 0)
2382 amt = 0;
2383 }
2384 return amt;
2385 }
2386
2387 /* Note:
2388 * We use sk->sk_wq_raw, from contexts knowing this
2389 * pointer is not NULL and cannot disappear/change.
2390 */
sk_set_bit(int nr,struct sock * sk)2391 static inline void sk_set_bit(int nr, struct sock *sk)
2392 {
2393 if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
2394 !sock_flag(sk, SOCK_FASYNC))
2395 return;
2396
2397 set_bit(nr, &sk->sk_wq_raw->flags);
2398 }
2399
sk_clear_bit(int nr,struct sock * sk)2400 static inline void sk_clear_bit(int nr, struct sock *sk)
2401 {
2402 if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
2403 !sock_flag(sk, SOCK_FASYNC))
2404 return;
2405
2406 clear_bit(nr, &sk->sk_wq_raw->flags);
2407 }
2408
sk_wake_async(const struct sock * sk,int how,int band)2409 static inline void sk_wake_async(const struct sock *sk, int how, int band)
2410 {
2411 if (sock_flag(sk, SOCK_FASYNC)) {
2412 rcu_read_lock();
2413 sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
2414 rcu_read_unlock();
2415 }
2416 }
2417
2418 /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
2419 * need sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
2420 * Note: for send buffers, TCP works better if we can build two skbs at
2421 * minimum.
2422 */
2423 #define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
2424
2425 #define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2)
2426 #define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE
2427
sk_stream_moderate_sndbuf(struct sock * sk)2428 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2429 {
2430 u32 val;
2431
2432 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
2433 return;
2434
2435 val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
2436 val = max_t(u32, val, sk_unused_reserved_mem(sk));
2437
2438 WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
2439 }
2440
2441 /**
2442 * sk_page_frag - return an appropriate page_frag
2443 * @sk: socket
2444 *
2445 * Use the per task page_frag instead of the per socket one for
2446 * optimization when we know that we're in process context and own
2447 * everything that's associated with %current.
2448 *
2449 * Both direct reclaim and page faults can nest inside other
2450 * socket operations and end up recursing into sk_page_frag()
2451 * while it's already in use: explicitly avoid task page_frag
2452 * usage if the caller is potentially doing any of them.
2453 * This assumes that page fault handlers use the GFP_NOFS flags.
2454 *
2455 * Return: a per task page_frag if context allows that,
2456 * otherwise a per socket one.
2457 */
sk_page_frag(struct sock * sk)2458 static inline struct page_frag *sk_page_frag(struct sock *sk)
2459 {
2460 if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
2461 (__GFP_DIRECT_RECLAIM | __GFP_FS))
2462 return &current->task_frag;
2463
2464 return &sk->sk_frag;
2465 }
2466
2467 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
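
/* Illustrative sketch (not part of this header): the usual refill pattern
 * for the page_frag returned by sk_page_frag(), similar to what the TCP
 * sendmsg path does; the actual copy and error handling are elided.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	... copy data to page_address(pfrag->page) + pfrag->offset ...
 *	pfrag->offset += copy;
 */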
2468
2469 /*
2470 * Default write policy as shown to user space via poll/select/SIGIO
2471 */
sock_writeable(const struct sock * sk)2472 static inline bool sock_writeable(const struct sock *sk)
2473 {
2474 return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
2475 }
2476
gfp_any(void)2477 static inline gfp_t gfp_any(void)
2478 {
2479 return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
2480 }
2481
gfp_memcg_charge(void)2482 static inline gfp_t gfp_memcg_charge(void)
2483 {
2484 return in_softirq() ? GFP_NOWAIT : GFP_KERNEL;
2485 }
2486
sock_rcvtimeo(const struct sock * sk,bool noblock)2487 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
2488 {
2489 return noblock ? 0 : sk->sk_rcvtimeo;
2490 }
2491
sock_sndtimeo(const struct sock * sk,bool noblock)2492 static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
2493 {
2494 return noblock ? 0 : sk->sk_sndtimeo;
2495 }
2496
sock_rcvlowat(const struct sock * sk,int waitall,int len)2497 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
2498 {
2499 int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
2500
2501 return v ?: 1;
2502 }
2503
2504 /* Alas, with a timeout, socket operations are not restartable.
2505 * Compare this to poll().
2506 */
sock_intr_errno(long timeo)2507 static inline int sock_intr_errno(long timeo)
2508 {
2509 return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
2510 }
2511
2512 struct sock_skb_cb {
2513 u32 dropcount;
2514 };
2515
2516 /* Store sock_skb_cb at the end of skb->cb[] so protocol families
2517 * using skb->cb[] would keep using it directly and utilize its
2518 * alignment guarantee.
2519 */
2520 #define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
2521 sizeof(struct sock_skb_cb)))
2522
2523 #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
2524 SOCK_SKB_CB_OFFSET))
2525
2526 #define sock_skb_cb_check_size(size) \
2527 BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
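
/* Illustrative sketch (not part of this header): a protocol with a private
 * skb->cb[] layout can assert at build time that it does not overlap the
 * trailing sock_skb_cb area. struct foo_skb_cb and foo_proto_init() are
 * example names only.
 *
 *	struct foo_skb_cb {
 *		u32	frag_offset;
 *		u16	flags;
 *	};
 *
 *	static int __init foo_proto_init(void)
 *	{
 *		sock_skb_cb_check_size(sizeof(struct foo_skb_cb));
 *		...
 *	}
 */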
2528
2529 static inline void
sock_skb_set_dropcount(const struct sock * sk,struct sk_buff * skb)2530 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
2531 {
2532 SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
2533 atomic_read(&sk->sk_drops) : 0;
2534 }
2535
sk_drops_add(struct sock * sk,const struct sk_buff * skb)2536 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
2537 {
2538 int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2539
2540 atomic_add(segs, &sk->sk_drops);
2541 }
2542
sock_read_timestamp(struct sock * sk)2543 static inline ktime_t sock_read_timestamp(struct sock *sk)
2544 {
2545 #if BITS_PER_LONG==32
2546 unsigned int seq;
2547 ktime_t kt;
2548
2549 do {
2550 seq = read_seqbegin(&sk->sk_stamp_seq);
2551 kt = sk->sk_stamp;
2552 } while (read_seqretry(&sk->sk_stamp_seq, seq));
2553
2554 return kt;
2555 #else
2556 return READ_ONCE(sk->sk_stamp);
2557 #endif
2558 }
2559
sock_write_timestamp(struct sock * sk,ktime_t kt)2560 static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
2561 {
2562 #if BITS_PER_LONG==32
2563 write_seqlock(&sk->sk_stamp_seq);
2564 sk->sk_stamp = kt;
2565 write_sequnlock(&sk->sk_stamp_seq);
2566 #else
2567 WRITE_ONCE(sk->sk_stamp, kt);
2568 #endif
2569 }
2570
2571 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2572 struct sk_buff *skb);
2573 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2574 struct sk_buff *skb);
2575
2576 static inline void
sock_recv_timestamp(struct msghdr * msg,struct sock * sk,struct sk_buff * skb)2577 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2578 {
2579 ktime_t kt = skb->tstamp;
2580 struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
2581
2582 /*
2583 * generate control messages if
2584 * - receive time stamping in software requested
2585 * - software time stamp available and wanted
2586 * - hardware time stamps available and wanted
2587 */
2588 if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2589 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2590 (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2591 (hwtstamps->hwtstamp &&
2592 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2593 __sock_recv_timestamp(msg, sk, skb);
2594 else
2595 sock_write_timestamp(sk, kt);
2596
2597 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2598 __sock_recv_wifi_status(msg, sk, skb);
2599 }
2600
2601 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2602 struct sk_buff *skb);
2603
2604 #define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
sock_recv_ts_and_drops(struct msghdr * msg,struct sock * sk,struct sk_buff * skb)2605 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2606 struct sk_buff *skb)
2607 {
2608 #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
2609 (1UL << SOCK_RCVTSTAMP))
2610 #define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
2611 SOF_TIMESTAMPING_RAW_HARDWARE)
2612
2613 if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
2614 __sock_recv_ts_and_drops(msg, sk, skb);
2615 else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
2616 sock_write_timestamp(sk, skb->tstamp);
2617 else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
2618 sock_write_timestamp(sk, 0);
2619 }
2620
2621 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
2622
2623 /**
2624 * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
2625 * @sk: socket sending this packet
2626 * @tsflags: timestamping flags to use
2627 * @tx_flags: completed with instructions for time stamping
2628 * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
2629 *
2630 * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
2631 */
_sock_tx_timestamp(struct sock * sk,__u16 tsflags,__u8 * tx_flags,__u32 * tskey)2632 static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2633 __u8 *tx_flags, __u32 *tskey)
2634 {
2635 if (unlikely(tsflags)) {
2636 __sock_tx_timestamp(tsflags, tx_flags);
2637 if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
2638 tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
2639 *tskey = sk->sk_tskey++;
2640 }
2641 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2642 *tx_flags |= SKBTX_WIFI_STATUS;
2643 }
2644
sock_tx_timestamp(struct sock * sk,__u16 tsflags,__u8 * tx_flags)2645 static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2646 __u8 *tx_flags)
2647 {
2648 _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
2649 }
2650
skb_setup_tx_timestamp(struct sk_buff * skb,__u16 tsflags)2651 static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
2652 {
2653 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2654 &skb_shinfo(skb)->tskey);
2655 }
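
/* Illustrative sketch (not part of this header): once an skb has been built
 * and charged to the socket, a transmit path can propagate the tsflags
 * gathered in the sockcm_cookie into the skb's shared info.
 *
 *	skb_set_owner_w(skb, sk);
 *	skb_setup_tx_timestamp(skb, sockc->tsflags);
 */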
2656
2657 /**
2658 * sk_eat_skb - Release a skb if it is no longer needed
2659 * @sk: socket to eat this skb from
2660 * @skb: socket buffer to eat
2661 *
2662 * This routine must be called with interrupts disabled or with the socket
2663 * locked so that the sk_buff queue operation is ok.
2664 */
sk_eat_skb(struct sock * sk,struct sk_buff * skb)2665 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
2666 {
2667 __skb_unlink(skb, &sk->sk_receive_queue);
2668 __kfree_skb(skb);
2669 }
2670
2671 static inline
sock_net(const struct sock * sk)2672 struct net *sock_net(const struct sock *sk)
2673 {
2674 return read_pnet(&sk->sk_net);
2675 }
2676
2677 static inline
sock_net_set(struct sock * sk,struct net * net)2678 void sock_net_set(struct sock *sk, struct net *net)
2679 {
2680 write_pnet(&sk->sk_net, net);
2681 }
2682
2683 static inline bool
skb_sk_is_prefetched(struct sk_buff * skb)2684 skb_sk_is_prefetched(struct sk_buff *skb)
2685 {
2686 #ifdef CONFIG_INET
2687 return skb->destructor == sock_pfree;
2688 #else
2689 return false;
2690 #endif /* CONFIG_INET */
2691 }
2692
2693 /* This helper checks if a socket is a full socket,
2694 * i.e. _not_ a timewait or request socket.
2695 */
sk_fullsock(const struct sock * sk)2696 static inline bool sk_fullsock(const struct sock *sk)
2697 {
2698 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
2699 }
2700
2701 static inline bool
sk_is_refcounted(struct sock * sk)2702 sk_is_refcounted(struct sock *sk)
2703 {
2704 /* Only full sockets have sk->sk_flags. */
2705 return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
2706 }
2707
2708 /**
2709 * skb_steal_sock - steal a socket from an sk_buff
2710 * @skb: sk_buff to steal the socket from
2711 * @refcounted: is set to true if the socket is reference-counted
2712 */
2713 static inline struct sock *
skb_steal_sock(struct sk_buff * skb,bool * refcounted)2714 skb_steal_sock(struct sk_buff *skb, bool *refcounted)
2715 {
2716 if (skb->sk) {
2717 struct sock *sk = skb->sk;
2718
2719 *refcounted = true;
2720 if (skb_sk_is_prefetched(skb))
2721 *refcounted = sk_is_refcounted(sk);
2722 skb->destructor = NULL;
2723 skb->sk = NULL;
2724 return sk;
2725 }
2726 *refcounted = false;
2727 return NULL;
2728 }
2729
2730 /* Checks if this SKB belongs to an HW offloaded socket
2731 * and whether any SW fallbacks are required based on dev.
2732 * Check decrypted mark in case skb_orphan() cleared socket.
2733 */
sk_validate_xmit_skb(struct sk_buff * skb,struct net_device * dev)2734 static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2735 struct net_device *dev)
2736 {
2737 #ifdef CONFIG_SOCK_VALIDATE_XMIT
2738 struct sock *sk = skb->sk;
2739
2740 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2741 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2742 #ifdef CONFIG_TLS_DEVICE
2743 } else if (unlikely(skb->decrypted)) {
2744 pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
2745 kfree_skb(skb);
2746 skb = NULL;
2747 #endif
2748 }
2749 #endif
2750
2751 return skb;
2752 }
2753
2754 /* This helper checks if a socket is a LISTEN or NEW_SYN_RECV one.
2755 * SYNACK messages can be attached to either of them (depending on SYNCOOKIE).
2756 */
sk_listener(const struct sock * sk)2757 static inline bool sk_listener(const struct sock *sk)
2758 {
2759 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2760 }
2761
2762 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
2763 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2764 int type);
2765
2766 bool sk_ns_capable(const struct sock *sk,
2767 struct user_namespace *user_ns, int cap);
2768 bool sk_capable(const struct sock *sk, int cap);
2769 bool sk_net_capable(const struct sock *sk, int cap);
2770
2771 void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2772
2773 /* Take into consideration the size of the struct sk_buff overhead in the
2774 * determination of these values, since that is non-constant across
2775 * platforms. This makes socket queueing behavior and performance
2776 * not depend upon such differences.
2777 */
2778 #define _SK_MEM_PACKETS 256
2779 #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
2780 #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2781 #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2782
2783 extern __u32 sysctl_wmem_max;
2784 extern __u32 sysctl_rmem_max;
2785
2786 extern int sysctl_tstamp_allow_data;
2787 extern int sysctl_optmem_max;
2788
2789 extern __u32 sysctl_wmem_default;
2790 extern __u32 sysctl_rmem_default;
2791
2792 #define SKB_FRAG_PAGE_ORDER get_order(32768)
2793 DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2794
sk_get_wmem0(const struct sock * sk,const struct proto * proto)2795 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
2796 {
2797 /* Does this proto have a per-netns sysctl_wmem? */
2798 if (proto->sysctl_wmem_offset)
2799 return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
2800
2801 return *proto->sysctl_wmem;
2802 }
2803
sk_get_rmem0(const struct sock * sk,const struct proto * proto)2804 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
2805 {
2806 /* Does this proto have a per-netns sysctl_rmem? */
2807 if (proto->sysctl_rmem_offset)
2808 return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
2809
2810 return *proto->sysctl_rmem;
2811 }
2812
2813 /* The default TCP small queue budget is ~1 ms of data (1 sec >> 10).
2814 * Some wifi drivers need to tweak it to get more chunks.
2815 * They can use this helper from their ndo_start_xmit().
2816 */
sk_pacing_shift_update(struct sock * sk,int val)2817 static inline void sk_pacing_shift_update(struct sock *sk, int val)
2818 {
2819 if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
2820 return;
2821 WRITE_ONCE(sk->sk_pacing_shift, val);
2822 }
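
/* Illustrative sketch (not part of this header): a wifi driver that
 * aggregates heavily might ask TCP to queue more data per socket from its
 * ndo_start_xmit(). The shift value 8 (~4 ms of data) and foo_start_xmit()
 * are examples only.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		sk_pacing_shift_update(skb->sk, 8);
 *		...
 *	}
 */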
2823
2824 /* if a socket is bound to a device, check that the given device
2825 * index is either the same or that the socket is bound to an L3
2826 * master device and the given device index is also enslaved to
2827 * that L3 master
2828 */
sk_dev_equal_l3scope(struct sock * sk,int dif)2829 static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
2830 {
2831 int mdif;
2832
2833 if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
2834 return true;
2835
2836 mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
2837 if (mdif && mdif == sk->sk_bound_dev_if)
2838 return true;
2839
2840 return false;
2841 }
2842
2843 void sock_def_readable(struct sock *sk);
2844
2845 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
2846 void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
2847 int sock_set_timestamping(struct sock *sk, int optname,
2848 struct so_timestamping timestamping);
2849
2850 void sock_enable_timestamps(struct sock *sk);
2851 void sock_no_linger(struct sock *sk);
2852 void sock_set_keepalive(struct sock *sk);
2853 void sock_set_priority(struct sock *sk, u32 priority);
2854 void sock_set_rcvbuf(struct sock *sk, int val);
2855 void sock_set_mark(struct sock *sk, u32 val);
2856 void sock_set_reuseaddr(struct sock *sk);
2857 void sock_set_reuseport(struct sock *sk);
2858 void sock_set_sndtimeo(struct sock *sk, s64 secs);
2859
2860 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
2861
2862 int sock_get_timeout(long timeo, void *optval, bool old_timeval);
2863 int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
2864 sockptr_t optval, int optlen, bool old_timeval);
2865
sk_is_readable(struct sock * sk)2866 static inline bool sk_is_readable(struct sock *sk)
2867 {
2868 if (sk->sk_prot->sock_is_readable)
2869 return sk->sk_prot->sock_is_readable(sk);
2870 return false;
2871 }
2872 #endif /* _SOCK_H */
2873