/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for INET connection oriented protocols.
 *
 *		Definitions for inet_connection_sock
 *
 * Authors:	Many people, see the TCP sources
 *
 * 		From code originally in TCP
 */
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/request_sock.h>

/* Cancel timers, when they are not required. */
#undef INET_CSK_CLEAR_TIMERS

struct inet_bind_bucket;
struct tcp_congestion_ops;

/*
 * Pointers to address related TCP functions
 * (i.e. things that depend on the address family)
 */
struct inet_connection_sock_af_ops {
	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
	int	    (*rebuild_header)(struct sock *sk);
	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req,
				      struct dst_entry *dst,
				      struct request_sock *req_unhash,
				      bool *own_req);
	u16	    net_header_len;
	u16	    net_frag_header_len;
	u16	    sockaddr_len;
	int	    (*setsockopt)(struct sock *sk, int level, int optname,
				  sockptr_t optval, unsigned int optlen);
	int	    (*getsockopt)(struct sock *sk, int level, int optname,
				  char __user *optval, int __user *optlen);
	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
	void	    (*mtu_reduced)(struct sock *sk);
};
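
/*
 * Each address family supplies one instance of this ops table and installs
 * it in icsk_af_ops during socket initialization.  As an illustrative sketch
 * only (field values loosely modelled on TCP's IPv4 table, ipv4_specific in
 * net/ipv4/tcp_ipv4.c; consult that file for the authoritative version, and
 * note my_ipv4_specific is a hypothetical name):
 *
 *	static const struct inet_connection_sock_af_ops my_ipv4_specific = {
 *		.queue_xmit	= ip_queue_xmit,
 *		.send_check	= tcp_v4_send_check,
 *		.rebuild_header	= inet_sk_rebuild_header,
 *		.conn_request	= tcp_v4_conn_request,
 *		.syn_recv_sock	= tcp_v4_syn_recv_sock,
 *		.net_header_len	= sizeof(struct iphdr),
 *		.sockaddr_len	= sizeof(struct sockaddr_in),
 *		.setsockopt	= ip_setsockopt,
 *		.getsockopt	= ip_getsockopt,
 *		.addr2sockaddr	= inet_csk_addr2sockaddr,
 *		.mtu_reduced	= tcp_v4_mtu_reduced,
 *	};
 *
 * Dual-stack sockets swap this pointer when they fall back from the IPv6
 * table to the mapped-IPv4 one.
 */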

/** inet_connection_sock - INET connection oriented sock
 *
 * @icsk_accept_queue:	   FIFO of established children
 * @icsk_bind_hash:	   Bind node
 * @icsk_timeout:	   Timeout
 * @icsk_retransmit_timer: Resend (no ack)
 * @icsk_rto:		   Retransmit timeout
 * @icsk_pmtu_cookie:	   Last pmtu seen by socket
 * @icsk_ca_ops:	   Pluggable congestion control hook
 * @icsk_af_ops:	   Operations which are AF_INET{4,6} specific
 * @icsk_ulp_ops:	   Pluggable ULP control hook
 * @icsk_ulp_data:	   ULP private data
 * @icsk_clean_acked:	   Clean acked data hook
 * @icsk_listen_portaddr_node:	hash to the portaddr listener hashtable
 * @icsk_ca_state:	   Congestion control state
 * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
 * @icsk_pending:	   Scheduled timer event
 * @icsk_backoff:	   Backoff
 * @icsk_syn_retries:      Number of allowed SYN (or equivalent) retries
 * @icsk_probes_out:	   unanswered 0 window probes
 * @icsk_ext_hdr_len:	   Network protocol overhead (IP/IPv6 options)
 * @icsk_ack:		   Delayed ACK control data
 * @icsk_mtup:		   MTU probing control data
 * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
 * @icsk_user_timeout:	   TCP_USER_TIMEOUT value
 */
struct inet_connection_sock {
	/* inet_sock has to be the first member! */
	struct inet_sock	  icsk_inet;
	struct request_sock_queue icsk_accept_queue;
	struct inet_bind_bucket	  *icsk_bind_hash;
	unsigned long		  icsk_timeout;
	struct timer_list	  icsk_retransmit_timer;
	struct timer_list	  icsk_delack_timer;
	__u32			  icsk_rto;
	__u32			  icsk_rto_min;
	__u32			  icsk_delack_max;
	__u32			  icsk_pmtu_cookie;
	const struct tcp_congestion_ops *icsk_ca_ops;
	const struct inet_connection_sock_af_ops *icsk_af_ops;
	const struct tcp_ulp_ops  *icsk_ulp_ops;
	void __rcu		  *icsk_ulp_data;
	void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
	struct hlist_node	  icsk_listen_portaddr_node;
	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
	__u8			  icsk_ca_state:5,
				  icsk_ca_initialized:1,
				  icsk_ca_setsockopt:1,
				  icsk_ca_dst_locked:1;
	__u8			  icsk_retransmits;
	__u8			  icsk_pending;
	__u8			  icsk_backoff;
	__u8			  icsk_syn_retries;
	__u8			  icsk_probes_out;
	__u16			  icsk_ext_hdr_len;
	struct {
		__u8		  pending;	 /* ACK is pending			   */
		__u8		  quick;	 /* Scheduled number of quick acks	   */
		__u8		  pingpong;	 /* The session is interactive		   */
		__u8		  retry;	 /* Number of attempts			   */
		__u32		  ato;		 /* Predicted tick of soft clock	   */
		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
		__u32		  lrcvtime;	 /* timestamp of last received data packet */
		__u16		  last_seg_size; /* Size of last incoming segment	   */
		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions	   */
	} icsk_ack;
	struct {
		/* Range of MTUs to search */
		int		  search_high;
		int		  search_low;

		/* Information on the current probe. */
		u32		  probe_size:31,
		/* Is the MTUP feature enabled for this connection? */
				  enabled:1;

		u32		  probe_timestamp;
	} icsk_mtup;
	u32			  icsk_probes_tstamp;
	u32			  icsk_user_timeout;

	u64			  icsk_ca_priv[104 / sizeof(u64)];
#define ICSK_CA_PRIV_SIZE	  sizeof_field(struct inet_connection_sock, icsk_ca_priv)
};

#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
#define ICSK_TIME_DACK		2	/* Delayed ack timer */
#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
#define ICSK_TIME_LOSS_PROBE	5	/* Tail loss probe timer */
#define ICSK_TIME_REO_TIMEOUT	6	/* Reordering timer */

static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	return (struct inet_connection_sock *)sk;
}

static inline void *inet_csk_ca(const struct sock *sk)
{
	return (void *)inet_csk(sk)->icsk_ca_priv;
}
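
/*
 * Congestion control modules keep their per-connection state in
 * icsk_ca_priv and reach it through inet_csk_ca().  A minimal sketch of
 * the pattern (struct and function names here are hypothetical, not a
 * real module; tcp_cubic and tcp_bbr are real examples of it):
 *
 *	struct my_ca {
 *		u32 epoch_start;
 *		u32 cnt;
 *	};
 *
 *	static void my_ca_init(struct sock *sk)
 *	{
 *		struct my_ca *ca = inet_csk_ca(sk);
 *
 *		ca->epoch_start = 0;
 *		ca->cnt = 0;
 *	}
 *
 * Modules typically assert at init time that their state fits, e.g.
 * BUILD_BUG_ON(sizeof(struct my_ca) > ICSK_CA_PRIV_SIZE).
 */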

struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority);

enum inet_csk_ack_state_t {
	ICSK_ACK_SCHED	= 1,
	ICSK_ACK_TIMER  = 2,
	ICSK_ACK_PUSHED = 4,
	ICSK_ACK_PUSHED2 = 8,
	ICSK_ACK_NOW = 16	/* Send the next ACK immediately (once) */
};

void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *),
			       void (*delack_handler)(struct timer_list *),
			       void (*keepalive_handler)(struct timer_list *));
void inet_csk_clear_xmit_timers(struct sock *sk);
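
/*
 * A connection oriented protocol hooks its retransmit, delayed-ACK and
 * keepalive callbacks in one call, usually from its init path.  A sketch
 * modelled on what TCP does in tcp_init_xmit_timers() (handler names here
 * are hypothetical; see net/ipv4/tcp_timer.c for the real wiring):
 *
 *	void my_proto_init_xmit_timers(struct sock *sk)
 *	{
 *		inet_csk_init_xmit_timers(sk, &my_write_timer,
 *					  &my_delack_timer,
 *					  &my_keepalive_timer);
 *	}
 *
 * Each handler has the void (*)(struct timer_list *) signature and recovers
 * its socket with from_timer()/container_of().
 */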

static inline void inet_csk_schedule_ack(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
}

static inline int inet_csk_ack_scheduled(const struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
}

static inline void inet_csk_delack_init(struct sock *sk)
{
	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}

void inet_csk_delete_keepalive_timer(struct sock *sk);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);

static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
		icsk->icsk_pending = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
	} else if (what == ICSK_TIME_DACK) {
		icsk->icsk_ack.pending = 0;
		icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
		sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}

/*
 *	Reset the retransmission timer
 */
static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
					     unsigned long when,
					     const unsigned long max_when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (when > max_when) {
		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
			 sk, what, when, (void *)_THIS_IP_);
		when = max_when;
	}

	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
	    what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
		icsk->icsk_pending = what;
		icsk->icsk_timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
	} else if (what == ICSK_TIME_DACK) {
		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
		icsk->icsk_ack.timeout = jiffies + when;
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
	} else {
		pr_debug("inet_csk BUG: unknown timer value\n");
	}
}
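
/*
 * Note that ICSK_TIME_RETRANS, ICSK_TIME_PROBE0, ICSK_TIME_LOSS_PROBE and
 * ICSK_TIME_REO_TIMEOUT all share the retransmit timer (icsk_pending says
 * which event is armed); only ICSK_TIME_DACK uses the delack timer.  A
 * typical TCP-style re-arm after transmitting data looks roughly like:
 *
 *	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 *				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
 *
 * where TCP_RTO_MAX is TCP's clamp from <net/tcp.h>; other protocols pass
 * their own upper bound as max_when.
 */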

static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
		     unsigned long max_when)
{
	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
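
/*
 * Exponential backoff: each backoff step doubles the base RTO, clamped to
 * max_when.  With purely illustrative values, icsk_rto equivalent to 200ms
 * and icsk_backoff == 3:
 *
 *	unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 *	// when == min(200ms << 3, TCP_RTO_MAX), i.e. 1.6s worth of jiffies
 *
 * The widening to u64 before the shift avoids overflowing an unsigned long
 * intermediate on 32-bit with large backoff values.
 */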

struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);

int inet_csk_get_port(struct sock *sk, unsigned short snum);

struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
				     const struct request_sock *req);
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req);

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req,
					 bool own_req);

static inline void inet_csk_reqsk_queue_added(struct sock *sk)
{
	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
{
	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}

static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}
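
/*
 * Listener side: a conn_request handler consults the request queue before
 * minting a new request sock.  A simplified sketch of the pattern TCP uses
 * in tcp_conn_request() (my_conn_request is hypothetical; SYN cookies and
 * error handling omitted):
 *
 *	static int my_conn_request(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (inet_csk_reqsk_queue_is_full(sk))
 *			goto drop;	// too many embryonic requests
 *		if (sk_acceptq_is_full(sk))
 *			goto drop;	// accept queue already full
 *		// ... allocate and fill a request_sock, then:
 *		// inet_csk_reqsk_queue_hash_add(sk, req, timeout);
 *		return 0;
 *	drop:
 *		return 0;
 *	}
 */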

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);

static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	this_cpu_inc(*sk->sk_prot->orphan_count);
}

void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);

/*
 * LISTEN is a special case for poll..
 */
static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
	return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
			(EPOLLIN | EPOLLRDNORM) : 0;
}

int inet_csk_listen_start(struct sock *sk, int backlog);
void inet_csk_listen_stop(struct sock *sk);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);

/* update the fast reuse flag when adding a socket */
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk);

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);

#define TCP_PINGPONG_THRESH	3

static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
}

static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
{
	inet_csk(sk)->icsk_ack.pingpong = 0;
}

static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{
	return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}

static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.pingpong < U8_MAX)
		icsk->icsk_ack.pingpong++;
}
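
/*
 * "Pingpong" tracks whether the connection looks interactive
 * (request/response traffic).  TCP bumps the counter when it sends data
 * shortly after receiving some, and only treats the session as interactive
 * once the counter reaches TCP_PINGPONG_THRESH; interactive sessions favour
 * delayed ACKs so they can piggyback on outgoing data.  A rough sketch,
 * simplified from TCP's usage (the two ACK helpers are hypothetical):
 *
 *	// on transmit, if data was received recently:
 *	inet_csk_inc_pingpong_cnt(sk);
 *
 *	// on receive, when deciding how to ACK:
 *	if (inet_csk_in_pingpong_mode(sk))
 *		schedule_delayed_ack(sk);	// hypothetical helper
 *	else
 *		send_ack_now(sk);		// hypothetical helper
 */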

static inline bool inet_csk_has_ulp(struct sock *sk)
{
	return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
}

#endif /* _INET_CONNECTION_SOCK_H */