Lines Matching refs: icsk (all hits are in net/ipv4/tcp_timer.c)
28 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_rto_to_user_timeout() local
33 if (!icsk->icsk_user_timeout) in tcp_clamp_rto_to_user_timeout()
34 return icsk->icsk_rto; in tcp_clamp_rto_to_user_timeout()
36 remaining = icsk->icsk_user_timeout - elapsed; in tcp_clamp_rto_to_user_timeout()
40 return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining)); in tcp_clamp_rto_to_user_timeout()
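
The five hits above all fall in tcp_clamp_rto_to_user_timeout(), which caps the retransmit timer so a TCP_USER_TIMEOUT set on the socket is honored. A minimal sketch of the whole helper, reconstructed around the matched lines; the unmatched start/elapsed handling is paraphrased and may differ slightly across kernel versions:

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 elapsed, start_ts;
        s32 remaining;

        start_ts = tcp_sk(sk)->retrans_stamp;   /* when retransmits started */
        if (!icsk->icsk_user_timeout)
                return icsk->icsk_rto;          /* no user cap: plain RTO */
        elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
        remaining = icsk->icsk_user_timeout - elapsed;  /* ms of budget left */
        if (remaining <= 0)
                return 1;                       /* budget spent: fire at once */

        /* icsk_rto is in jiffies but icsk_user_timeout is in ms, hence the
         * msecs_to_jiffies() conversion before taking the minimum.
         */
        return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}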
45 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_probe0_to_user_timeout() local
49 if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp) in tcp_clamp_probe0_to_user_timeout()
52 elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp; in tcp_clamp_probe0_to_user_timeout()
55 remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed; in tcp_clamp_probe0_to_user_timeout()
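
The sibling helper tcp_clamp_probe0_to_user_timeout() applies the same user-timeout cap to the zero-window probe interval. A sketch around the matched lines (the clamping of remaining to TCP_TIMEOUT_MIN is paraphrased and may vary by version):

static u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 remaining;
        s32 elapsed;

        /* No user timeout, or no probe timestamped yet: keep the caller's
         * interval unchanged.
         */
        if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
                return when;

        elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
        if (unlikely(elapsed < 0))
                elapsed = 0;
        remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
        remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

        return min_t(u32, remaining, when);
}

Note the contrast with the RTO clamp: this one measures elapsed time entirely in jiffies, against the tcp_jiffies32 stamp taken when probing began.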
160 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) in tcp_mtu_probing() argument
169 if (!icsk->icsk_mtup.enabled) { in tcp_mtu_probing()
170 icsk->icsk_mtup.enabled = 1; in tcp_mtu_probing()
171 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_probing()
173 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; in tcp_mtu_probing()
177 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_probing()
179 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtu_probing()
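
These hits are tcp_mtu_probing(), the PMTU black-hole recovery invoked from the write timeout: the first timeout merely arms MTU probing, later ones halve the MSS implied by search_low. A sketch around the matched lines; the exact sysctl clamps between the two matched assignments vary by kernel version:

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
        struct net *net = sock_net(sk);
        int mss;

        /* Black hole detection is opt-in via sysctl. */
        if (!net->ipv4.sysctl_tcp_mtu_probing)
                return;

        if (!icsk->icsk_mtup.enabled) {
                /* First write timeout: just arm probing. */
                icsk->icsk_mtup.enabled = 1;
                icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
        } else {
                /* Later timeouts: halve the current MSS estimate, clamp it,
                 * and push the result back as the new search floor.
                 */
                mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
                mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
                mss = max(mss, net->ipv4.sysctl_mtu_probe_floor);
                icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
        }
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}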
233 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timeout() local
240 if (icsk->icsk_retransmits) in tcp_write_timeout()
242 retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; in tcp_write_timeout()
243 expired = icsk->icsk_retransmits >= retry_until; in tcp_write_timeout()
247 tcp_mtu_probing(icsk, sk); in tcp_write_timeout()
254 const bool alive = icsk->icsk_rto < TCP_RTO_MAX; in tcp_write_timeout()
266 icsk->icsk_user_timeout); in tcp_write_timeout()
271 icsk->icsk_retransmits, in tcp_write_timeout()
272 icsk->icsk_rto, (int)expired); in tcp_write_timeout()
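
This group is tcp_write_timeout(), which decides whether retransmitting has gone on long enough to kill the connection. A condensed sketch around the matched lines; the helper calls not matched here are paraphrased from tcp_timer.c and differ in detail across versions:

static int tcp_write_timeout(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        bool expired = false, do_reset;
        int retry_until;

        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
                        dst_negative_advice(sk);
                /* Per-socket override, else the namespace sysctl. */
                retry_until = icsk->icsk_syn_retries ? :
                        net->ipv4.sysctl_tcp_syn_retries;
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
                if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
                        dst_negative_advice(sk);
                }
                retry_until = net->ipv4.sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
                        /* An orphan lingers only while the backed-off RTO
                         * has not yet hit its TCP_RTO_MAX ceiling.
                         */
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
                                !retransmits_timed_out(sk, retry_until, 0);
                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
        }
        if (!expired)
                expired = retransmits_timed_out(sk, retry_until,
                                                icsk->icsk_user_timeout);
        tcp_fastopen_active_detect_blackhole(sk, expired);

        /* Let BPF sock_ops programs observe each RTO event. */
        if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
                tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
                                  icsk->icsk_retransmits,
                                  icsk->icsk_rto, (int)expired);

        if (expired) {
                tcp_write_err(sk);
                return 1;
        }
        return 0;
}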
291 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_delack_timer_handler() local
296 !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) in tcp_delack_timer_handler()
299 if (time_after(icsk->icsk_ack.timeout, jiffies)) { in tcp_delack_timer_handler()
300 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); in tcp_delack_timer_handler()
303 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; in tcp_delack_timer_handler()
308 icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); in tcp_delack_timer_handler()
314 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_delack_timer_handler()
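
These hits belong to tcp_delack_timer_handler(), the delayed-ACK expiry path. A sketch around the matched lines, with the memory-reclaim bookkeeping of the real function dropped for brevity (and liable to differ by version):

void tcp_delack_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                return;

        /* Timer fired early (deadline was pushed back): re-arm it. */
        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer,
                               icsk->icsk_ack.timeout);
                return;
        }
        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (inet_csk_ack_scheduled(sk)) {
                if (!inet_csk_in_pingpong_mode(sk)) {
                        /* Delayed ACK missed: inflate ATO, capped by RTO. */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
                                                 icsk->icsk_rto);
                } else {
                        /* Interactive session went quiet: leave pingpong
                         * mode and deflate ATO back to the minimum.
                         */
                        inet_csk_exit_pingpong_mode(sk);
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                }
                tcp_mstamp_refresh(tcp_sk(sk));
                tcp_send_ack(sk);
        }
}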
338 struct inet_connection_sock *icsk = in tcp_delack_timer() local
339 from_timer(icsk, t, icsk_delack_timer); in tcp_delack_timer()
340 struct sock *sk = &icsk->icsk_inet.sk; in tcp_delack_timer()
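
The wrapper above shows the from_timer() idiom: recover the enclosing inet_connection_sock from the embedded timer_list, then the struct sock inside it. A sketch of the full wrapper, with the locked-socket deferral paraphrased:

static void tcp_delack_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_delack_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_delack_timer_handler(sk);
        } else {
                /* Socket busy in process context: defer the work to
                 * tcp_release_cb() via the tsq deferred bits.
                 */
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
                                      &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}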
357 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_probe_timer() local
363 icsk->icsk_probes_out = 0; in tcp_probe_timer()
364 icsk->icsk_probes_tstamp = 0; in tcp_probe_timer()
376 if (!icsk->icsk_probes_tstamp) in tcp_probe_timer()
377 icsk->icsk_probes_tstamp = tcp_jiffies32; in tcp_probe_timer()
378 else if (icsk->icsk_user_timeout && in tcp_probe_timer()
379 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >= in tcp_probe_timer()
380 msecs_to_jiffies(icsk->icsk_user_timeout)) in tcp_probe_timer()
385 const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; in tcp_probe_timer()
388 if (!alive && icsk->icsk_backoff >= max_probes) in tcp_probe_timer()
394 if (icsk->icsk_probes_out >= max_probes) { in tcp_probe_timer()
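
These hits are tcp_probe_timer(), the zero-window probe expiry. A sketch reconstructed around the matched lines; unmatched lines are paraphrased and may vary by version:

static void tcp_probe_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb = tcp_send_head(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int max_probes;

        /* Data in flight, or nothing queued to probe with: reset. */
        if (tp->packets_out || !skb) {
                icsk->icsk_probes_out = 0;
                icsk->icsk_probes_tstamp = 0;
                return;
        }

        /* Stamp the first probe; afterwards enforce TCP_USER_TIMEOUT by
         * wall-clock elapsed time rather than by probe count.
         */
        if (!icsk->icsk_probes_tstamp)
                icsk->icsk_probes_tstamp = tcp_jiffies32;
        else if (icsk->icsk_user_timeout &&
                 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
                 msecs_to_jiffies(icsk->icsk_user_timeout))
                goto abort;

        max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) <
                                   TCP_RTO_MAX;

                max_probes = tcp_orphan_retries(sk, alive);
                if (!alive && icsk->icsk_backoff >= max_probes)
                        goto abort;
                if (tcp_out_of_resources(sk, true))
                        return;
        }

        if (icsk->icsk_probes_out >= max_probes) {
abort:          tcp_write_err(sk);
        } else {
                tcp_send_probe0(sk);
        }
}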
408 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastopen_synack_timer() local
409 int max_retries = icsk->icsk_syn_retries ? : in tcp_fastopen_synack_timer()
420 if (icsk->icsk_retransmits == 1) in tcp_fastopen_synack_timer()
429 icsk->icsk_retransmits++; in tcp_fastopen_synack_timer()
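
These hits are tcp_fastopen_synack_timer(), the SYN-ACK retransmit path for Fast Open. A sketch around the matched lines; the signature and the "+ 1" retry allowance are as in recent kernels but may differ in older ones:

static void tcp_fastopen_synack_timer(struct sock *sk,
                                      struct request_sock *req)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /* Per-socket override, else the namespace sysctl plus one extra
         * retry allowed for Fast Open.
         */
        int max_retries = icsk->icsk_syn_retries ? :
            sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1;

        req->rsk_ops->syn_ack_timeout(req);

        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
                return;
        }
        /* Lower cwnd after a SYNACK timeout, like a regular RTO. */
        if (icsk->icsk_retransmits == 1)
                tcp_enter_loss(sk);
        /* Retransmit the SYN-ACK and back off the next deadline. */
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
        icsk->icsk_retransmits++;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT << req->num_timeout,
                                  TCP_RTO_MAX);
}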
452 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_retransmit_timer() local
515 if (icsk->icsk_retransmits == 0) { in tcp_retransmit_timer()
518 if (icsk->icsk_ca_state == TCP_CA_Recovery) { in tcp_retransmit_timer()
523 } else if (icsk->icsk_ca_state == TCP_CA_Loss) { in tcp_retransmit_timer()
525 } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) || in tcp_retransmit_timer()
538 icsk->icsk_retransmits++; in tcp_retransmit_timer()
564 icsk->icsk_backoff++; in tcp_retransmit_timer()
579 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { in tcp_retransmit_timer()
580 icsk->icsk_backoff = 0; in tcp_retransmit_timer()
581 icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); in tcp_retransmit_timer()
584 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); in tcp_retransmit_timer()
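
This large group is tcp_retransmit_timer(), the RTO handler proper. A condensed sketch around the matched lines; the validity checks and zero-window diagnostics near the top of the real function are elided, and unmatched lines are paraphrased:

void tcp_retransmit_timer(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (tcp_write_timeout(sk))
                return;

        if (icsk->icsk_retransmits == 0) {
                int mib_idx = 0;

                /* First RTO of this episode: attribute the failure to the
                 * congestion state we were in when the timer fired.
                 */
                if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        mib_idx = tcp_is_sack(tp) ?
                                  LINUX_MIB_TCPSACKRECOVERYFAIL :
                                  LINUX_MIB_TCPRENORECOVERYFAIL;
                } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                        mib_idx = LINUX_MIB_TCPLOSSFAILURES;
                } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
                           tp->sacked_out) {
                        mib_idx = tcp_is_sack(tp) ?
                                  LINUX_MIB_TCPSACKFAILURES :
                                  LINUX_MIB_TCPRENOFAILURES;
                }
                if (mib_idx)
                        __NET_INC_STATS(sock_net(sk), mib_idx);
        }

        tcp_enter_loss(sk);

        icsk->icsk_retransmits++;
        if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
                /* Local resource shortage: retry soon without backoff. */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          TCP_RESOURCE_PROBE_INTERVAL,
                                          TCP_RTO_MAX);
                return;
        }

        icsk->icsk_backoff++;

        /* Thin streams retry linearly for the first few RTOs; everyone
         * else doubles the RTO, capped at TCP_RTO_MAX.
         */
        if (sk->sk_state == TCP_ESTABLISHED &&
            (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
            tcp_stream_is_thin(tp) &&
            icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
                icsk->icsk_backoff = 0;
                icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
        } else {
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  tcp_clamp_rto_to_user_timeout(sk),
                                  TCP_RTO_MAX);
}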
598 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timer_handler() local
602 !icsk->icsk_pending) in tcp_write_timer_handler()
605 if (time_after(icsk->icsk_timeout, jiffies)) { in tcp_write_timer_handler()
606 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); in tcp_write_timer_handler()
611 event = icsk->icsk_pending; in tcp_write_timer_handler()
621 icsk->icsk_pending = 0; in tcp_write_timer_handler()
625 icsk->icsk_pending = 0; in tcp_write_timer_handler()
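
These hits are tcp_write_timer_handler(), which demultiplexes the shared retransmit timer into its four events. A sketch around the matched lines (the trailing memory reclaim is paraphrased):

void tcp_write_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event;

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !icsk->icsk_pending)
                return;

        /* Fired early: re-arm for the real deadline. */
        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                               icsk->icsk_timeout);
                return;
        }

        tcp_mstamp_refresh(tcp_sk(sk));
        event = icsk->icsk_pending;

        switch (event) {
        case ICSK_TIME_REO_TIMEOUT:
                tcp_rack_reo_timeout(sk);
                break;
        case ICSK_TIME_LOSS_PROBE:
                tcp_send_loss_probe(sk);
                break;
        case ICSK_TIME_RETRANS:
                icsk->icsk_pending = 0;
                tcp_retransmit_timer(sk);
                break;
        case ICSK_TIME_PROBE0:
                icsk->icsk_pending = 0;
                tcp_probe_timer(sk);
                break;
        }
}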
636 struct inet_connection_sock *icsk = in tcp_write_timer() local
637 from_timer(icsk, t, icsk_retransmit_timer); in tcp_write_timer()
638 struct sock *sk = &icsk->icsk_inet.sk; in tcp_write_timer()
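
tcp_write_timer() mirrors the delack wrapper above: same from_timer() recovery, same deferral when the socket is owned by a user context. A sketch:

static void tcp_write_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_write_timer_handler(sk);
        } else {
                /* As with the delack timer, defer to tcp_release_cb(). */
                if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
                                      &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}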
676 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_keepalive_timer() local
723 if ((icsk->icsk_user_timeout != 0 && in tcp_keepalive_timer()
724 elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) && in tcp_keepalive_timer()
725 icsk->icsk_probes_out > 0) || in tcp_keepalive_timer()
726 (icsk->icsk_user_timeout == 0 && in tcp_keepalive_timer()
727 icsk->icsk_probes_out >= keepalive_probes(tp))) { in tcp_keepalive_timer()
733 icsk->icsk_probes_out++; in tcp_keepalive_timer()
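
Only the give-up test and the probe accounting of tcp_keepalive_timer() match here. A sketch of that fragment in context, not the full timer; the surrounding idle-time computation and re-arming are paraphrased and simplified:

        /* Inside tcp_keepalive_timer(), once the connection has been idle
         * for at least keepalive_time_when(tp) jiffies:
         */
        if ((icsk->icsk_user_timeout != 0 &&
             elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
             icsk->icsk_probes_out > 0) ||
            (icsk->icsk_user_timeout == 0 &&
             icsk->icsk_probes_out >= keepalive_probes(tp))) {
                /* Either the user timeout expired with a probe already
                 * outstanding, or the probe budget is exhausted: reset.
                 */
                tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_write_err(sk);
        } else if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
                icsk->icsk_probes_out++;        /* probe went out: count it */
        }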