/linux/net/netfilter/

nf_conntrack_seqadj.c (all matches in nf_ct_sack_block_adjust(); sketch below):
     82  struct tcp_sack_block_wire *sack;   (local)
     85  sack = (void *)skb->data + sackoff;
     88  new_start_seq = htonl(ntohl(sack->start_seq) -
     96  new_end_seq = htonl(ntohl(sack->end_seq) -
     99  new_end_seq = htonl(ntohl(sack->end_seq) -
    104  ntohl(sack->end_seq), ntohl(new_end_seq));
    107  sack->start_seq, new_start_seq, false);
    109  sack->end_seq, new_end_seq, false);
    110  sack->start_seq = new_start_seq;
    111  sack->end_seq = new_end_seq;
    [all …]

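The nf_ct_sack_block_adjust() hits show the basic operation: each SACK block carries start/end sequence numbers in network byte order, and the sequence-adjustment code rewrites both by a byte offset with an ntohl()/htonl() round trip. Below is a minimal userspace sketch of that arithmetic; struct sack_block_wire here is a simplified stand-in and the fixed offset is an assumption, not the kernel's per-connection seqadj state.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct sack_block_wire {	/* both fields stay in network byte order */
	uint32_t start_seq;
	uint32_t end_seq;
};

/* Shift one SACK block by "offset" bytes with the same
 * ntohl()/htonl() round trip seen in the hits above. */
static void sack_block_adjust(struct sack_block_wire *sack, uint32_t offset)
{
	uint32_t new_start_seq = htonl(ntohl(sack->start_seq) - offset);
	uint32_t new_end_seq   = htonl(ntohl(sack->end_seq) - offset);

	sack->start_seq = new_start_seq;
	sack->end_seq   = new_end_seq;
}

int main(void)
{
	struct sack_block_wire blk = {
		.start_seq = htonl(1000),
		.end_seq   = htonl(1500),
	};

	sack_block_adjust(&blk, 100);	/* sequence space shifted by 100 bytes */
	printf("start=%u end=%u\n",
	       (unsigned)ntohl(blk.start_seq), (unsigned)ntohl(blk.end_seq));
	return 0;
}
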
nf_conntrack_proto_tcp.c (sketch below):
    438  if (after(tmp, *sack))   in tcp_sack()
    439  *sack = tmp;   in tcp_sack()
    463  __u32 seq, ack, sack, end, win, swin;   in tcp_in_window() (local)
    472  ack = sack = ntohl(tcph->ack_seq);   in tcp_in_window()
    478  tcp_sack(skb, dataoff, tcph, &sack);   in tcp_in_window()
    483  sack -= receiver_offset;   in tcp_in_window()
    569  ack = sack = receiver->td_end;   in tcp_in_window()
    577  ack = sack = receiver->td_end;   in tcp_in_window()
    604  before(sack, receiver->td_end + 1),   in tcp_in_window()
    620  swin = win + (sack - ack);   in tcp_in_window()
    [all …]

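tcp_in_window() folds SACK into the conntrack window check: tcp_sack() extracts the highest right edge reported by the peer, and swin = win + (sack - ack) treats data acknowledged only via SACK as already-consumed window. A hedged sketch of that idea; seq_before() and effective_window() are simplified stand-ins for the kernel's before() macro and window bookkeeping.

#include <stdint.h>
#include <stdio.h>

/* True if seq1 comes before seq2 in 32-bit sequence space (wrap-safe). */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* Window seen by the tracker: advertised window plus whatever the
 * receiver has already SACKed beyond the cumulative ACK. */
static uint32_t effective_window(uint32_t ack, uint32_t highest_sack,
				 uint32_t win)
{
	return win + (highest_sack - ack);
}

int main(void)
{
	uint32_t ack = 1000, sack = 1600, win = 4096;

	if (seq_before(ack, sack))
		printf("swin = %u\n", (unsigned)effective_window(ack, sack, win));
	return 0;
}
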
/linux/net/sctp/

outqueue.c (sketch below):
     47  struct sctp_sackhdr *sack,
   1216  struct sctp_sackhdr *sack)   in sctp_sack_update_unack_data() (argument)
   1224  frags = sack->variable;   in sctp_sack_update_unack_data()
   1264  sack_ctsn = ntohl(sack->cum_tsn_ack);   in sctp_outq_sack()
   1377  sack_a_rwnd = ntohl(sack->a_rwnd);   in sctp_outq_sack()
   1426  struct sctp_sackhdr *sack,   in sctp_check_transmitted() (argument)
   1440  sack_ctsn = ntohl(sack->cum_tsn_ack);   in sctp_check_transmitted()
   1467  if (sctp_acked(sack, tsn)) {   in sctp_check_transmitted()
   1507  if (sack->num_gap_ack_blocks &&   in sctp_check_transmitted()
   1762  __u32 ctsn = ntohl(sack->cum_tsn_ack);   in sctp_acked()
    [all …]

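sctp_check_transmitted() relies on sctp_acked() to decide whether a TSN is covered by the SACK: either at or below the cumulative TSN ack, or inside one of the gap ack blocks, whose bounds are 16-bit offsets from the cumulative TSN. A simplified standalone sketch of that test; the names are hypothetical and the cumulative TSN is passed in host order for brevity.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct gap_ack_block {
	uint16_t start;	/* offset from the cumulative TSN, network byte order */
	uint16_t end;
};

/* Serial-number arithmetic: a <= b in 32-bit TSN space. */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

/* Return 1 if tsn is acknowledged by the cumulative TSN or a gap ack block. */
static int sack_covers(uint32_t ctsn, const struct gap_ack_block *gabs,
		       uint16_t nblocks, uint32_t tsn)
{
	uint16_t i;

	if (tsn_lte(tsn, ctsn))
		return 1;

	for (i = 0; i < nblocks; i++) {
		uint32_t start = ctsn + ntohs(gabs[i].start);
		uint32_t end   = ctsn + ntohs(gabs[i].end);

		if (tsn_lte(start, tsn) && tsn_lte(tsn, end))
			return 1;
	}
	return 0;
}

int main(void)
{
	struct gap_ack_block gabs[1] = { { htons(2), htons(4) } };
	uint32_t ctsn = 100;

	/* 99 is cumulatively acked, 101 sits in the gap, 103 is in the block. */
	printf("%d %d %d\n", sack_covers(ctsn, gabs, 1, 99),
	       sack_covers(ctsn, gabs, 1, 101), sack_covers(ctsn, gabs, 1, 103));
	return 0;
}
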
output.c (all matches in sctp_packet_bundle_sack(); sketch below):
    296  struct sctp_chunk *sack;   (local)
    303  sack = sctp_make_sack(asoc);
    304  if (sack) {
    305  retval = __sctp_packet_append_chunk(pkt, sack);
    307  sctp_chunk_free(sack);

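output.c above, and associola.c and sm_sideeffect.c below, all follow the same pattern: sctp_make_sack() builds a SACK chunk, the caller queues or bundles it, and frees it if it cannot be appended. The sketch below only illustrates that make/append/free-on-failure shape; every type and function in it is a stand-in, not the kernel SCTP API.

#include <stdio.h>
#include <stdlib.h>

struct chunk { int type; };

/* Stand-in for sctp_make_sack(): allocate a SACK chunk (type 3 in RFC 4960). */
static struct chunk *make_sack(void)
{
	struct chunk *c = malloc(sizeof(*c));

	if (c)
		c->type = 3;
	return c;
}

/* Stand-in for appending to an outgoing packet; fails when there is no room. */
static int packet_append_chunk(int room_left, struct chunk *c)
{
	(void)c;
	return room_left > 0 ? 0 : -1;
}

int main(void)
{
	struct chunk *sack = make_sack();

	if (sack && packet_append_chunk(0, sack) != 0) {
		free(sack);	/* could not be bundled: drop the chunk */
		puts("sack freed, not bundled");
	}
	return 0;
}
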
associola.c (all matches in sctp_assoc_rwnd_increase()):
   1476  struct sctp_chunk *sack;   (local)
   1517  sack = sctp_make_sack(asoc);
   1518  if (!sack)
   1523  sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

sm_sideeffect.c (all matches in sctp_gen_sack()):
    143  struct sctp_chunk *sack;   (local)
    206  sack = sctp_make_sack(asoc);
    207  if (!sack) {
    215  sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

sm_make_chunk.c (all matches in sctp_make_sack(); sketch below):
    761  struct sctp_sackhdr sack;   (local)
    775  sack.cum_tsn_ack = htonl(ctsn);
    776  sack.a_rwnd = htonl(asoc->a_rwnd);
    777  sack.num_gap_ack_blocks = htons(num_gabs);
    778  sack.num_dup_tsns = htons(num_dup_tsns);
    780  len = sizeof(sack)
    822  sctp_addto_chunk(retval, sizeof(sack), &sack);

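sctp_make_sack() fills a struct sctp_sackhdr on the stack, converting the cumulative TSN, the advertised receive window, and the gap-ack / duplicate-TSN counts to network byte order, then sizes the chunk with 4 extra bytes per gap ack block and per duplicate TSN. A minimal sketch of that header construction; the struct mirrors the fixed SACK header from RFC 4960, but the helper and its length math are illustrative, not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Fixed part of an SCTP SACK chunk payload (RFC 4960, sec. 3.3.4). */
struct sackhdr {
	uint32_t cum_tsn_ack;
	uint32_t a_rwnd;
	uint16_t num_gap_ack_blocks;
	uint16_t num_dup_tsns;
};

/* Fill the header in network byte order and return the payload length:
 * fixed header + 4 bytes per gap ack block + 4 bytes per duplicate TSN. */
static size_t fill_sack(struct sackhdr *sack, uint32_t ctsn, uint32_t a_rwnd,
			uint16_t num_gabs, uint16_t num_dup_tsns)
{
	sack->cum_tsn_ack = htonl(ctsn);
	sack->a_rwnd = htonl(a_rwnd);
	sack->num_gap_ack_blocks = htons(num_gabs);
	sack->num_dup_tsns = htons(num_dup_tsns);

	return sizeof(*sack) + num_gabs * 4 + num_dup_tsns * 4;
}

int main(void)
{
	struct sackhdr sack;
	size_t len = fill_sack(&sack, 4242, 65535, 2, 1);

	printf("payload length = %zu bytes\n", len);	/* 12 + 8 + 4 = 24 */
	return 0;
}
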
sm_statefuns.c (all matches in sctp_sm_pull_sack(); sketch below):
   6294  struct sctp_sackhdr *sack;   (local)
   6302  sack = (struct sctp_sackhdr *) chunk->skb->data;
   6304  num_blocks = ntohs(sack->num_gap_ack_blocks);
   6305  num_dup_tsns = ntohs(sack->num_dup_tsns);
   6313  return sack;

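sctp_sm_pull_sack() casts the chunk payload to a struct sctp_sackhdr and then uses the declared gap-ack-block and duplicate-TSN counts to verify that the chunk really is that long before returning the pointer. A hedged sketch of that bounds check; the buffer handling and helper name are assumptions, not the kernel code.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Fixed part of the SCTP SACK chunk payload. */
struct sackhdr {
	uint32_t cum_tsn_ack;
	uint32_t a_rwnd;
	uint16_t num_gap_ack_blocks;
	uint16_t num_dup_tsns;
};

/* Return the SACK header inside buf, or NULL if the declared gap ack
 * blocks and duplicate TSNs would run past the end of the buffer. */
static const struct sackhdr *pull_sack(const void *buf, size_t buflen)
{
	const struct sackhdr *sack = buf;
	size_t need;

	if (buflen < sizeof(*sack))
		return NULL;

	need = sizeof(*sack)
	       + ntohs(sack->num_gap_ack_blocks) * sizeof(uint32_t)
	       + ntohs(sack->num_dup_tsns) * sizeof(uint32_t);

	return need <= buflen ? sack : NULL;
}

int main(void)
{
	uint32_t buf[4] = { 0 };	/* 16 bytes of "received" chunk payload */
	struct sackhdr hdr = {
		.cum_tsn_ack = htonl(1),
		.a_rwnd = htonl(1500),
		.num_gap_ack_blocks = htons(1),	/* needs 4 extra bytes: still fits */
		.num_dup_tsns = 0,
	};

	memcpy(buf, &hdr, sizeof(hdr));
	printf("valid: %s\n", pull_sack(buf, sizeof(buf)) ? "yes" : "no");
	return 0;
}
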
/linux/drivers/net/ethernet/chelsio/cxgb/

cpl5_cmd.h (bit-field members):
    184  u8 sack:1;
    188  u8 sack:1;

/linux/net/ipv4/

tcp_input.c (all matches in tcp_clean_rtx_queue(); sketch below):
   3219  struct tcp_sacktag_state *sack, bool ece_ack)   (argument)
   3286  tcp_rate_skb_delivered(sk, skb, sack->rate);
   3334  sack->rate->prior_delivered + 1 == tp->delivered &&
   3343  if (sack->first_sackt) {
   3344  sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
   3345  ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
   3348  ca_rtt_us, sack->rate);
   3390  .rtt_us = sack->rate->rtt_us };
   3393  (tp->delivered - sack->rate->prior_delivered);

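In tcp_clean_rtx_queue() the SACK path produces RTT samples the same way the cumulative-ACK path does: tcp_stamp_us_delta() subtracts the send timestamp of the first/last newly SACKed skb from the current tp->tcp_mstamp, and a zero first_sackt means no SACK sample this round. A minimal sketch of that timestamp arithmetic, with made-up microsecond values rather than the kernel's socket clock.

#include <stdint.h>
#include <stdio.h>

/* Microsecond delta between "now" and an earlier stamp; a zero stamp
 * means "no sample", mirroring the sack->first_sackt check above. */
static int64_t stamp_us_delta(uint64_t now_us, uint64_t then_us)
{
	return (int64_t)(now_us - then_us);
}

int main(void)
{
	uint64_t now = 5000000;		/* pretend current socket clock, us      */
	uint64_t first_sackt = 4998700;	/* send stamp of first newly SACKed skb  */
	uint64_t last_sackt  = 4999400;	/* send stamp of last newly SACKed skb   */

	if (first_sackt) {
		printf("sack_rtt_us = %lld\n",
		       (long long)stamp_us_delta(now, first_sackt));	/* 1300 */
		printf("ca_rtt_us   = %lld\n",
		       (long long)stamp_us_delta(now, last_sackt));	/*  600 */
	}
	return 0;
}
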
/linux/drivers/net/ethernet/chelsio/cxgb3/

t3_cpl.h (bit-field members):
    215  __u8 sack:1;
    219  __u8 sack:1;

/linux/drivers/net/ethernet/chelsio/cxgb4/

t4_msg.h (bit-field members; sketch below):
    385  __u8 sack:1;
    389  __u8 sack:1;

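The Chelsio CPL headers above (cpl5_cmd.h, t3_cpl.h, t4_msg.h) expose the TCP options negotiated for an offloaded connection as one-bit fields, and the cxgbit/cxgb4/chtls drivers listed further below test or set tcpopt.sack when building the pass-accept reply. A hedged sketch of such an options word; every field other than sack is an assumption, and the real definitions use __u8 bit-fields whose layout depends on endianness.

#include <stdio.h>

/* Illustrative per-connection TCP option flags for an offloaded socket;
 * only "sack" appears in the headers above, the rest are assumptions. */
struct tcp_options {
	unsigned int sack:1;	/* peer offered SACK-permitted */
	unsigned int tstamp:1;	/* peer offered TCP timestamps */
	unsigned int wsf:4;	/* window scale factor         */
};

int main(void)
{
	struct tcp_options opt = { .sack = 1, .tstamp = 1, .wsf = 7 };

	/* Mirrors the driver checks such as "if (req->tcpopt.sack)". */
	if (opt.sack)
		printf("enable SACK in the reply (wscale=%u)\n", (unsigned)opt.wsf);
	return 0;
}
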
/linux/Documentation/networking/

snmp_counter.rst:
    589  When the congestion control comes into Recovery state, if sack is
    590  used, TcpExtTCPSackRecovery increases 1, if sack is not used,
   1222  …ts sack cubic wscale:7,7 rto:204 rtt:0.98/0.49 mss:1448 pmtu:1500 rcvmss:536 advmss:1448 cwnd:10 b…

/linux/drivers/target/iscsi/cxgbit/

cxgbit_cm.c:
   1185  if (req->tcpopt.sack)   in cxgbit_pass_accept_rpl()

/linux/drivers/infiniband/hw/cxgb4/

cm.c:
   2453  if (enable_tcp_sack && req->tcpopt.sack)   in accept_cr()
   3988  req->tcpopt.sack = 1;   in build_cpl_pass_accept_req()

/linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/

chtls_cm.c:
   1052  if (req->tcpopt.sack)   in chtls_pass_accept_rpl()