/linux/tools/testing/selftests/bpf/progs/

test_tcp_check_syncookie_kern.c
    28   struct tcphdr *tcph) in gen_syncookie() argument
    30   __u32 thlen = tcph->doff * 4; in gen_syncookie()
    32   if (tcph->syn && !tcph->ack) { in gen_syncookie()
    53   struct tcphdr *tcph; in check_syncookie() local
    74   if (tcph + 1 > data_end) in check_syncookie()
    80   tup.ipv4.dport = tcph->dest; in check_syncookie()
    91   tcph); in check_syncookie()
    94   tcph, sizeof(*tcph)); in check_syncookie()
    106  if (tcph + 1 > data_end) in check_syncookie()
    123  tcph); in check_syncookie()
    [all …]

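The bounds checks at lines 74 and 106 above are the pattern every BPF program with direct packet access must follow: any pointer derived from the packet has to be validated against data_end before it is dereferenced, or the verifier rejects the program. Below is a minimal sketch of that pattern in a stand-alone XDP program; it is not the selftest itself, and the program name, the logged message and the assumption of an option-less IPv4 header are mine.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_spot_syn(struct xdp_md *ctx)
{
        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        struct ethhdr *eth = data;
        struct iphdr *iph;
        struct tcphdr *tcph;

        if ((void *)(eth + 1) > data_end || eth->h_proto != bpf_htons(ETH_P_IP))
                return XDP_PASS;

        iph = (void *)(eth + 1);
        if ((void *)(iph + 1) > data_end || iph->protocol != IPPROTO_TCP)
                return XDP_PASS;

        /* assumes no IPv4 options (ihl == 5) to keep the sketch short */
        tcph = (void *)(iph + 1);
        if ((void *)(tcph + 1) > data_end)
                return XDP_PASS;

        if (tcph->syn && !tcph->ack)
                bpf_printk("SYN for port %d", bpf_ntohs(tcph->dest));

        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
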
cgroup_skb_sk_lookup_kern.c
    31   const struct tcphdr *tcph) in set_tuple() argument
    35   tuple->ipv6.sport = tcph->dest; in set_tuple()
    36   tuple->ipv6.dport = tcph->source; in set_tuple()
    41   const struct tcphdr *tcph) in is_allowed_peer_cg() argument
    48   set_tuple(&tuple, ip6h, tcph); in is_allowed_peer_cg()
    71   struct tcphdr tcph; in ingress_lookup() local
    86   if (bpf_skb_load_bytes(skb, sizeof(ip6h), &tcph, sizeof(tcph))) in ingress_lookup()
    92   if (tcph.dest != g_serv_port) in ingress_lookup()
    95   return is_allowed_peer_cg(skb, &ip6h, &tcph); in ingress_lookup()

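Line 86 shows the other way to read headers from BPF: instead of direct packet access, bpf_skb_load_bytes() copies the fixed TCP header into a stack variable, the usual approach in cgroup/skb programs, where offset 0 is the start of the network header. A hedged sketch follows, reusing the includes from the XDP sketch above plus <linux/ipv6.h>; the program name and the port number are invented for illustration.

SEC("cgroup_skb/ingress")
int block_port_9000(struct __sk_buff *skb)
{
        struct ipv6hdr ip6h;
        struct tcphdr tcph;

        if (skb->protocol != bpf_htons(ETH_P_IPV6))
                return 1;                       /* 1 == allow, 0 == drop */

        /* copy the IPv6 and TCP headers onto the BPF stack */
        if (bpf_skb_load_bytes(skb, 0, &ip6h, sizeof(ip6h)))
                return 1;
        if (ip6h.nexthdr != IPPROTO_TCP)
                return 1;
        if (bpf_skb_load_bytes(skb, sizeof(ip6h), &tcph, sizeof(tcph)))
                return 1;

        return tcph.dest == bpf_htons(9000) ? 0 : 1;
}
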
test_tc_tunnel.c
    93   struct tcphdr tcph; in __encap_ipv4() local
    142  &tcph, sizeof(tcph)) < 0) in __encap_ipv4()
    145  if (tcph.dest != __bpf_constant_htons(cfg_port)) in __encap_ipv4()
    261  struct tcphdr tcph; in __encap_ipv6() local
    273  &tcph, sizeof(tcph)) < 0) in __encap_ipv6()
    276  if (tcph.dest != __bpf_constant_htons(cfg_port)) in __encap_ipv6()

/linux/net/netfilter/

nf_conntrack_seqadj.c
    76   struct tcphdr *tcph, in nf_ct_sack_block_adjust() argument
    127  optend = protoff + tcph->doff * 4; in nf_ct_sack_adjust()
    132  tcph = (void *)skb->data + protoff; in nf_ct_sack_adjust()
    169  struct tcphdr *tcph; in nf_ct_seq_adjust() local
    182  tcph = (void *)skb->data + protoff; in nf_ct_seq_adjust()
    190  inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false); in nf_ct_seq_adjust()
    193  tcph->seq = newseq; in nf_ct_seq_adjust()
    195  if (!tcph->ack) in nf_ct_seq_adjust()
    205  inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, in nf_ct_seq_adjust()
    208  ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), in nf_ct_seq_adjust()
    [all …]

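Line 190 is the incremental-checksum idiom that recurs throughout this listing: when a header field changes, inet_proto_csum_replace4() patches tcph->check from the old and new values instead of recomputing the whole checksum. A minimal sketch, assuming the caller has already made the header writable; the demo_ name is not from the kernel.

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

/* Rewrite the sequence number of the TCP header at @protoff and fix the
 * checksum incrementally.  The final 'false' says the changed bytes are not
 * part of the TCP pseudo-header. */
static void demo_rewrite_seq(struct sk_buff *skb, unsigned int protoff,
                             __be32 newseq)
{
        struct tcphdr *tcph = (void *)skb->data + protoff;

        inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
        tcph->seq = newseq;
}
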
xt_TCPMSS.c
    78   struct tcphdr *tcph; in tcpmss_mangle_packet() local
    96   tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); in tcpmss_mangle_packet()
    97   tcp_hdrlen = tcph->doff * 4; in tcpmss_mangle_packet()
    116  opt = (u_int8_t *)tcph; in tcpmss_mangle_packet()
    133  inet_proto_csum_replace2(&tcph->check, skb, in tcpmss_mangle_packet()
    176  opt = (u_int8_t *)tcph + sizeof(struct tcphdr); in tcpmss_mangle_packet()
    179  inet_proto_csum_replace2(&tcph->check, skb, in tcpmss_mangle_packet()
    188  oldval = ((__be16 *)tcph)[6]; in tcpmss_mangle_packet()
    189  tcph->doff += TCPOLEN_MSS/4; in tcpmss_mangle_packet()
    190  inet_proto_csum_replace2(&tcph->check, skb, in tcpmss_mangle_packet()
    [all …]

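The core of the MSS target is a walk over the TCP option area looking for TCPOPT_MSS, clamping the value and fixing the checksum for the 16-bit word that changed. Below is a simplified sketch of that walk, with its own option-length helper (the one in xt_TCPMSS.c is file-local); it assumes the header has already been pulled and made writable, and the demo_ names are mine.

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/tcp.h>

static unsigned int demo_optlen(const u8 *opt, unsigned int off)
{
        /* EOL and NOP are one byte; every other option carries a length */
        if (opt[off] <= TCPOPT_NOP || opt[off + 1] == 0)
                return 1;
        return opt[off + 1];
}

static void demo_clamp_mss(struct sk_buff *skb, struct tcphdr *tcph, u16 newmss)
{
        unsigned int i, tcp_hdrlen = tcph->doff * 4;
        u8 *opt = (u8 *)tcph;

        for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS;
             i += demo_optlen(opt, i)) {
                u16 oldmss;

                if (opt[i] != TCPOPT_MSS || opt[i + 1] != TCPOLEN_MSS)
                        continue;

                oldmss = (opt[i + 2] << 8) | opt[i + 3];
                if (oldmss <= newmss)
                        return;

                opt[i + 2] = (newmss & 0xff00) >> 8;
                opt[i + 3] = newmss & 0x00ff;
                inet_proto_csum_replace2(&tcph->check, skb,
                                         htons(oldmss), htons(newmss), false);
                return;
        }
}
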
nft_exthdr.c
    168  struct tcphdr *tcph; in nft_tcp_header_pointer() local
    173  tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer); in nft_tcp_header_pointer()
    174  if (!tcph) in nft_tcp_header_pointer()
    177  *tcphdr_len = __tcp_hdrlen(tcph); in nft_tcp_header_pointer()
    192  struct tcphdr *tcph; in nft_exthdr_tcp_eval() local
    196  if (!tcph) in nft_exthdr_tcp_eval()
    199  opt = (u8 *)tcph; in nft_exthdr_tcp_eval()
    234  struct tcphdr *tcph; in nft_exthdr_tcp_set_eval() local
    238  if (!tcph) in nft_exthdr_tcp_set_eval()
    241  opt = (u8 *)tcph; in nft_exthdr_tcp_set_eval()
    [all …]

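Line 173 shows the skb_header_pointer() idiom: if the skb is non-linear the header is copied into a caller-supplied buffer, so the returned pointer may alias either the skb head or that buffer and must be treated as read-only. A small sketch of a safe read-side helper; the demo_ name and the doff sanity check are mine.

#include <linux/skbuff.h>
#include <linux/tcp.h>

static const struct tcphdr *demo_tcp_header(const struct sk_buff *skb,
                                            unsigned int thoff,
                                            struct tcphdr *buffer)
{
        const struct tcphdr *tcph;

        tcph = skb_header_pointer(skb, thoff, sizeof(*tcph), buffer);
        if (!tcph || tcph->doff * 4 < sizeof(*tcph))
                return NULL;
        return tcph;
}
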
xt_TCPOPTSTRIP.c
    34   struct tcphdr *tcph, _th; in tcpoptstrip_mangle_packet() local
    44   tcph = skb_header_pointer(skb, tcphoff, sizeof(_th), &_th); in tcpoptstrip_mangle_packet()
    45   if (!tcph) in tcpoptstrip_mangle_packet()
    48   tcp_hdrlen = tcph->doff * 4; in tcpoptstrip_mangle_packet()
    56   tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); in tcpoptstrip_mangle_packet()
    57   opt = (u8 *)tcph; in tcpoptstrip_mangle_packet()
    79   inet_proto_csum_replace2(&tcph->check, skb, htons(o), in tcpoptstrip_mangle_packet()

nf_conntrack_proto_tcp.c
    272  else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET); in get_conntrack_index()
    315  + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0)); in segment_seq_plus_len()
    329  const struct tcphdr *tcph, in tcp_options() argument
    454  const struct tcphdr *tcph, in tcp_in_window() argument
    471  seq = ntohl(tcph->seq); in tcp_in_window()
    473  win_raw = ntohs(tcph->window); in tcp_in_window()
    501  if (tcph->syn) { in tcp_in_window()
    520  if (!tcph->ack) in tcp_in_window()
    565  if (!(tcph->ack)) { in tcp_in_window()
    614  if (!tcph->syn) in tcp_in_window()
    [all …]

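The fragment at line 315 encodes a small but important rule of TCP sequence arithmetic: SYN and FIN each consume one unit of sequence space, so the end of a segment is seq + payload length + syn + fin. Spelled out as a stand-alone helper (the demo_ name is mine):

#include <linux/tcp.h>
#include <linux/types.h>

static u32 demo_segment_end_seq(u32 seq, unsigned int payload_len,
                                const struct tcphdr *tcph)
{
        /* SYN and FIN occupy sequence space even though they carry no data */
        return seq + payload_len + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0);
}
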
nf_nat_helper.c
    95   struct tcphdr *tcph; in __nf_nat_mangle_tcp_packet() local
    106  tcph = (void *)skb->data + protoff; in __nf_nat_mangle_tcp_packet()
    109  mangle_contents(skb, protoff + tcph->doff*4, in __nf_nat_mangle_tcp_packet()
    115  tcph, &tcph->check, datalen, oldlen); in __nf_nat_mangle_tcp_packet()
    118  nf_ct_seqadj_set(ct, ctinfo, tcph->seq, in __nf_nat_mangle_tcp_packet()

nf_flow_table_ip.c
    26   struct tcphdr *tcph; in nf_flow_state_check() local
    31   tcph = (void *)(skb_network_header(skb) + thoff); in nf_flow_state_check()
    32   if (unlikely(tcph->fin || tcph->rst)) { in nf_flow_state_check()
    43   struct tcphdr *tcph; in nf_flow_nat_ip_tcp() local
    45   tcph = (void *)(skb_network_header(skb) + thoff); in nf_flow_nat_ip_tcp()
    46   inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true); in nf_flow_nat_ip_tcp()
    418  struct tcphdr *tcph; in nf_flow_nat_ipv6_tcp() local
    420  tcph = (void *)(skb_network_header(skb) + thoff); in nf_flow_nat_ipv6_tcp()
    421  inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32, in nf_flow_nat_ipv6_tcp()

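Line 46 is the counterpart to the seqadj sketch above: here an IP address changes, and because addresses are covered by the TCP pseudo-header the final argument is true, which also keeps checksum-offloaded (CHECKSUM_PARTIAL) skbs consistent. A hedged sketch, with a demo_ name of my own:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void demo_nat_tcp_for_new_addr(struct sk_buff *skb, unsigned int thoff,
                                      __be32 old_addr, __be32 new_addr)
{
        struct tcphdr *tcph = (void *)(skb_network_header(skb) + thoff);

        /* 'true': the rewritten address is part of the pseudo-header */
        inet_proto_csum_replace4(&tcph->check, skb, old_addr, new_addr, true);
}
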
nft_flow_offload.c
    277  struct tcphdr _tcph, *tcph = NULL; in nft_flow_offload_eval() local
    294  tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), in nft_flow_offload_eval()
    296  if (unlikely(!tcph || tcph->fin || tcph->rst)) in nft_flow_offload_eval()
    326  if (tcph) { in nft_flow_offload_eval()

nf_conntrack_pptp.c
    521  const struct tcphdr *tcph; in conntrack_pptp_help() local
    546  tcph = skb_header_pointer(skb, nexthdr_off, sizeof(_tcph), &_tcph); in conntrack_pptp_help()
    547  if (!tcph) in conntrack_pptp_help()
    550  nexthdr_off += tcph->doff * 4; in conntrack_pptp_help()
    551  datalen = tcplen - tcph->doff * 4; in conntrack_pptp_help()

/linux/net/ipv4/netfilter/

ipt_ECN.c
    47   struct tcphdr _tcph, *tcph; in set_ect_tcp() local
    52   if (!tcph) in set_ect_tcp()
    56   tcph->ece == einfo->proto.tcp.ece) && in set_ect_tcp()
    58   tcph->cwr == einfo->proto.tcp.cwr)) in set_ect_tcp()
    61   if (skb_ensure_writable(skb, ip_hdrlen(skb) + sizeof(*tcph))) in set_ect_tcp()
    63   tcph = (void *)ip_hdr(skb) + ip_hdrlen(skb); in set_ect_tcp()
    65   oldval = ((__be16 *)tcph)[6]; in set_ect_tcp()
    67   tcph->ece = einfo->proto.tcp.ece; in set_ect_tcp()
    69   tcph->cwr = einfo->proto.tcp.cwr; in set_ect_tcp()
    71   inet_proto_csum_replace2(&tcph->check, skb, in set_ect_tcp()
    [all …]

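Line 65 relies on the TCP header layout: 16-bit word index 6 (bytes 12 and 13) holds the data offset, reserved bits and all flag bits, so flipping flags can be checksummed as a single 16-bit replacement. A sketch that clears ECE/CWR the same way; the demo_ name is mine, and the caller is assumed to have made the header writable, e.g. with skb_ensure_writable().

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void demo_clear_ece_cwr(struct sk_buff *skb, struct tcphdr *tcph)
{
        __be16 oldval = ((__be16 *)tcph)[6];   /* doff + reserved + flags */

        tcph->ece = 0;
        tcph->cwr = 0;
        inet_proto_csum_replace2(&tcph->check, skb, oldval,
                                 ((__be16 *)tcph)[6], false);
}
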
nf_reject_ipv4.c
    195  struct tcphdr *tcph; in nf_reject_ip_tcphdr_put() local
    198  tcph = skb_put_zero(nskb, sizeof(struct tcphdr)); in nf_reject_ip_tcphdr_put()
    199  tcph->source = oth->dest; in nf_reject_ip_tcphdr_put()
    200  tcph->dest = oth->source; in nf_reject_ip_tcphdr_put()
    201  tcph->doff = sizeof(struct tcphdr) / 4; in nf_reject_ip_tcphdr_put()
    204  tcph->seq = oth->ack_seq; in nf_reject_ip_tcphdr_put()
    206  tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + in nf_reject_ip_tcphdr_put()
    209  tcph->ack = 1; in nf_reject_ip_tcphdr_put()
    212  tcph->rst = 1; in nf_reject_ip_tcphdr_put()
    213  tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr, in nf_reject_ip_tcphdr_put()
    [all …]

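The hits above outline how a TCP reset is composed in response to an offending segment: ports are swapped, and the reset either takes its sequence number from the peer's ACK or, when the segment carried no ACK, answers with an ACK that covers the segment, with SYN and FIN counting as one sequence unit each. The following is a userspace illustration of just those field rules, not the kernel function; the pseudo-header checksum is deliberately left to the caller.

#include <linux/tcp.h>      /* Linux-style struct tcphdr field names */
#include <arpa/inet.h>
#include <string.h>

static void fill_tcp_reset(struct tcphdr *rst, const struct tcphdr *oth,
                           unsigned int otcplen)   /* TCP header + payload */
{
        memset(rst, 0, sizeof(*rst));
        rst->source = oth->dest;
        rst->dest   = oth->source;
        rst->doff   = sizeof(*rst) / 4;
        rst->rst    = 1;

        if (oth->ack) {
                rst->seq = oth->ack_seq;
        } else {
                rst->ack = 1;
                rst->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
                                     otcplen - oth->doff * 4);
        }
        /* rst->check must still be computed over the TCP pseudo-header */
}
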
/linux/net/netfilter/ipvs/

ip_vs_proto_tcp.c
    112  tcph->check = in tcp_fast_csum_update()
    118  tcph->check = in tcp_fast_csum_update()
    133  tcph->check = in tcp_partial_csum_update()
    139  tcph->check = in tcp_partial_csum_update()
    150  struct tcphdr *tcph; in tcp_snat_handler() local
    183  tcph->source = cp->vport; in tcp_snat_handler()
    199  tcph->check = 0; in tcp_snat_handler()
    218  (char*)&(tcph->check) - (char*)tcph); in tcp_snat_handler()
    228  struct tcphdr *tcph; in tcp_dnat_handler() local
    264  tcph->dest = cp->dport; in tcp_dnat_handler()
    [all …]

/linux/net/ipv6/netfilter/

nf_reject_ipv6.c
    225  struct tcphdr *tcph; in nf_reject_ip6_tcphdr_put() local
    232  tcph->source = oth->dest; in nf_reject_ip6_tcphdr_put()
    233  tcph->dest = oth->source; in nf_reject_ip6_tcphdr_put()
    237  tcph->seq = oth->ack_seq; in nf_reject_ip6_tcphdr_put()
    238  tcph->ack_seq = 0; in nf_reject_ip6_tcphdr_put()
    243  tcph->seq = 0; in nf_reject_ip6_tcphdr_put()
    248  tcph->rst = 1; in nf_reject_ip6_tcphdr_put()
    249  tcph->ack = needs_ack; in nf_reject_ip6_tcphdr_put()
    250  tcph->window = 0; in nf_reject_ip6_tcphdr_put()
    251  tcph->urg_ptr = 0; in nf_reject_ip6_tcphdr_put()
    [all …]

/linux/tools/testing/selftests/net/

gro.c
    259  memset(tcph, 0, sizeof(*tcph)); in fill_transportlayer()
    265  tcph->ack = 1; in fill_transportlayer()
    266  tcph->fin = fin; in fill_transportlayer()
    267  tcph->doff = 5; in fill_transportlayer()
    270  tcph->check = tcp_checksum(tcph, payload_len); in fill_transportlayer()
    311  tcph->psh = psh; in send_flags()
    312  tcph->syn = syn; in send_flags()
    316  tcph->check = tcp_checksum(tcph, payload_len); in send_flags()
    478  tcph->check = tcph->check - 1; in send_changed_checksum()
    493  tcph->seq = ntohl(htonl(tcph->seq) + 1); in send_changed_seq()
    [all …]

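This selftest injects raw packets, so it has to compute tcph->check itself: the standard Internet checksum over an IPv4 pseudo-header plus the TCP header and payload. Below is a userspace sketch of that computation, not the selftest's tcp_checksum(); the helper names are mine. The check field must be zero while summing, the addresses are expected in network byte order, and the result is stored into the check field as-is, with no extra byte swap.

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

static uint32_t sum16(uint32_t sum, const void *data, size_t len)
{
        const uint16_t *p = data;

        while (len > 1) {
                sum += *p++;
                len -= 2;
        }
        if (len)                        /* odd trailing byte, zero-padded */
                sum += *(const uint8_t *)p;
        return sum;
}

static uint16_t demo_tcp_v4_csum(uint32_t saddr, uint32_t daddr,
                                 const void *tcp_segment, size_t tcp_len)
{
        struct {
                uint32_t saddr, daddr;
                uint8_t  zero, proto;
                uint16_t len;
        } __attribute__((packed)) ph = {
                .saddr = saddr, .daddr = daddr,
                .proto = IPPROTO_TCP, .len = htons((uint16_t)tcp_len),
        };
        uint32_t sum = 0;

        sum = sum16(sum, &ph, sizeof(ph));
        sum = sum16(sum, tcp_segment, tcp_len); /* header + payload, check == 0 */
        while (sum >> 16)                       /* fold carries */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}
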
/linux/drivers/infiniband/hw/irdma/

cm.c
    356   tcph->ack = 1; in irdma_form_ah_cm_frame()
    363   tcph->syn = 1; in irdma_form_ah_cm_frame()
    376   tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2); in irdma_form_ah_cm_frame()
    564   tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2); in irdma_form_uda_cm_frame()
    2623  struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; in irdma_handle_syn_pkt() local
    2689  struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; in irdma_handle_synack_pkt() local
    2762  struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; in irdma_handle_ack_pkt() local
    2846  struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; in irdma_process_pkt() local
    3121  tcph = (struct tcphdr *)rbuf->tcph; in irdma_receive_ilq()
    3146  if (!tcph->syn || tcph->ack) in irdma_receive_ilq()
    [all …]

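Lines 356-376 build the header by hand, and the doff expression is worth unpacking: doff counts 32-bit words, so the option length is rounded up to the next multiple of four before the shift; a 20-byte header plus a 4-byte MSS option gives doff = 6. As a tiny stand-alone helper (the demo_ name is mine):

#include <linux/tcp.h>
#include <linux/types.h>

static u8 demo_tcp_doff(unsigned int opts_len)
{
        /* (20 + opts_len) rounded up to a multiple of 4, counted in words */
        return (sizeof(struct tcphdr) + opts_len + 3) >> 2;
}
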
utils.c
    1394  struct tcphdr *tcph = (struct tcphdr *)buf->tcph; in irdma_ieq_get_qp() local
    1404  loc_port = ntohs(tcph->dest); in irdma_ieq_get_qp()
    1425  struct tcphdr *tcph = (struct tcphdr *)buf->tcph; in irdma_send_ieq_ack() local
    1487  struct tcphdr *tcph; in irdma_gen1_ieq_update_tcpip_info() local
    1498  tcph->seq = htonl(seqnum); in irdma_gen1_ieq_update_tcpip_info()
    1510  struct tcphdr *tcph; in irdma_ieq_update_tcpip_info() local
    1518  tcph->seq = htonl(seqnum); in irdma_ieq_update_tcpip_info()
    1533  struct tcphdr *tcph; in irdma_gen1_puda_get_tcpip_info() local
    1551  tcph = (struct tcphdr *)buf->tcph; in irdma_gen1_puda_get_tcpip_info()
    1587  struct tcphdr *tcph; in irdma_puda_get_tcpip_info() local
    [all …]

/linux/net/core/

tso.c
    36   struct tcphdr *tcph = (struct tcphdr *)hdr; in tso_build_hdr() local
    38   put_unaligned_be32(tso->tcp_seq, &tcph->seq); in tso_build_hdr()
    42   tcph->psh = 0; in tso_build_hdr()
    43   tcph->fin = 0; in tso_build_hdr()
    44   tcph->rst = 0; in tso_build_hdr()

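The software-TSO helper rewrites one TCP header per emitted segment: the sequence number (kept in host order in struct tso_t) is stored big-endian with an unaligned store, and PSH/FIN/RST are only allowed to survive on the last segment of the burst. A hedged sketch of that per-segment fix-up, with a demo_ name of my own:

#include <linux/tcp.h>
#include <linux/types.h>
#include <asm/unaligned.h>   /* <linux/unaligned.h> on newer kernels */

static void demo_tso_segment_fixup(struct tcphdr *tcph, u32 seq_host_order,
                                   bool is_last)
{
        put_unaligned_be32(seq_host_order, &tcph->seq);
        if (!is_last) {
                /* only the final segment may carry these flags */
                tcph->psh = 0;
                tcph->fin = 0;
                tcph->rst = 0;
        }
}
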
/linux/net/sched/

act_csum.c
    208  struct tcphdr *tcph; in tcf_csum_ipv4_tcp() local
    214  tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); in tcf_csum_ipv4_tcp()
    215  if (tcph == NULL) in tcf_csum_ipv4_tcp()
    219  tcph->check = 0; in tcf_csum_ipv4_tcp()
    220  skb->csum = csum_partial(tcph, ipl - ihl, 0); in tcf_csum_ipv4_tcp()
    221  tcph->check = tcp_v4_check(ipl - ihl, in tcf_csum_ipv4_tcp()
    232  struct tcphdr *tcph; in tcf_csum_ipv6_tcp() local
    238  tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); in tcf_csum_ipv6_tcp()
    239  if (tcph == NULL) in tcf_csum_ipv6_tcp()
    243  tcph->check = 0; in tcf_csum_ipv6_tcp()
    [all …]

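Lines 219-221 are the full, non-incremental recompute, the opposite of the csum_replace helpers above: zero the check field, sum header plus payload with csum_partial(), then fold in the IPv4 pseudo-header via tcp_v4_check(). A condensed sketch, assuming the whole TCP segment is linear and writable; the demo_ name is mine.

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/checksum.h>
#include <net/tcp.h>

static void demo_recompute_tcp_csum(struct sk_buff *skb,
                                    const struct iphdr *iph,
                                    struct tcphdr *tcph, unsigned int tcp_len)
{
        tcph->check = 0;
        tcph->check = tcp_v4_check(tcp_len, iph->saddr, iph->daddr,
                                   csum_partial(tcph, tcp_len, 0));
        skb->ip_summed = CHECKSUM_NONE;  /* checksum now valid in the packet */
}
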
act_nat.c
    169  struct tcphdr *tcph; in tcf_nat_act() local
    171  if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) || in tcf_nat_act()
    172  skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff)) in tcf_nat_act()
    175  tcph = (void *)(skb_network_header(skb) + ihl); in tcf_nat_act()
    176  inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, in tcf_nat_act()

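Lines 171-175 show the precondition pair that precedes any header edit: pskb_may_pull() guarantees the TCP header is in the linear area, and skb_try_make_writable() unshares a cloned skb before it is modified. A small sketch that combines both into one helper; the demo_ name is mine, and ihl is the IP header length in bytes.

#include <linux/skbuff.h>
#include <linux/tcp.h>

static struct tcphdr *demo_writable_tcph(struct sk_buff *skb, unsigned int ihl)
{
        unsigned int need = skb_network_offset(skb) + ihl +
                            sizeof(struct tcphdr);

        if (!pskb_may_pull(skb, need) || skb_try_make_writable(skb, need))
                return NULL;

        return (struct tcphdr *)(skb_network_header(skb) + ihl);
}
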
sch_cake.c
    906   const struct tcphdr *tcph; in cake_get_tcphdr() local
    946   if (!tcph || tcph->doff < 5) in cake_get_tcphdr()
    958   const u8 *ptr = (const u8 *)(tcph + 1); in cake_get_tcpopt()
    1084  const u8 *ptr = (const u8 *)(tcph + 1); in cake_tcph_may_drop()
    1095  if (((tcp_flag_word(tcph) & in cake_tcph_may_drop()
    1159  const struct tcphdr *tcph, *tcph_check; in cake_ack_filter() local
    1175  if (!tcph) in cake_ack_filter()
    1183  if ((tcp_flag_word(tcph) & in cake_ack_filter()
    1203  tcph_check->source != tcph->source || in cake_ack_filter()
    1204  tcph_check->dest != tcph->dest) in cake_ack_filter()
    [all …]

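The tcp_flag_word() uses at lines 1095 and 1183 exploit the same layout fact as the ECN example above, but at 32-bit granularity: word 3 of the header (bytes 12-15) holds doff, flags and window, so a flag test is one masked compare. A hedged sketch of an "ACK but not SYN/FIN/RST" test in that style; the demo_ name and the exact flag set are mine, not CAKE's eligibility rules.

#include <linux/tcp.h>
#include <linux/types.h>

static bool demo_plain_ack(const struct tcphdr *tcph)
{
        return (tcp_flag_word(tcph) &
                (TCP_FLAG_SYN | TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK)) ==
               TCP_FLAG_ACK;
}
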
act_ct.c
    421  struct tcphdr **tcph) in tcf_ct_flow_table_fill_tuple_ipv4() argument
    451  *tcph = (void *)(skb_network_header(skb) + thoff); in tcf_ct_flow_table_fill_tuple_ipv4()
    467  struct tcphdr **tcph) in tcf_ct_flow_table_fill_tuple_ipv6() argument
    493  *tcph = (void *)(skb_network_header(skb) + thoff); in tcf_ct_flow_table_fill_tuple_ipv6()
    514  struct tcphdr *tcph = NULL; in tcf_ct_flow_table_lookup() local
    526  if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph)) in tcf_ct_flow_table_lookup()
    530  if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph)) in tcf_ct_flow_table_lookup()
    545  if (tcph && (unlikely(tcph->fin || tcph->rst))) { in tcf_ct_flow_table_lookup()

/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

ipsec_rxtx.c
    243  struct tcphdr *tcph; in mlx5e_ipsec_set_metadata() local
    248  tcph = inner_tcp_hdr(skb); in mlx5e_ipsec_set_metadata()
    256  ntohs(tcph->source), ntohs(tcph->dest), in mlx5e_ipsec_set_metadata()
    257  ntohl(tcph->seq), ntohl(esph->seq_no)); in mlx5e_ipsec_set_metadata()
    260  mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF); in mlx5e_ipsec_set_metadata()