/linux/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
    45  qcom,msm8916-mss-pil:
    46  qcom,msm8974-mss-pil:
    48  qcom,msm8996-mss-pil:
    49  qcom,msm8998-mss-pil:
    50  qcom,sc7180-mss-pil:
    51  qcom,sc7280-mss-pil:
    52  qcom,sdm845-mss-pil:
    89  qcom,sc7180-mss-pil:
    92  qcom,sc7280-mss-pil:
    94  qcom,sdm845-mss-pil:
    [all …]

/linux/fs/proc/task_mmu.c
   406  mss->pss += pss;  in smaps_page_accumulate()
   409  mss->pss_anon += pss;  in smaps_page_accumulate()
   411  mss->pss_shmem += pss;  in smaps_page_accumulate()
   413  mss->pss_file += pss;  in smaps_page_accumulate()
   416  mss->pss_locked += pss;  in smaps_page_accumulate()
   442  mss->anonymous += size;  in smaps_account()
   444  mss->lazyfree += size;  in smaps_account()
   447  mss->resident += size;  in smaps_account()
   450  mss->referenced += size;  in smaps_account()
   820  memset(&mss, 0, sizeof(mss));  in show_smap()
   [all …]

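Note that in task_mmu.c "mss" is a mem_size_stats accumulator for /proc/<pid>/smaps, not a TCP segment size. A minimal userspace sketch of the proportional-set-size accounting these lines perform follows; the struct, the PSS_SHIFT value, and the page kinds are illustrative assumptions, not the kernel's definitions.

/* Hedged sketch of proportional set size (PSS) accounting, loosely modeled
 * on smaps_page_accumulate()/smaps_account() above. All names are
 * illustrative; the real kernel uses struct mem_size_stats. */
#include <stdint.h>
#include <stdio.h>

#define PSS_SHIFT 12  /* assumed fixed-point shift, as in fs/proc/task_mmu.c */

struct mem_stats {
	uint64_t resident;
	uint64_t pss;        /* stored << PSS_SHIFT to keep fractional shares */
	uint64_t pss_anon;
	uint64_t pss_file;
	uint64_t pss_shmem;
};

enum page_kind { PAGE_ANON, PAGE_FILE, PAGE_SHMEM };

static void account_page(struct mem_stats *ms, uint64_t size,
			 unsigned long mapcount, enum page_kind kind)
{
	/* Each mapper is charged size/mapcount; RSS charges the full size. */
	uint64_t pss = (size << PSS_SHIFT) / (mapcount ? mapcount : 1);

	ms->resident += size;
	ms->pss += pss;
	switch (kind) {
	case PAGE_ANON:  ms->pss_anon  += pss; break;
	case PAGE_FILE:  ms->pss_file  += pss; break;
	case PAGE_SHMEM: ms->pss_shmem += pss; break;
	}
}

int main(void)
{
	struct mem_stats ms = { 0 };

	account_page(&ms, 4096, 1, PAGE_ANON);   /* private page: full 4 KiB */
	account_page(&ms, 4096, 4, PAGE_FILE);   /* shared by 4: 1 KiB of PSS */

	printf("Rss: %llu kB  Pss: %llu kB\n",
	       (unsigned long long)(ms.resident >> 10),
	       (unsigned long long)(ms.pss >> PSS_SHIFT >> 10));
	return 0;
}
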
/linux/net/ipv4/tcp_offload.c
    18  if (before(ts_seq, seq + mss)) {  in tcp_gso_tstamp()
    25  seq += mss;  in tcp_gso_tstamp()
    64  unsigned int mss;  in tcp_gso_segment() local
    80  mss = skb_shinfo(skb)->gso_size;  in tcp_gso_segment()
    81  if (unlikely(skb->len <= mss))  in tcp_gso_segment()
   133  seq += mss;  in tcp_gso_segment()
   189  unsigned int mss = 1;  in tcp_gro_receive() local
   258  mss = skb_shinfo(p)->gso_size;  in tcp_gro_receive()
   260  flush |= (len - 1) >= mss;  in tcp_gro_receive()
   267  mss = 1;  in tcp_gro_receive()
   [all …]

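tcp_gso_segment() splits an oversized TCP skb into gso_size-byte (MSS-sized) segments and advances the sequence number by mss per segment. A hedged sketch of just that sequence/length bookkeeping, with illustrative numbers and no skb handling:

/* Hedged sketch of GSO-style segmentation arithmetic: split a payload of
 * `len` bytes starting at sequence `seq` into MSS-sized chunks, advancing
 * seq by mss per segment, as the tcp_gso_segment() loop does. */
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1448;   /* typical gso_size: MTU 1500 minus headers and timestamps */
	unsigned int len = 64000;  /* super-packet payload length */
	unsigned int seq = 1000;   /* starting sequence number */

	while (len) {
		unsigned int chunk = len < mss ? len : mss;

		printf("segment: seq=%u len=%u\n", seq, chunk);
		seq += chunk;      /* "seq += mss" in the kernel loop */
		len -= chunk;
	}
	return 0;
}
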
/linux/net/ipv4/tcp_recovery.c
   228  u32 mss;  in tcp_newreno_mark_lost() local
   233  mss = tcp_skb_mss(skb);  in tcp_newreno_mark_lost()
   234  if (tcp_skb_pcount(skb) > 1 && skb->len > mss)  in tcp_newreno_mark_lost()
   236  mss, mss, GFP_ATOMIC);  in tcp_newreno_mark_lost()

/linux/net/ipv4/syncookies.c
   163  const __u16 mss = *mssp;  in __cookie_v4_init_sequence() local
   166  if (mss >= msstab[mssind])  in __cookie_v4_init_sequence()
   332  int full_space, mss;  in cookie_v4_check() local
   344  mss = __cookie_v4_check(ip_hdr(skb), th, cookie);  in cookie_v4_check()
   345  if (mss == 0) {  in cookie_v4_check()
   377  req->mss = mss;  in cookie_v4_check()
   434  tcp_select_initial_window(sk, full_space, req->mss,  in cookie_v4_check()

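__cookie_v4_init_sequence() cannot store an arbitrary MSS in the few cookie bits available, so it rounds the peer's MSS down to the nearest entry of a small msstab[] and encodes only the index. A hedged sketch of that lookup; the table values below are illustrative stand-ins for the kernel's msstab:

/* Hedged sketch of SYN-cookie MSS encoding: pick the largest msstab[] entry
 * that does not exceed the advertised MSS and remember only its index. */
#include <stdio.h>

static const unsigned short msstab[] = { 536, 1300, 1440, 1460 };  /* illustrative */

static unsigned int encode_mss(unsigned int mss)
{
	unsigned int mssind;

	for (mssind = sizeof(msstab) / sizeof(msstab[0]) - 1; mssind; mssind--)
		if (mss >= msstab[mssind])
			break;
	return mssind;  /* index folded into the cookie; decoded on the ACK */
}

int main(void)
{
	unsigned int peers[] = { 1460, 1400, 500 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int idx = encode_mss(peers[i]);

		printf("mss %u -> index %u (encodes %u)\n",
		       peers[i], idx, (unsigned int)msstab[idx]);
	}
	return 0;
}
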
/linux/net/ipv4/tcp_timer.c
   163  int mss;  in tcp_mtu_probing() local
   173  mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;  in tcp_mtu_probing()
   174  mss = min(net->ipv4.sysctl_tcp_base_mss, mss);  in tcp_mtu_probing()
   175  mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);  in tcp_mtu_probing()
   176  mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);  in tcp_mtu_probing()
   177  icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);  in tcp_mtu_probing()

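tcp_mtu_probing() reacts to repeated retransmission timeouts by lowering the MTU-probing search floor: it halves the MSS derived from search_low and clamps the result against the tcp_base_mss, tcp_mtu_probe_floor and tcp_min_snd_mss sysctls. A hedged sketch of only that clamping arithmetic, with assumed sysctl defaults:

/* Hedged sketch of the tcp_mtu_probing() MSS clamp. The sysctl values are
 * assumed defaults; only the halve/min/max arithmetic mirrors the lines above. */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int base_mss = 1024;        /* assumed sysctl_tcp_base_mss default */
	int probe_floor = 48;       /* assumed sysctl_tcp_mtu_probe_floor default */
	int min_snd_mss = 48;       /* assumed sysctl_tcp_min_snd_mss default */
	int search_low_mss = 1460;  /* MSS derived from icsk_mtup.search_low */

	int mss = search_low_mss >> 1;      /* halve on repeated timeouts */
	mss = min_int(base_mss, mss);       /* never above tcp_base_mss */
	mss = max_int(mss, probe_floor);    /* ...but not below the floors */
	mss = max_int(mss, min_snd_mss);

	printf("new probing MSS: %d\n", mss); /* converted back to an MTU for search_low */
	return 0;
}
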
/linux/net/ipv4/tcp_output.c
   124  int mss = tp->advmss;  in tcp_advertise_mss() local
   129  if (metric < mss) {  in tcp_advertise_mss()
   130  mss = metric;  in tcp_advertise_mss()
   131  tp->advmss = mss;  in tcp_advertise_mss()
   135  return (__u16)mss;  in tcp_advertise_mss()
   221  if (space > mss)  in tcp_select_initial_window()
   868  opts->mss = mss;  in tcp_synack_options()
  1739  mtu = mss +  in tcp_mss_to_mtu()
  2825  (pcount - 1) * mss, mss,  in tcp_send_loss_probe()
  2955  if (mss <= 0)  in __tcp_select_window()
  [all …]

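tcp_mss_to_mtu() and tcp_advertise_mss() rest on the basic relationship MTU = MSS + network header + TCP header (plus any option bytes). A minimal sketch of that conversion for option-less IPv4 and IPv6; the header sizes are the standard fixed lengths, not values read from the kernel:

/* Hedged sketch of the MSS <-> MTU relationship behind tcp_mss_to_mtu()
 * and tcp_mtu_to_mss(). Option bytes are ignored for simplicity. */
#include <stdio.h>

#define TCP_HDR_LEN   20
#define IPV4_HDR_LEN  20
#define IPV6_HDR_LEN  40

static int mss_to_mtu(int mss, int net_hdr_len)
{
	return mss + net_hdr_len + TCP_HDR_LEN;
}

static int mtu_to_mss(int mtu, int net_hdr_len)
{
	return mtu - net_hdr_len - TCP_HDR_LEN;
}

int main(void)
{
	printf("IPv4: MTU 1500 -> MSS %d, MSS 1460 -> MTU %d\n",
	       mtu_to_mss(1500, IPV4_HDR_LEN), mss_to_mtu(1460, IPV4_HDR_LEN));
	printf("IPv6: MTU 1500 -> MSS %d\n", mtu_to_mss(1500, IPV6_HDR_LEN));
	return 0;
}
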
/linux/net/ipv4/udp_offload.c
   252  unsigned int mss = skb_shinfo(skb)->gso_size;  in __udp_gso_segment_list() local
   258  udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);  in __udp_gso_segment_list()
   270  unsigned int mss;  in __udp_gso_segment() local
   278  mss = skb_shinfo(gso_skb)->gso_size;  in __udp_gso_segment()
   279  if (gso_skb->len <= sizeof(*uh) + mss)  in __udp_gso_segment()
   301  mss *= skb_shinfo(segs)->gso_segs;  in __udp_gso_segment()
   312  newlen = htons(sizeof(*uh) + mss);  in __udp_gso_segment()
   371  unsigned int mss;  in udp4_ufo_fragment() local
   392  mss = skb_shinfo(skb)->gso_size;  in udp4_ufo_fragment()
   393  if (unlikely(skb->len <= mss))  in udp4_ufo_fragment()

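__udp_gso_segment() is the kernel side of UDP GSO: userspace writes one large datagram and a segment size via the UDP_SEGMENT socket option, and the stack (or the NIC) emits gso_size-byte UDP packets. A hedged userspace sketch of enabling it; the address, sizes, and minimal error handling are illustrative:

/* Hedged sketch of the userspace side of UDP GSO: set UDP_SEGMENT so one
 * large send is split into mss-sized datagrams by __udp_gso_segment()
 * (or by the NIC). Errors are only printed. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from linux/udp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int gso_size = 1400;		/* per-datagram payload, akin to gso_size */
	char buf[1400 * 4];		/* one "super datagram", four segments */
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),
	};

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	memset(buf, 'x', sizeof(buf));

	/* IPPROTO_UDP (17) doubles as the SOL_UDP setsockopt level. */
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size)) < 0)
		perror("setsockopt(UDP_SEGMENT)");
	if (sendto(fd, buf, sizeof(buf), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}
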
/linux/net/ipv4/tcp_metrics.c
    29  u16 mss;  member
   124  tm->tcpm_fastopen.mss = 0;  in tcpm_suck_dst()
   543  void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,  in tcp_fastopen_cache_get() argument
   556  if (tfom->mss)  in tcp_fastopen_cache_get()
   557  *mss = tfom->mss;  in tcp_fastopen_cache_get()
   566  void tcp_fastopen_cache_set(struct sock *sk, u16 mss,  in tcp_fastopen_cache_set() argument
   581  if (mss)  in tcp_fastopen_cache_set()
   582  tfom->mss = mss;  in tcp_fastopen_cache_set()
   698  if (tfom->mss &&  in tcp_metrics_fill_info()
   700  tfom->mss) < 0)  in tcp_metrics_fill_info()

/linux/tools/testing/selftests/net/psock_snd.sh
    15  readonly mss="$((${mtu} - ${iphlen} - ${udphlen}))"
    16  readonly mss_exceeds="$((${mss} + 1))"
    58  ./in_netns.sh ./psock_snd -l "${mss}"
    72  ./in_netns.sh ./psock_snd -d -l "${mss}"

/linux/tools/testing/selftests/net/tcp_mmap.c
   380  int mss = 0;  in main() local
   411  mss = atoi(optarg);  in main()
   452  if (mss &&  in main()
   454  &mss, sizeof(mss)) == -1) {  in main()
   484  if (mss &&  in main()
   485  setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) == -1) {  in main()

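tcp_mmap.c pins the segment size with the TCP_MAXSEG socket option, which caps the MSS the stack advertises and uses for the connection. A minimal hedged sketch of the same setsockopt()/getsockopt() pair; the destination and the requested MSS are illustrative, and the effective value read back may be lower than requested:

/* Hedged sketch of capping the MSS with TCP_MAXSEG, as tcp_mmap.c does.
 * Must be set before connect() to influence the SYN's MSS option. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int mss = 1000;
	socklen_t len = sizeof(mss);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(80),
	};

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) == -1)
		perror("setsockopt(TCP_MAXSEG)");

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == -1)
		perror("connect");
	else if (getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, &len) == 0)
		printf("effective MSS: %d\n", mss);

	close(fd);
	return 0;
}
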
/linux/net/ipv6/syncookies.c
    96  const __u16 mss = *mssp;  in __cookie_v6_init_sequence() local
    99  if (mss >= msstab[mssind])  in __cookie_v6_init_sequence()
   139  int full_space, mss;  in cookie_v6_check() local
   150  mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);  in cookie_v6_check()
   151  if (mss == 0) {  in cookie_v6_check()
   184  req->mss = mss;  in cookie_v6_check()
   250  tcp_select_initial_window(sk, full_space, req->mss,  in cookie_v6_check()

/linux/Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml
     4  $id: http://devicetree.org/schemas/clock/qcom,sc7180-mss.yaml#
    16  - dt-bindings/clock/qcom,mss-sc7180.h
    20  const: qcom,sc7180-mss
    52  compatible = "qcom,sc7180-mss";

/linux/drivers/scsi/snic/wq_enet_desc.h
    52  u64 address, u16 length, u16 mss, u16 header_length,  in wq_enet_desc_enc() argument
    58  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<  in wq_enet_desc_enc()
    71  u64 *address, u16 *length, u16 *mss, u16 *header_length,  in wq_enet_desc_dec() argument
    77  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &  in wq_enet_desc_dec()

/linux/drivers/scsi/fnic/wq_enet_desc.h
    52  u64 address, u16 length, u16 mss, u16 header_length,  in wq_enet_desc_enc() argument
    58  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<  in wq_enet_desc_enc()
    71  u64 *address, u16 *length, u16 *mss, u16 *header_length,  in wq_enet_desc_dec() argument
    77  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &  in wq_enet_desc_dec()

/linux/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
    54  u64 address, u16 length, u16 mss, u16 header_length,  in wq_enet_desc_enc() argument
    60  desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<  in wq_enet_desc_enc()
    73  u64 *address, u16 *length, u16 *mss, u16 *header_length,  in wq_enet_desc_dec() argument
    79  *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &  in wq_enet_desc_dec()

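The three wq_enet_desc.h copies above (snic, fnic and enic share the Cisco VIC descriptor layout) pack the TSO MSS and a loopback bit into one little-endian 16-bit descriptor word with a mask and a shift. A hedged sketch of that encode/decode; the mask/shift values are assumptions standing in for WQ_ENET_MSS_MASK/WQ_ENET_MSS_SHIFT, and the cpu_to_le16()/le16_to_cpu() conversion is omitted:

/* Hedged sketch of packing an MSS into a 16-bit descriptor field with a
 * mask and shift, mirroring wq_enet_desc_enc()/wq_enet_desc_dec(). */
#include <stdint.h>
#include <stdio.h>

#define MSS_BITS   14
#define MSS_MASK   ((1u << MSS_BITS) - 1)
#define MSS_SHIFT  2			/* low bits left for loopback/flags */

static uint16_t enc_mss_loopback(uint16_t mss, uint8_t loopback)
{
	return (uint16_t)(((mss & MSS_MASK) << MSS_SHIFT) | (loopback & 0x1));
}

static void dec_mss_loopback(uint16_t word, uint16_t *mss, uint8_t *loopback)
{
	*mss = (word >> MSS_SHIFT) & MSS_MASK;
	*loopback = word & 0x1;
}

int main(void)
{
	uint16_t word = enc_mss_loopback(1460, 0);
	uint16_t mss;
	uint8_t lb;

	dec_mss_loopback(word, &mss, &lb);
	printf("word=0x%04x mss=%u loopback=%u\n", word, mss, lb);
	return 0;
}
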
/linux/net/netfilter/nfnetlink_osf.c
    73  u16 mss = 0;  in nf_osf_match_one() local
   106  mss = ctx->optp[3];  in nf_osf_match_one()
   107  mss <<= 8;  in nf_osf_match_one()
   108  mss |= ctx->optp[2];  in nf_osf_match_one()
   110  mss = ntohs((__force __be16)mss);  in nf_osf_match_one()
   141  if (ctx->window == f->wss.val * mss ||  in nf_osf_match_one()
   147  if (ctx->window == f->wss.val * (mss + 40) ||  in nf_osf_match_one()

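nf_osf_match_one() reads the peer's MSS out of the raw TCP MSS option (kind 2, length 4, 16-bit value in network byte order) and then checks whether the advertised window is a multiple of it, a classic passive-fingerprinting heuristic. A hedged sketch of parsing that option from a TCP options buffer; the parsing is deliberately simplified and assumes a well-formed buffer:

/* Hedged sketch of pulling the MSS out of raw TCP options, roughly what
 * nf_osf_match_one() does before comparing window == wss * mss. */
#include <stdint.h>
#include <stdio.h>

static int parse_tcp_mss(const uint8_t *opt, int optlen, uint16_t *mss)
{
	int i = 0;

	while (i < optlen) {
		uint8_t kind = opt[i];

		if (kind == 0)			/* end of options */
			break;
		if (kind == 1) {		/* NOP: one byte */
			i++;
			continue;
		}
		if (i + 1 >= optlen || opt[i + 1] < 2)
			return -1;		/* malformed */
		if (kind == 2 && opt[i + 1] == 4 && i + 3 < optlen) {
			*mss = (uint16_t)(opt[i + 2] << 8) | opt[i + 3];
			return 0;
		}
		i += opt[i + 1];
	}
	return -1;
}

int main(void)
{
	/* MSS 1460 (0x05b4), then NOP, NOP, end-of-options. */
	const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 1, 0 };
	uint16_t mss;

	if (parse_tcp_mss(opts, sizeof(opts), &mss) == 0)
		printf("mss=%u\n", mss);	/* window % mss check would follow */
	return 0;
}
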
/linux/net/netfilter/nf_synproxy_core.c
   478  u16 mss = opts->mss_encode;  in synproxy_send_client_synack() local
   495  nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss));  in synproxy_send_client_synack()
   641  int mss;  in synproxy_recv_client_ack() local
   643  mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);  in synproxy_recv_client_ack()
   644  if (mss == 0) {  in synproxy_recv_client_ack()
   650  opts->mss_option = mss;  in synproxy_recv_client_ack()
   891  u16 mss = opts->mss_encode;  in synproxy_send_client_synack_ipv6() local
   908  nth->seq = htonl(nf_ipv6_cookie_init_sequence(iph, th, &mss));  in synproxy_send_client_synack_ipv6()
  1058  int mss;  in synproxy_recv_client_ack_ipv6() local
  1061  if (mss == 0) {  in synproxy_recv_client_ack_ipv6()
  [all …]

/linux/net/netfilter/xt_TCPMSS.c
   102  if (info->mss == XT_TCPMSS_CLAMP_PMTU) {  in tcpmss_mangle_packet()
   114  newmss = info->mss;  in tcpmss_mangle_packet()
   269  if (info->mss == XT_TCPMSS_CLAMP_PMTU &&  in tcpmss_tg4_check()
   293  if (info->mss == XT_TCPMSS_CLAMP_PMTU &&  in tcpmss_tg6_check()

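The TCPMSS target either writes a fixed MSS into SYN packets or, with XT_TCPMSS_CLAMP_PMTU (the usual --clamp-mss-to-pmtu rule), clamps it to the path MTU minus the IP and TCP headers. A hedged sketch of only that arithmetic, with a PPPoE-style path MTU as the example:

/* Hedged sketch of MSS clamping in the spirit of the xt_TCPMSS target with
 * XT_TCPMSS_CLAMP_PMTU: the rewritten MSS is the path MTU minus the network
 * and TCP headers, and never larger than what the peer offered. */
#include <stdio.h>

#define TCP_HDR_LEN  20

static int clamp_mss_to_pmtu(int advertised_mss, int pmtu, int net_hdr_len)
{
	int newmss = pmtu - net_hdr_len - TCP_HDR_LEN;

	return newmss < advertised_mss ? newmss : advertised_mss;
}

int main(void)
{
	/* e.g. a PPPoE path with MTU 1492 in front of an Ethernet client */
	printf("clamped MSS: %d\n", clamp_mss_to_pmtu(1460, 1492, 20));
	return 0;
}
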
/linux/net/batman-adv/tp_meter.c
   150  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);  in batadv_tp_update_cwnd()
   157  ((mss * mss) << 6) / (tp_vars->cwnd << 3));  in batadv_tp_update_cwnd()
   158  if (tp_vars->dec_cwnd < (mss << 3)) {  in batadv_tp_update_cwnd()
   163  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);  in batadv_tp_update_cwnd()
   635  size_t packet_len, mss;  in batadv_tp_recv_ack() local
   639  mss = BATADV_TP_PLEN;  in batadv_tp_recv_ack()
   705  mss);  in batadv_tp_recv_ack()
   731  mss, mss);  in batadv_tp_recv_ack()
   739  mss);  in batadv_tp_recv_ack()
   745  if (recv_ack - atomic_read(&tp_vars->last_acked) >= mss)  in batadv_tp_recv_ack()
   [all …]

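batadv_tp_update_cwnd() grows the throughput meter's window TCP-style: in congestion avoidance each acknowledged mss adds roughly mss*mss/cwnd, i.e. about one mss per round trip. A hedged sketch of that additive-increase step; the <<6/<<3 fixed-point scaling of the real code is dropped and the numbers are illustrative:

/* Hedged sketch of TCP-style congestion-avoidance growth as used by the
 * batman-adv throughput meter: each ACK of one mss grows cwnd by roughly
 * mss*mss/cwnd, so cwnd gains about one mss per round trip. */
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1000;	/* BATADV_TP_PLEN stands in here */
	unsigned int cwnd = 10 * mss;
	int rtt;

	for (rtt = 1; rtt <= 5; rtt++) {
		unsigned int start_cwnd = cwnd;
		unsigned int acked;

		/* one congestion-avoidance increment per acked mss */
		for (acked = 0; acked < start_cwnd; acked += mss)
			cwnd += (mss * mss) / cwnd;
		printf("after RTT %d: cwnd = %u bytes\n", rtt, cwnd);
	}
	return 0;
}
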
/linux/drivers/net/ethernet/sfc/ef100_tx.c
    58  u32 mss;  in ef100_tx_can_tso() local
    67  mss = skb_shinfo(skb)->gso_size;  in ef100_tx_can_tso()
    68  if (unlikely(mss < 4)) {  in ef100_tx_can_tso()
    69  WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss);  in ef100_tx_can_tso()
    83  if (skb->data_len / mss > nic_data->tso_max_frames)  in ef100_tx_can_tso()
   195  u32 mss = skb_shinfo(skb)->gso_size;  in ef100_make_tso_desc() local
   233  ESF_GZ_TX_TSO_MSS, mss,  in ef100_make_tso_desc()

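ef100_tx_can_tso() refuses TSO when the MSS is implausibly small (below 4) or when the payload would need more MSS-sized frames than the NIC allows (tso_max_frames). A hedged sketch of that eligibility check; the frame limit is an assumed stand-in for nic_data->tso_max_frames:

/* Hedged sketch of a TSO eligibility check in the spirit of
 * ef100_tx_can_tso(): reject tiny MSS values and payloads that would need
 * more frames than the (assumed) hardware limit. */
#include <stdbool.h>
#include <stdio.h>

#define ASSUMED_TSO_MAX_FRAMES 128	/* stand-in for nic_data->tso_max_frames */

static bool can_tso(unsigned int payload_len, unsigned int mss)
{
	if (mss < 4)			/* nonsense MSS, fall back to software GSO */
		return false;
	if (payload_len / mss > ASSUMED_TSO_MAX_FRAMES)
		return false;		/* too many frames for the NIC */
	return true;
}

int main(void)
{
	printf("%d %d %d\n", can_tso(64000, 1448), can_tso(64000, 2),
	       can_tso(1000000, 1448));
	return 0;
}
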
/linux/ipc/msg.c
   193  struct msg_sender *mss, size_t msgsz)  in ss_add() argument
   195  mss->tsk = current;  in ss_add()
   196  mss->msgsz = msgsz;  in ss_add()
   202  list_add_tail(&mss->list, &msq->q_senders);  in ss_add()
   207  if (mss->list.next)  in ss_del()
   208  list_del(&mss->list);  in ss_del()
   214  struct msg_sender *mss, *t;  in ss_wakeup() local
   220  mss->list.next = NULL;  in ss_wakeup()
   227  else if (stop_tsk == mss->tsk)  in ss_wakeup()
   238  stop_tsk = mss->tsk;  in ss_wakeup()
   [all …]

/linux/net/tipc/msg.c
   202  int mss, struct sk_buff_head *txq)  in tipc_msg_append() argument
   214  if (!skb || skb->len >= mss) {  in tipc_msg_append()
   215  skb = tipc_buf_acquire(mss, GFP_KERNEL);  in tipc_msg_append()
   230  cpy = min_t(size_t, rem, mss - mlen);  in tipc_msg_append()
   516  bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,  in tipc_msg_try_bundle() argument
   530  if (mss <= INT_H_SIZE + msg_size(msg))  in tipc_msg_try_bundle()
   545  if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))  in tipc_msg_try_bundle()
   547  if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,  in tipc_msg_try_bundle()
   561  if (likely(tipc_msg_bundle(tskb, msg, mss))) {  in tipc_msg_try_bundle()

/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
   151  int data_len, mss;  in mlx5e_tls_complete_sync_skb() local
   167  mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));  in mlx5e_tls_complete_sync_skb()
   169  if (data_len > mss) {  in mlx5e_tls_complete_sync_skb()
   170  skb_shinfo(nskb)->gso_size = mss;  in mlx5e_tls_complete_sync_skb()
   171  skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);  in mlx5e_tls_complete_sync_skb()

/linux/drivers/net/ethernet/google/gve/gve_desc.h
    40  __be16 mss;  /* TSO MSS */  member
    67  __be16 mss;  member