
Searched refs:sk_listener (Results 1 – 13 of 13) sorted by relevance

/linux/include/net/
request_sock.h:86 reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, in reqsk_alloc() argument
96 if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) { in reqsk_alloc()
100 req->rsk_listener = sk_listener; in reqsk_alloc()
103 req_to_sk(req)->sk_prot = sk_listener->sk_prot; in reqsk_alloc()
mptcp.h:187 const struct sock *sk_listener,
275 const struct sock *sk_listener, in mptcp_subflow_init_cookie_req() argument
inet_sock.h:339 struct sock *sk_listener,
sock.h:2757 static inline bool sk_listener(const struct sock *sk) in sk_listener() function
tcp.h:1470 const struct sock *sk_listener,
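Most of the hits above use sk_listener simply as the parameter name for the listening socket handed to request-sock helpers; the sock.h hit at line 2757 is different, a small predicate function of the same name whose body the listing does not show. A minimal sketch of what that helper most plausibly does (the state mask is an assumption, not confirmed by the output above):

/* Sketch of the sk_listener() predicate from include/net/sock.h.
 * Assumption: true for sockets in a listening state, i.e. full
 * listeners (TCP_LISTEN) and the lightweight request sockets
 * created for incoming SYNs (TCP_NEW_SYN_RECV).
 */
static inline bool sk_listener(const struct sock *sk)
{
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}

The sch_fq and SELinux hits further down call it in exactly this predicate role, to detect packets whose skb->sk points at a listener rather than at an established connection.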
/linux/net/mptcp/
subflow.c:111 subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener)); in subflow_init_req()
138 const struct sock *sk_listener, in subflow_check_req() argument
141 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); in subflow_check_req()
152 if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) in subflow_check_req()
156 mptcp_get_options(sk_listener, skb, &mp_opt); in subflow_check_req()
215 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) { in subflow_check_req()
217 ntohs(inet_sk(sk_listener)->inet_sport), in subflow_check_req()
219 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { in subflow_check_req()
243 const struct sock *sk_listener, in mptcp_subflow_init_cookie_req() argument
252 subflow_init_req(req, sk_listener); in mptcp_subflow_init_cookie_req()
[all …]
/linux/net/ipv4/
inet_connection_sock.c:800 struct sock *sk_listener = req->rsk_listener; in reqsk_timer_handler() local
806 if (inet_sk_state_load(sk_listener) != TCP_LISTEN) { in reqsk_timer_handler()
809 nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL); in reqsk_timer_handler()
827 sk_listener = nsk; in reqsk_timer_handler()
830 icsk = inet_csk(sk_listener); in reqsk_timer_handler()
831 net = sock_net(sk_listener); in reqsk_timer_handler()
852 if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) { in reqsk_timer_handler()
867 !inet_rtx_syn_ack(sk_listener, req) || in reqsk_timer_handler()
881 inet_csk_reqsk_queue_drop(sk_listener, nreq); in reqsk_timer_handler()
tcp_minisocks.c:354 const struct sock *sk_listener, in tcp_openreq_init_rwin() argument
358 const struct tcp_sock *tp = tcp_sk(sk_listener); in tcp_openreq_init_rwin()
359 int full_space = tcp_full_space(sk_listener); in tcp_openreq_init_rwin()
371 if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_openreq_init_rwin()
382 tcp_select_initial_window(sk_listener, full_space, in tcp_openreq_init_rwin()
tcp_input.c:6709 struct sock *sk_listener, in inet_reqsk_alloc() argument
6712 struct request_sock *req = reqsk_alloc(ops, sk_listener, in inet_reqsk_alloc()
6724 write_pnet(&ireq->ireq_net, sock_net(sk_listener)); in inet_reqsk_alloc()
6725 ireq->ireq_family = sk_listener->sk_family; in inet_reqsk_alloc()
tcp_ipv4.c:1473 const struct sock *sk_listener, in tcp_v4_init_req() argument
1477 struct net *net = sock_net(sk_listener); in tcp_v4_init_req()
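The tcp_input.c fragments above come from inet_reqsk_alloc(), which builds on the reqsk_alloc() fragments from request_sock.h at the top of the listing: the low-level helper takes a reference on sk_listener and stores it in req->rsk_listener, and the inet layer then copies the listener's namespace and address family into the new request. A rough reassembly, where everything outside the quoted lines is an assumption rather than verbatim kernel code:

/* Sketch of inet_reqsk_alloc() (net/ipv4/tcp_input.c), assembled around
 * the fragments quoted at lines 6712, 6724 and 6725 above; the other
 * initializations are assumptions, not text from the search output.
 */
struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener,
				      bool attach_listener)
{
	struct request_sock *req = reqsk_alloc(ops, sk_listener,
					       attach_listener);

	if (req) {
		struct inet_request_sock *ireq = inet_rsk(req);

		ireq->ireq_opt = NULL;               /* assumed */
		atomic64_set(&ireq->ir_cookie, 0);   /* assumed */
		ireq->ireq_state = TCP_NEW_SYN_RECV; /* assumed */
		/* Quoted above: the request inherits the listener's
		 * network namespace and address family. */
		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
		ireq->ireq_family = sk_listener->sk_family;
	}

	return req;
}

The inet_connection_sock.c hits show the reverse direction: when the SYN-ACK retransmit timer fires, reqsk_timer_handler() recovers the listener through req->rsk_listener, and the check quoted at line 852 starts expiring old requests more aggressively once the pending queue exceeds half of sk_max_ack_backlog.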
/linux/net/ipv6/
tcp_ipv6.c:827 const struct sock *sk_listener, in tcp_v6_init_req() argument
832 const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener); in tcp_v6_init_req()
838 if ((!sk_listener->sk_bound_dev_if || l3_slave) && in tcp_v6_init_req()
843 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) || in tcp_v6_init_req()
/linux/net/sched/
sch_fq.c:281 if (!sk || sk_listener(sk)) { in fq_classify()
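The lone sch_fq hit is where the sk_listener() predicate matters to the packet scheduler: SYN-ACKs are sent with skb->sk pointing at the listening (or request) socket, so without special-casing they would all collapse into a single per-socket flow. A sketch of that branch expanded from the quoted condition; the orphan_mask hashing around it is an assumption about the surrounding code:

/* Sketch of the fq_classify() branch at sch_fq.c:281: packets with no
 * owning socket, and packets owned by a listener (e.g. SYN-ACKs), are
 * spread across hash-derived "orphan" flows instead of piling onto one
 * per-listener flow.
 */
if (!sk || sk_listener(sk)) {
	unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

	/* Force the low bit so this synthetic key cannot collide with a
	 * real, word-aligned socket pointer used as a flow key. */
	sk = (struct sock *)((hash << 1) | 1UL);
	skb_orphan(skb);
}

The SELinux hooks in the next group lean on the same predicate for a different reason: roughly, a packet owned by a listener is a SYN-ACK for a connection request, so it should not simply be labeled with the listening socket's own security context.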
/linux/security/selinux/
hooks.c:5754 if (sk_listener(sk)) in selinux_ip_output()
5855 !(sk && sk_listener(sk))) in selinux_ip_postroute()
5873 } else if (sk_listener(sk)) { in selinux_ip_postroute()

Completed in 59 milliseconds