// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

/* "extern" is to avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

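/* Ops that a bpf-tcp-cc may leave unimplemented.  Any other func ptr
 * member must be provided (see bpf_tcp_ca_init_member() below).
 */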
static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

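/* Ops that a bpf-tcp-cc cannot implement; attaching a prog to them is
 * rejected by bpf_tcp_ca_check_member().
 */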
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

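/* Resolve and cache the BTF ids of "sock" and "tcp_sock", needed by the
 * access checks and by bpf_tcp_send_ack_proto below.
 */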
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

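/* Classify a tcp_congestion_ops member by its byte offset */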
static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

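/* Ctx access follows the tracing-prog rules.  A pointer known to be a
 * "sock" is promoted to "tcp_sock" so that a bpf-tcp-cc can read
 * tcp_sock fields directly.
 */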
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

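/* BPF_READ through a PTR_TO_BTF_ID is handled by the generic
 * btf_struct_access().  BPF_WRITE is only allowed to the few tcp_sock
 * (and inet_connection_sock) fields whitelisted below.
 */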
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf *btf,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

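/* Helper for a bpf-tcp-cc to send an ACK carrying the given rcv_nxt */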
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

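/* Byte offset in tcp_congestion_ops of the func ptr the prog is
 * attached to.  For struct_ops progs, expected_attach_type holds the
 * member index.
 */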
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = bpf_tcp_congestion_ops.type;
	m = &btf_type_member(t)[midx];

	return btf_member_bit_offset(t, m) / 8;
}

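/* Helpers callable from a bpf-tcp-cc, on top of the base set */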
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Does not allow release() to call setsockopt.
		 * release() is called when the current bpf-tcp-cc
		 * is retiring.  It is not allowed to call
		 * setsockopt() to make further changes which
		 * may potentially allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since get/setsockopt is usually expected to
		 * be available together, disable getsockopt for
		 * release also to avoid usage surprise.
		 * The bpf-tcp-cc already has a more powerful way
		 * to read tcp_sock from the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

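/* Kernel functions that a bpf-tcp-cc may call directly, e.g. to fall
 * back to the reno defaults for individual ops.
 */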
BTF_SET_START(bpf_tcp_ca_kfunc_ids)
BTF_ID(func, tcp_reno_ssthresh)
BTF_ID(func, tcp_reno_cong_avoid)
BTF_ID(func, tcp_reno_undo_cwnd)
BTF_ID(func, tcp_slow_start)
BTF_ID(func, tcp_cong_avoid_ai)
BTF_SET_END(bpf_tcp_ca_kfunc_ids)

static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id, struct module *owner)
{
	if (btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id))
		return true;
	return bpf_check_mod_kfunc_call(&bpf_tcp_ca_kfunc_list, kfunc_btf_id, owner);
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
	.check_kfunc_call	= bpf_tcp_ca_check_kfunc_call,
};

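/* Initialize the non func ptr members (flags, name) from the
 * user-provided struct.  Returning 1 marks the member as handled here;
 * 0 defers to the generic struct_ops handling.
 */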
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

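/* A bpf-tcp-cc registers/unregisters like any other congestion
 * control module.
 */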
static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};