/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF
enum cgroup_bpf_attach_type {
	CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
	CGROUP_INET_INGRESS = 0,
	CGROUP_INET_EGRESS,
	CGROUP_INET_SOCK_CREATE,
	CGROUP_SOCK_OPS,
	CGROUP_DEVICE,
	CGROUP_INET4_BIND,
	CGROUP_INET6_BIND,
	CGROUP_INET4_CONNECT,
	CGROUP_INET6_CONNECT,
	CGROUP_INET4_POST_BIND,
	CGROUP_INET6_POST_BIND,
	CGROUP_UDP4_SENDMSG,
	CGROUP_UDP6_SENDMSG,
	CGROUP_SYSCTL,
	CGROUP_UDP4_RECVMSG,
	CGROUP_UDP6_RECVMSG,
	CGROUP_GETSOCKOPT,
	CGROUP_SETSOCKOPT,
	CGROUP_INET4_GETPEERNAME,
	CGROUP_INET6_GETPEERNAME,
	CGROUP_INET4_GETSOCKNAME,
	CGROUP_INET6_GETSOCKNAME,
	CGROUP_INET_SOCK_RELEASE,
	MAX_CGROUP_BPF_ATTACH_TYPE
};
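
/* Unlike the sparse UAPI enum bpf_attach_type, the values above are
 * dense: they are used directly as indices into the per-attach-type
 * arrays (cgroup_bpf_enabled_key[], and the effective[], progs[] and
 * flags[] members of struct cgroup_bpf) later in this header.
 */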

#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE
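
/* Sketch of typical use (illustrative, not a contract): attach and
 * detach paths convert the UAPI attach type up front and reject the
 * INVALID sentinel before indexing any per-attach-type array:
 *
 *	enum cgroup_bpf_attach_type atype;
 *
 *	atype = to_cgroup_bpf_attach_type(attach_type);
 *	if (atype < 0)
 *		return -EINVAL;
 */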

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
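
/* Sketch of typical use, assuming a local
 * 'enum bpf_cgroup_storage_type stype;' in the caller:
 *
 *	for_each_cgroup_storage_type(stype)
 *		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
 */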

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup, plus their attach flags.
	 * When flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list holds
	 * at most one element; with BPF_F_ALLOW_MULTI it can hold up to
	 * BPF_CGROUP_MAX_PROGS entries.
	 */
	struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
	u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];

	/* list of cgroup shared storages */
	struct list_head storages;

	/* temp storage for the effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access,
				      enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}
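
/* Example (sketch): storage code keys off this to pick the matching
 * union member in struct bpf_cgroup_storage above:
 *
 *	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_PERCPU)
 *		// per-CPU flavor: operate on storage->percpu_buf
 *	else
 *		// shared flavor: operate on storage->buf
 */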

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
						    enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS))			\
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		\
						    CGROUP_INET_INGRESS); \
									\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk);			\
		if (sk_fullsock(__sk))					\
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	\
							    CGROUP_INET_EGRESS); \
	}								\
	__ret;								\
})
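
/* Sketch of a typical call site shape in the network stack (the real
 * callers differ in detail); a nonzero return from the wrapper is
 * mapped to dropping the packet:
 *
 *	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *	if (ret) {
 *		kfree_skb(skb);
 *		return -EPERM;
 *	}
 */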

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(atype)) {				\
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		\
	}								\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			\
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype)			\
({									\
	u32 __unused_flags;						\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(atype))					\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  NULL,		\
							  &__unused_flags); \
	__ret;								\
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx)		\
({									\
	u32 __unused_flags;						\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(atype)) {				\
		lock_sock(sk);						\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  t_ctx,	\
							  &__unused_flags); \
		release_sock(sk);					\
	}								\
	__ret;								\
})
/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via the upper bits of the return code. The only flag currently
 * supported (at bit position 0) requests that the CAP_NET_BIND_SERVICE
 * capability check be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
({									\
	u32 __flags = 0;						\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(atype)) {				\
		lock_sock(sk);						\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  NULL, &__flags); \
		release_sock(sk);					\
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	\
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	\
	}								\
	__ret;								\
})
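
/* Sketch of the consuming side (modeled loosely on the inet bind path,
 * so treat the names here as illustrative): the caller passes a local
 * flags word and, when the bypass bit was set by the program, skips the
 * CAP_NET_BIND_SERVICE check for privileged ports:
 *
 *	u32 flags = 0;
 *	int err;
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
 *						 CGROUP_INET4_BIND, &flags);
 *	...
 *	if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
 *	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
 *		err = -EACCES;
 */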

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				\
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||			\
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&			\
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock under syncookie mode, so its
 * listener-sk is not attached via rsk_listener. In this case, the
 * caller holds the listener-sk (unlocked), sets sock_ops->sk to the
 * req_sk, and calls this SOCK_OPS"_SK" macro with the listener-sk so
 * that the cgroup-bpf progs of the listener-sk are run.
 *
 * Syncookie mode or not, calling bpf_setsockopt on a listener-sk
 * would not make sense anyway, so passing 'sock_ops->sk == req_sk' to
 * the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 CGROUP_SOCK_OPS); \
	__ret;								\
})

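/* Note: 'typeof(sk)' below refers to an 'sk' variable in the caller's
 * scope rather than to a macro parameter; the call sites of this macro
 * are expected to have one.
 */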
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {	\
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	\
		if (__sk && sk_fullsock(__sk))				\
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	\
								 sock_ops, \
								 CGROUP_SOCK_OPS); \
	}								\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_DEVICE))				\
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	\
							  CGROUP_DEVICE); \
									\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))				\
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       buf, count, pos,	\
						       CGROUP_SYSCTL);	\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT))			\
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
							   optname, optval, \
							   optlen,	\
							   kernel_optval); \
	__ret;								\
})

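/* Reads the user-supplied optlen so the caller can size a kernel buffer
 * for the getsockopt hook below; evaluates to 0 when no program can be
 * attached, letting callers skip that allocation entirely.
 */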
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			\
		get_user(__ret, optlen);				\
	__ret;								\
})

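/* Runs the getsockopt hook unless the protocol opted this (level,
 * optname) pair out via sk_prot->bpf_bypass_getsockopt; TCP uses that
 * callback to keep TCP_ZEROCOPY_RECEIVE off the BPF path.
 */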
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
				       max_optlen, retval)		\
({									\
	int __ret = retval;						\
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			\
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		\
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt,	\
					  level, optname))		\
			__ret = __cgroup_bpf_run_filter_getsockopt(	\
				sock, level, optname, optval, optlen,	\
				max_optlen, retval);			\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval)		\
({									\
	int __ret = retval;						\
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			\
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	\
			sock, level, optname, optval, optlen, retval);	\
	__ret;								\
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */