#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

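/*
 * Shims used to run cgroup-attached LSM programs.  They are invoked with the
 * raw hook context and a pointer into the shim program's instruction array;
 * the _sock/_socket/_current suffixes reflect the kind of object the hook
 * operates on.
 */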
unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

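/*
 * Only a subset of bpf_attach_type values are cgroup hooks.  They are
 * translated to a dense cgroup_bpf_attach_type index so that the static keys
 * and effective-program arrays stay small; any other attach type maps to
 * CGROUP_BPF_ATTACH_TYPE_INVALID.
 */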
#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE

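/*
 * One static key per cgroup attach type: a hook only becomes active while at
 * least one program of that type is attached somewhere, so unused hooks stay
 * essentially free.
 */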
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

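/*
 * One storage instance per (cgroup, map) pair.  Shared storage uses an
 * RCU-managed buffer, per-CPU storage a percpu allocation; each instance is
 * linked into both its map and its owning cgroup and indexed by its key.
 */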
struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct hlist_node node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

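/*
 * Low-level hook runners.  Callers normally go through the BPF_CGROUP_RUN_*
 * wrappers below, which check the per-attach-type static key (and, where
 * possible, the socket's effective program array) first.
 */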
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
						    enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

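/*
 * Opportunistic check: does the socket's cgroup have any program of @type in
 * its effective array?  Lets callers skip the run when only unrelated cgroups
 * have programs attached.
 */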
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    CGROUP_INET_INGRESS); \
	\
	__ret; \
})

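/*
 * Egress may see skbs whose socket is not a full socket; only run programs
 * when @sk is the skb's own socket and a full socket can be derived from it.
 */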
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk) && \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
							    CGROUP_INET_EGRESS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(atype)) { \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(atype)) \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  NULL, NULL); \
	__ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(atype)) { \
		lock_sock(sk); \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  t_ctx, NULL); \
		release_sock(sk); \
	} \
	__ret; \
})

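/*
 * The bind hooks can pass extra flags back through the @flags argument of
 * __cgroup_bpf_run_filter_sock_addr().  The only flag handled here is
 * BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE, which is translated into
 * BIND_NO_CAP_NET_BIND_SERVICE for the bind() path.
 */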
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
({ \
	u32 __flags = 0; \
	int __ret = 0; \
	if (cgroup_bpf_enabled(atype)) { \
		lock_sock(sk); \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  NULL, &__flags); \
		release_sock(sk); \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
	} \
	__ret; \
})

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)

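/*
 * Two sock_ops wrappers: the _SK variant takes the socket to run against
 * explicitly, while the variant below derives a full socket from
 * sock_ops->sk and only runs programs when one exists.  Note that the latter
 * uses typeof(sk), i.e. it expects a local 'sk' to be in scope at the
 * expansion site.
 */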
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
		__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
							 sock_ops, \
							 CGROUP_SOCK_OPS); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 sock_ops, \
								 CGROUP_SOCK_OPS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access, \
							  CGROUP_DEVICE); \
	\
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       buf, count, pos, \
						       CGROUP_SYSCTL); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
							   optname, optval, \
							   optlen, \
							   kernel_optval); \
	__ret; \
})

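/*
 * Reads the user-supplied optlen up front, but only when a getsockopt
 * program may run; evaluates to 0 otherwise.
 */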
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
		get_user(__ret, optlen); \
	__ret; \
})

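/*
 * A protocol can opt specific (level, optname) pairs out of the getsockopt
 * hook via sk_prot->bpf_bypass_getsockopt (e.g. tcp_bpf_bypass_getsockopt);
 * the program only runs when no bypass applies.
 */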
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
				       max_optlen, retval) \
({ \
	int __ret = retval; \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt, \
					  level, optname)) \
			__ret = __cgroup_bpf_run_filter_getsockopt( \
				sock, level, optname, optval, optlen, \
				max_optlen, retval); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) \
({ \
	int __ret = retval; \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern( \
			sock, level, optname, optval, optlen, retval); \
	__ret; \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

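/* CONFIG_CGROUP_BPF=n: stubs and macros that compile to no-ops. */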
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value, u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */