// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/**
 * DOC: cpu map
 * The 'cpu map' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out to another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU does the SKB allocation (in a dedicated kthread) and injects the
 * packets into the normal network stack.
 */
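
/* Typical usage from an XDP program (a sketch; the map name, sizes and
 * CPU index are illustrative, not taken from this file):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, struct bpf_cpumap_val);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int redirect_to_cpu(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&cpu_map, 2, 0);
 *	}
 */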
#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#include <linux/netdevice.h>   /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */
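
/* General idea: XDP packets getting XDP-redirected to another CPU are
 * stored/queued for at most one driver ->poll() call.  Enqueue
 * (bq_enqueue) and flush (__cpu_map_flush) are guaranteed to run on the
 * same CPU, so the flush can locate the relevant bulk queues via
 * this_cpu_ptr().
 */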

#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	struct bpf_cpu_map *cmap;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	atomic_t refcnt; /* Control when this struct can be free'd */
	struct rcu_head rcu;

	struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Members below are specific to this map type */
	struct bpf_cpu_map_entry __rcu **cpu_map;
};

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_cmap;

	return &cmap->map;
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}
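
/* Each bpf_cpu_map_entry is reference counted: one reference is held by
 * the map slot (dropped via __cpu_map_entry_free()) and one by the
 * kthread (dropped when cpu_map_kthread_run() exits).
 */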
static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}

/* Called from a workqueue, because kthread_stop() cannot be invoked
 * from the map-update/delete context directly.
 */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for the RCU callback __cpu_map_entry_free() to run, via a
	 * full RCU barrier, before stopping the kthread.
	 */
	rcu_barrier();

	/* kthread_stop will wake_up_process and wait for it to complete */
	kthread_stop(rcpu->kthread);
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that the queue is
	 * empty; see __cpu_map_entry_replace() and the work-queue invoked
	 * cpu_map_kthread_stop().  Catch any broken behaviour gracefully
	 * and warn once.
	 */
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdpf))
			xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		if (rcpu->prog)
			bpf_prog_put(rcpu->prog);
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}
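
/* Run the attached XDP program on SKBs that were enqueued via generic
 * XDP (cpu_map_generic_redirect()).  XDP_PASS leaves the skb on @listp,
 * so it is later handed to netif_receive_skb_list(); redirected and
 * dropped skbs are unlinked from the list here.
 */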
static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
				     struct list_head *listp,
				     struct xdp_cpumap_stats *stats)
{
	struct sk_buff *skb, *tmp;
	struct xdp_buff xdp;
	u32 act;
	int err;

	list_for_each_entry_safe(skb, tmp, listp, list) {
		act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
		switch (act) {
		case XDP_PASS:
			break;
		case XDP_REDIRECT:
			skb_list_del_init(skb);
			err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
						      rcpu->prog);
			if (unlikely(err)) {
				kfree_skb(skb);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			return;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(skb->dev, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			skb_list_del_init(skb);
			kfree_skb(skb);
			stats->drop++;
			return;
		}
	}
}
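
/* Run the attached XDP program on a batch of xdp_frames.  Frames that
 * return XDP_PASS are compacted to the front of @frames and the number
 * of passing frames is returned; redirected and dropped frames are
 * consumed here.
 */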
static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq;
	struct xdp_buff xdp;
	int i, nframes = 0;

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	xdp_clear_return_frame_no_direct();

	return nframes;
}

#define CPUMAP_BATCH 8

static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
				int xdp_n, struct xdp_cpumap_stats *stats,
				struct list_head *list)
{
	int nframes;

	if (!rcpu->prog)
		return xdp_n;

	rcu_read_lock_bh();

	nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);

	if (stats->redirect)
		xdp_do_flush();

	if (unlikely(!list_empty(list)))
		cpu_map_bpf_prog_run_skb(rcpu, list, stats);

	rcu_read_unlock_bh();

	return nframes;
}
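
/* kthread bound to one remote CPU per map entry: it drains the entry's
 * ptr_ring, optionally runs the attached XDP program on the frames,
 * builds SKBs for the survivors and feeds them to the network stack via
 * netif_receive_skb_list().
 */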
static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread is told to stop, the entry has already been
	 * disconnected from the map, so no new packets can be enqueued.
	 * Remaining in-flight per-CPU packets have been flushed to the
	 * queue; honour kthread_stop only once the queue is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		unsigned int kmem_alloc_drops = 0, sched = 0;
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		int i, n, m, nframes, xdp_n;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		LIST_HEAD(list);

		/* Sleep if the ring is empty; recheck after setting
		 * TASK_INTERRUPTIBLE to avoid missing a producer wake-up.
		 */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/* The bpf_cpu_map_entry has a single consumer (this kthread,
		 * pinned to one CPU), and the queue cannot be resized, so
		 * lockless access to the ptr_ring consume side is valid.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0, xdp_n = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page;

			if (unlikely(__ptr_test_bit(0, &f))) {
				struct sk_buff *skb = f;

				__ptr_clear_bit(0, &skb);
				list_add_tail(&skb->list, &list);
				continue;
			}

			frames[xdp_n++] = f;
			page = virt_to_page(f);

			/* Bring the struct page memory area to the current
			 * CPU; it is read during SKB build and written when
			 * the frame is freed.
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
		if (nframes) {
			m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
			if (unlikely(m == 0)) {
				for (i = 0; i < nframes; i++)
					skbs[i] = NULL; /* effect: xdp_return_frame */
				kmem_alloc_drops += nframes;
			}
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];

			skb = __xdp_build_skb_from_frame(xdpf, skb,
							 xdpf->dev_rx);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			list_add_tail(&skb->list, &list);
		}
		netif_receive_skb_list(&list);

		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
					 sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}
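
/* Attach an XDP program to this entry.  Only programs with the expected
 * attach type BPF_XDP_CPUMAP that are compatible with this map are
 * accepted.
 */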
static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
				      struct bpf_map *map, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
	    !bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}
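
/* Allocate a cpumap entry on the NUMA node of the target CPU.  The new
 * entry starts with a refcount of two: one reference for the map slot
 * and one for the kthread (see the get_cpu_map_entry() calls below).
 */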
static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace period has elapsed, so XDP cannot enqueue any new
	 * packets for this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	free_percpu(rcpu->bulkq);
	/* Cannot kthread_stop() here; the last put frees rcpu resources */
	put_cpu_map_entry(rcpu);
}
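
/* After the xchg of the bpf_cpu_map_entry pointer, call_rcu() ensures
 * that any driver RCU critical sections have completed, but this does
 * not guarantee a flush has happened yet, because driver-side
 * rcu_read_lock/unlock only protects the running XDP program.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in the ptr_ring and the percpu bulkq).  The
 * refcnt makes sure the last user (kthread_stop vs. call_rcu) frees the
 * memory resources.
 *
 * Because the caller may run with preemption disabled, kthread_stop()
 * cannot be called directly; instead a work item (cpu_map_kthread_stop)
 * is scheduled, which waits for an RCU grace period before stopping the
 * kthread and emptying the queue.
 */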
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}
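
/* Writing a value with a non-zero qsize (re)creates the entry for that
 * CPU; qsize == 0 behaves like a delete.  A user-space sketch (names and
 * sizes are illustrative, not taken from this file):
 *
 *	struct bpf_cpumap_val val = { .qsize = 2048 };
 *	__u32 key = 2;	// redirect target CPU 2
 *	bpf_map_update_elem(map_fd, &key, &val, 0);
 */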
static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to the CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
		rcpu->cmap = cmap;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (which can be more than one that used this map)
	 * were disconnected from events.  Wait for outstanding critical
	 * sections in these programs to complete.  The RCU critical section
	 * only guarantees no further "XDP/bpf-side" reads against
	 * bpf_cpu_map->cpu_map; it does __not__ ensure pending flush
	 * operations (if any) are complete.
	 */
	synchronize_rcu();

	/* The remote CPUs can still be using the entries
	 * (struct bpf_cpu_map_entry).
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happens after RCU grace-period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall)
 * or by local_bh_disable() (from XDP calls inside NAPI).  The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = rcu_dereference_check(cmap->cpu_map[key],
				     rcu_read_lock_bh_held());
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
				      __cpu_map_lookup_elem);
}

BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = cpu_map_alloc,
	.map_free = cpu_map_free,
	.map_delete_elem = cpu_map_delete_elem,
	.map_update_elem = cpu_map_update_elem,
	.map_lookup_elem = cpu_map_lookup_elem,
	.map_get_next_key = cpu_map_get_next_key,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &cpu_map_btf_ids[0],
	.map_redirect = cpu_map_redirect,
};
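
/* Flush one per-CPU bulk queue into the destination CPU's ptr_ring,
 * under the ring's producer lock.  Frames that do not fit are freed and
 * accounted as drops in the enqueue tracepoint.
 */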
static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}
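
/* Runs under RCU-read-side, plus in softirq under NAPI protection,
 * which makes the per-CPU variable access safe.
 */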
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* The xdp_frame is always queued in the per-CPU bulk queue here and
	 * only moved to the destination ptr_ring at flush time; the flush
	 * runs on this same CPU at the end of the driver's NAPI poll (see
	 * __cpu_map_flush()), so queue time is very short.
	 */
	bq->q[bq->count++] = xdpf;

	/* Add to the flush list on the first enqueue of this cycle */
	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb)
{
	int ret;

	__skb_pull(skb, skb->mac_len);
	skb_set_redirected(skb, false);
	/* Tag bit 0 of the pointer so the kthread can tell SKBs apart
	 * from xdp_frames when consuming the ptr_ring.
	 */
	__ptr_set_bit(0, &skb);

	ret = ptr_ring_produce(rcpu->queue, skb);
	if (ret < 0)
		goto trace;

	wake_up_process(rcpu->kthread);
trace:
	trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
	return ret;
}

void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);