// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>

#include "bpf_lru_list.h"

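/* Batch sizes: when refilling a CPU's local free list, up to
 * LOCAL_FREE_TARGET nodes are taken from the global LRU, while the
 * per-cpu LRU shrinks in batches of PERCPU_FREE_TARGET.  The scan
 * limits used when rotating the lists are derived from the same
 * values.
 */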
#define LOCAL_FREE_TARGET       (128)
#define LOCAL_NR_SCANS          LOCAL_FREE_TARGET

#define PERCPU_FREE_TARGET      (4)
#define PERCPU_NR_SCANS         PERCPU_FREE_TARGET

/* Helpers to get the local list index */
#define LOCAL_LIST_IDX(t)   ((t) - BPF_LOCAL_LIST_T_OFFSET)
#define LOCAL_FREE_LIST_IDX LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
#define LOCAL_PENDING_LIST_IDX  LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
#define IS_LOCAL_LIST_TYPE(t)   ((t) >= BPF_LOCAL_LIST_T_OFFSET)

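/* Return the next possible CPU after @cpu, wrapping around to the
 * first possible CPU.  Used to walk the remote CPUs when stealing
 * free nodes.
 */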
static int get_next_cpu(int cpu)
{
    cpu = cpumask_next(cpu, cpu_possible_mask);
    if (cpu >= nr_cpu_ids)
        cpu = cpumask_first(cpu_possible_mask);
    return cpu;
}

/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
    return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}

static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
    return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}

/* bpf_lru_node helpers */
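/* The ref bit is never set in this file; it is set by users of the
 * LRU when a node is referenced and is cleared here whenever a node
 * is moved.  A set ref bit gives the node another chance during
 * rotation and shrinking.
 */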
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
    return node->ref;
}

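/* counts[] only tracks the sizes of the active and inactive lists
 * (type < NR_BPF_LRU_LIST_COUNT); it is used below to decide whether
 * the active list needs to be rotated.
 */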
static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
                   enum bpf_lru_list_type type)
{
    if (type < NR_BPF_LRU_LIST_COUNT)
        l->counts[type]++;
}

static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
                   enum bpf_lru_list_type type)
{
    if (type < NR_BPF_LRU_LIST_COUNT)
        l->counts[type]--;
}

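/* Move a node from the LRU list onto the given free list (either the
 * global free list or a per-cpu local free list) and update the
 * count of the list type it leaves.
 */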
static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
                    struct bpf_lru_node *node,
                    struct list_head *free_list,
                    enum bpf_lru_list_type tgt_free_type)
{
    if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
        return;

    /* If the node being removed is the next_inactive_rotation
     * candidate, move the next_inactive_rotation pointer as well.
     */
    if (&node->list == l->next_inactive_rotation)
        l->next_inactive_rotation = l->next_inactive_rotation->prev;

    bpf_lru_list_count_dec(l, node->type);

    node->type = tgt_free_type;
    list_move(&node->list, free_list);
}

/* Move a node from a local list into the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
                   struct bpf_lru_node *node,
                   enum bpf_lru_list_type tgt_type)
{
    if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
        WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
        return;

    bpf_lru_list_count_inc(l, tgt_type);
    node->type = tgt_type;
    node->ref = 0;
    list_move(&node->list, &l->lists[tgt_type]);
}

/* Move a node between or within the active and inactive lists
 * (e.g. active to inactive, inactive to active, or from the tail
 * of the active list back to its head).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
                struct bpf_lru_node *node,
                enum bpf_lru_list_type tgt_type)
{
    if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
        WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
        return;

    if (node->type != tgt_type) {
        bpf_lru_list_count_dec(l, node->type);
        bpf_lru_list_count_inc(l, tgt_type);
        node->type = tgt_type;
    }
    node->ref = 0;

    /* If the node being moved is the next_inactive_rotation
     * candidate, move the next_inactive_rotation pointer as well.
     */
    if (&node->list == l->next_inactive_rotation)
        l->next_inactive_rotation = l->next_inactive_rotation->prev;

    list_move(&node->list, &l->lists[tgt_type]);
}

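/* The inactive list is considered "low" when it holds fewer nodes
 * than the active list; in that case the active list gets rotated to
 * refill it.
 */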
static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
    return l->counts[BPF_LRU_LIST_T_INACTIVE] <
        l->counts[BPF_LRU_LIST_T_ACTIVE];
}

/* Rotate the active list:
 * 1. Start from the tail.
 * 2. If the node has the ref bit set, rotate it back to the head
 *    of the active list with the ref bit cleared, giving the node
 *    one more chance to survive in the active list.
 * 3. If the ref bit is not set, move the node to the head of the
 *    inactive list.
 * 4. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
                     struct bpf_lru_list *l)
{
    struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
    struct bpf_lru_node *node, *tmp_node, *first_node;
    unsigned int i = 0;

    first_node = list_first_entry(active, struct bpf_lru_node, list);
    list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
        if (bpf_lru_node_is_ref(node))
            __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
        else
            __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

        if (++i == lru->nr_scans || node == first_node)
            break;
    }
}

/* Rotate the inactive list.  It starts from next_inactive_rotation.
 * 1. If the node has the ref bit set, it is moved to the head of
 *    the active list with the ref bit cleared.
 * 2. If the node does not have the ref bit set, it is left at its
 *    current location (i.e. nothing is done) so that it can be
 *    considered during the next inactive shrink.
 * 3. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
                       struct bpf_lru_list *l)
{
    struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
    struct list_head *cur, *last, *next = inactive;
    struct bpf_lru_node *node;
    unsigned int i = 0;

    if (list_empty(inactive))
        return;

    last = l->next_inactive_rotation->next;
    if (last == inactive)
        last = last->next;

    cur = l->next_inactive_rotation;
    while (i < lru->nr_scans) {
        if (cur == inactive) {
            cur = cur->prev;
            continue;
        }

        node = list_entry(cur, struct bpf_lru_node, list);
        next = cur->prev;
        if (bpf_lru_node_is_ref(node))
            __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
        if (cur == last)
            break;
        cur = next;
        i++;
    }

    l->next_inactive_rotation = next;
}

/* Shrink the inactive list.  It starts from the tail of the
 * inactive list and only moves nodes without the ref bit set
 * to the designated free list.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
                   struct bpf_lru_list *l,
                   unsigned int tgt_nshrink,
                   struct list_head *free_list,
                   enum bpf_lru_list_type tgt_free_type)
{
    struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
    struct bpf_lru_node *node, *tmp_node;
    unsigned int nshrinked = 0;
    unsigned int i = 0;

    list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
        if (bpf_lru_node_is_ref(node)) {
            __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
        } else if (lru->del_from_htab(lru->del_arg, node)) {
            __bpf_lru_node_move_to_free(l, node, free_list,
                            tgt_free_type);
            if (++nshrinked == tgt_nshrink)
                break;
        }

        if (++i == lru->nr_scans)
            break;
    }

    return nshrinked;
}

/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
    if (bpf_lru_list_inactive_low(l))
        __bpf_lru_list_rotate_active(lru, l);

    __bpf_lru_list_rotate_inactive(lru, l);
}

/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If that does not yield a free node, it simply removes one
 * node from either the inactive or the active list without
 * honoring the ref bit, preferring the inactive list over the
 * active list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
                      struct bpf_lru_list *l,
                      unsigned int tgt_nshrink,
                      struct list_head *free_list,
                      enum bpf_lru_list_type tgt_free_type)
{
    struct bpf_lru_node *node, *tmp_node;
    struct list_head *force_shrink_list;
    unsigned int nshrinked;

    nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
                           free_list, tgt_free_type);
    if (nshrinked)
        return nshrinked;

    /* Do a force shrink by ignoring the reference bit */
    if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
        force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
    else
        force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

    list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
                     list) {
        if (lru->del_from_htab(lru->del_arg, node)) {
            __bpf_lru_node_move_to_free(l, node, free_list,
                            tgt_free_type);
            return 1;
        }
    }

    return 0;
}

/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
                   struct bpf_lru_locallist *loc_l)
{
    struct bpf_lru_node *node, *tmp_node;

    list_for_each_entry_safe_reverse(node, tmp_node,
                     local_pending_list(loc_l), list) {
        if (bpf_lru_node_is_ref(node))
            __bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
        else
            __bpf_lru_node_move_in(l, node,
                           BPF_LRU_LIST_T_INACTIVE);
    }
}

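/* Return a node to the free list of the global LRU list, under the
 * list lock.
 */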
static void bpf_lru_list_push_free(struct bpf_lru_list *l,
                   struct bpf_lru_node *node)
{
    unsigned long flags;

    if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
        return;

    raw_spin_lock_irqsave(&l->lock, flags);
    __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
    raw_spin_unlock_irqrestore(&l->lock, flags);
}

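/* Refill a CPU's local free list from the global LRU: flush the
 * local pending list back first, rotate the global lists, then move
 * up to LOCAL_FREE_TARGET free nodes (shrinking the LRU if the free
 * list alone is not enough) onto the local free list.
 */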
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
                       struct bpf_lru_locallist *loc_l)
{
    struct bpf_lru_list *l = &lru->common_lru.lru_list;
    struct bpf_lru_node *node, *tmp_node;
    unsigned int nfree = 0;

    raw_spin_lock(&l->lock);

    __local_list_flush(l, loc_l);

    __bpf_lru_list_rotate(lru, l);

    list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
                 list) {
        __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
                        BPF_LRU_LOCAL_LIST_T_FREE);
        if (++nfree == LOCAL_FREE_TARGET)
            break;
    }

    if (nfree < LOCAL_FREE_TARGET)
        __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
                      local_free_list(loc_l),
                      BPF_LRU_LOCAL_LIST_T_FREE);

    raw_spin_unlock(&l->lock);
}

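/* Stamp the node with its hash and owning CPU and park it on the
 * head of the local pending list.
 */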
static void __local_list_add_pending(struct bpf_lru *lru,
                     struct bpf_lru_locallist *loc_l,
                     int cpu,
                     struct bpf_lru_node *node,
                     u32 hash)
{
    *(u32 *)((void *)node + lru->hash_offset) = hash;
    node->cpu = cpu;
    node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
    node->ref = 0;
    list_add(&node->list, local_pending_list(loc_l));
}

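/* Pop the first node from the local free list, or return NULL if it
 * is empty.
 */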
static struct bpf_lru_node *
__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{
    struct bpf_lru_node *node;

    node = list_first_entry_or_null(local_free_list(loc_l),
                    struct bpf_lru_node,
                    list);
    if (node)
        list_del(&node->list);

    return node;
}

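/* Take a node from the tail of the local pending list, preferring
 * nodes without the ref bit set; a second pass ignores the ref bit.
 * The node must also be removable from the htab via del_from_htab().
 */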
static struct bpf_lru_node *
__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
{
    struct bpf_lru_node *node;
    bool force = false;

ignore_ref:
    /* Get from the tail (i.e. the older elements) of the pending list. */
    list_for_each_entry_reverse(node, local_pending_list(loc_l),
                    list) {
        if ((!bpf_lru_node_is_ref(node) || force) &&
            lru->del_from_htab(lru->del_arg, node)) {
            list_del(&node->list);
            return node;
        }
    }

    if (!force) {
        force = true;
        goto ignore_ref;
    }

    return NULL;
}

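/* Pop a free node from this CPU's LRU: rotate the lists, shrink if
 * the free list is empty, then take the first free node, record the
 * hash in it and move it to the inactive list.
 */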
static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
                            u32 hash)
{
    struct list_head *free_list;
    struct bpf_lru_node *node = NULL;
    struct bpf_lru_list *l;
    unsigned long flags;
    int cpu = raw_smp_processor_id();

    l = per_cpu_ptr(lru->percpu_lru, cpu);

    raw_spin_lock_irqsave(&l->lock, flags);

    __bpf_lru_list_rotate(lru, l);

    free_list = &l->lists[BPF_LRU_LIST_T_FREE];
    if (list_empty(free_list))
        __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
                      BPF_LRU_LIST_T_FREE);

    if (!list_empty(free_list)) {
        node = list_first_entry(free_list, struct bpf_lru_node, list);
        *(u32 *)((void *)node + lru->hash_offset) = hash;
        node->ref = 0;
        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
    }

    raw_spin_unlock_irqrestore(&l->lock, flags);

    return node;
}

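/* Pop a free node for the common (shared) LRU: try this CPU's local
 * free list, refill it from the global LRU if needed, and as a last
 * resort steal from the local lists of other CPUs.  A node that is
 * found is placed on the local pending list.
 */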
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
                            u32 hash)
{
    struct bpf_lru_locallist *loc_l, *steal_loc_l;
    struct bpf_common_lru *clru = &lru->common_lru;
    struct bpf_lru_node *node;
    int steal, first_steal;
    unsigned long flags;
    int cpu = raw_smp_processor_id();

    loc_l = per_cpu_ptr(clru->local_list, cpu);

    raw_spin_lock_irqsave(&loc_l->lock, flags);

    node = __local_list_pop_free(loc_l);
    if (!node) {
        bpf_lru_list_pop_free_to_local(lru, loc_l);
        node = __local_list_pop_free(loc_l);
    }

    if (node)
        __local_list_add_pending(lru, loc_l, cpu, node, hash);

    raw_spin_unlock_irqrestore(&loc_l->lock, flags);

    if (node)
        return node;

    /* No free node was found on the local free list or
     * the global LRU list.
     *
     * Steal from the local free/pending lists of the
     * current CPU and of the remote CPUs in round-robin
     * order, starting with the loc_l->next_steal CPU.
     */

    first_steal = loc_l->next_steal;
    steal = first_steal;
    do {
        steal_loc_l = per_cpu_ptr(clru->local_list, steal);

        raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

        node = __local_list_pop_free(steal_loc_l);
        if (!node)
            node = __local_list_pop_pending(lru, steal_loc_l);

        raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

        steal = get_next_cpu(steal);
    } while (!node && steal != first_steal);

    loc_l->next_steal = steal;

    if (node) {
        raw_spin_lock_irqsave(&loc_l->lock, flags);
        __local_list_add_pending(lru, loc_l, cpu, node, hash);
        raw_spin_unlock_irqrestore(&loc_l->lock, flags);
    }

    return node;
}

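/* Get a free node and associate it with @hash.  Dispatches to the
 * per-cpu or the common LRU implementation.
 */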
struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
{
    if (lru->percpu)
        return bpf_percpu_lru_pop_free(lru, hash);
    else
        return bpf_common_lru_pop_free(lru, hash);
}

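/* Return a node to the common LRU.  A node still pending on a local
 * list goes back to that CPU's local free list; anything else is
 * pushed onto the global free list.
 */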
static void bpf_common_lru_push_free(struct bpf_lru *lru,
                     struct bpf_lru_node *node)
{
    u8 node_type = READ_ONCE(node->type);
    unsigned long flags;

    if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
        WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
        return;

    if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
        struct bpf_lru_locallist *loc_l;

        loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

        raw_spin_lock_irqsave(&loc_l->lock, flags);

        if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
            raw_spin_unlock_irqrestore(&loc_l->lock, flags);
            goto check_lru_list;
        }

        node->type = BPF_LRU_LOCAL_LIST_T_FREE;
        node->ref = 0;
        list_move(&node->list, local_free_list(loc_l));

        raw_spin_unlock_irqrestore(&loc_l->lock, flags);
        return;
    }

check_lru_list:
    bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}

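/* Return a node to the free list of the LRU of its owning CPU. */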
static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
                     struct bpf_lru_node *node)
{
    struct bpf_lru_list *l;
    unsigned long flags;

    l = per_cpu_ptr(lru->percpu_lru, node->cpu);

    raw_spin_lock_irqsave(&l->lock, flags);

    __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);

    raw_spin_unlock_irqrestore(&l->lock, flags);
}

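/* Give a node back to the LRU.  Dispatches to the per-cpu or the
 * common implementation.
 */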
void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
{
    if (lru->percpu)
        bpf_percpu_lru_push_free(lru, node);
    else
        bpf_common_lru_push_free(lru, node);
}

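/* Carve nr_elems nodes out of @buf (one every @elem_size bytes, with
 * the bpf_lru_node at @node_offset) and put them all on the global
 * free list.
 */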
static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
                    u32 node_offset, u32 elem_size,
                    u32 nr_elems)
{
    struct bpf_lru_list *l = &lru->common_lru.lru_list;
    u32 i;

    for (i = 0; i < nr_elems; i++) {
        struct bpf_lru_node *node;

        node = (struct bpf_lru_node *)(buf + node_offset);
        node->type = BPF_LRU_LIST_T_FREE;
        node->ref = 0;
        list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
        buf += elem_size;
    }
}

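/* Spread the nodes in @buf across the free lists of all possible
 * CPUs, nr_elems / num_possible_cpus() nodes per CPU.
 */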
static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
                    u32 node_offset, u32 elem_size,
                    u32 nr_elems)
{
    u32 i, pcpu_entries;
    int cpu;
    struct bpf_lru_list *l;

    pcpu_entries = nr_elems / num_possible_cpus();

    i = 0;

    for_each_possible_cpu(cpu) {
        struct bpf_lru_node *node;

        l = per_cpu_ptr(lru->percpu_lru, cpu);
again:
        node = (struct bpf_lru_node *)(buf + node_offset);
        node->cpu = cpu;
        node->type = BPF_LRU_LIST_T_FREE;
        node->ref = 0;
        list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
        i++;
        buf += elem_size;
        if (i == nr_elems)
            break;
        if (i % pcpu_entries)
            goto again;
    }
}

void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
              u32 elem_size, u32 nr_elems)
{
    if (lru->percpu)
        bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
                    nr_elems);
    else
        bpf_common_lru_populate(lru, buf, node_offset, elem_size,
                    nr_elems);
}

static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
{
    int i;

    for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
        INIT_LIST_HEAD(&loc_l->lists[i]);

    loc_l->next_steal = cpu;

    raw_spin_lock_init(&loc_l->lock);
}

static void bpf_lru_list_init(struct bpf_lru_list *l)
{
    int i;

    for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
        INIT_LIST_HEAD(&l->lists[i]);

    for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
        l->counts[i] = 0;

    l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

    raw_spin_lock_init(&l->lock);
}

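/* Initialize the LRU.  With @percpu set, every CPU gets its own
 * independent LRU list and the smaller PERCPU_NR_SCANS scan limit;
 * otherwise a single global LRU list is shared by all CPUs and is
 * front-ended by per-cpu local free/pending lists.  @hash_offset is
 * the offset of the u32 hash field relative to the bpf_lru_node, and
 * del_from_htab()/@del_arg are used to unlink an element from the
 * htab when a node is shrunk or reused.
 */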
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
         del_from_htab_func del_from_htab, void *del_arg)
{
    int cpu;

    if (percpu) {
        lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
        if (!lru->percpu_lru)
            return -ENOMEM;

        for_each_possible_cpu(cpu) {
            struct bpf_lru_list *l;

            l = per_cpu_ptr(lru->percpu_lru, cpu);
            bpf_lru_list_init(l);
        }
        lru->nr_scans = PERCPU_NR_SCANS;
    } else {
        struct bpf_common_lru *clru = &lru->common_lru;

        clru->local_list = alloc_percpu(struct bpf_lru_locallist);
        if (!clru->local_list)
            return -ENOMEM;

        for_each_possible_cpu(cpu) {
            struct bpf_lru_locallist *loc_l;

            loc_l = per_cpu_ptr(clru->local_list, cpu);
            bpf_lru_locallist_init(loc_l, cpu);
        }

        bpf_lru_list_init(&clru->lru_list);
        lru->nr_scans = LOCAL_NR_SCANS;
    }

    lru->percpu = percpu;
    lru->del_from_htab = del_from_htab;
    lru->del_arg = del_arg;
    lru->hash_offset = hash_offset;

    return 0;
}

void bpf_lru_destroy(struct bpf_lru *lru)
{
    if (lru->percpu)
        free_percpu(lru->percpu_lru);
    else
        free_percpu(lru->common_lru.local_list);
}