/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
#include <linux/xarray.h>

struct mem_cgroup;

/* list_lru_walk_cb has to always return one of those */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup per node lists, indexed by node id */
	struct list_lru_one	node[];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG_KMEM
	struct list_head	list;
	int			shrinker_id;
	bool			memcg_aware;
	struct xarray		xa;
#endif
};
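
/*
 * A list_lru is normally embedded in the structure that owns the cached
 * objects, next to the shrinker that will reclaim them.  Illustrative
 * sketch only; struct my_cache and its fields are hypothetical and not
 * part of this header:
 *
 *	struct my_cache {
 *		struct list_lru		lru;
 *		struct shrinker		shrinker;
 *	};
 */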

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)			\
	__list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, NULL, shrinker)
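
/*
 * Typical setup and teardown, as a sketch.  my_cache_lru and my_shrinker
 * are hypothetical names, not part of this header, and error handling is
 * elided:
 *
 *	static struct list_lru my_cache_lru;
 *
 *	err = list_lru_init(&my_cache_lru);			// NUMA-aware only
 *	err = list_lru_init_memcg(&my_cache_lru, &my_shrinker);	// also memcg-aware
 *	...
 *	list_lru_destroy(&my_cache_lru);
 */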

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp);
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to track whether the
 * element already belongs to a list and is allowed to update it lazily.
 * Note, however, that this holds for *a* list, not *this* list: if the
 * caller keeps elements on more than one type of list, it is up to the
 * caller to fully remove the item from the previous list (with
 * list_lru_del(), for instance) before moving it to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
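
/*
 * Usage sketch for the two primitives above.  struct my_object, its
 * embedded lru_link list_head, my_cache_lru and my_cache_unused are all
 * hypothetical names, not part of this header:
 *
 *	// last reference dropped: park the object on the LRU
 *	if (list_lru_add(&my_cache_lru, &obj->lru_link))
 *		atomic_long_inc(&my_cache_unused);
 *
 *	// object is in use again: take it back off the LRU
 *	if (list_lru_del(&my_cache_lru, &obj->lru_link))
 *		atomic_long_dec(&my_cache_unused);
 */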

/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}
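
/*
 * A shrinker's ->count_objects() callback is often little more than a
 * wrapper around list_lru_shrink_count(), which picks the right node/memcg
 * list from the shrink_control.  Sketch only; my_shrink_count and
 * my_cache_lru are hypothetical:
 *
 *	static unsigned long my_shrink_count(struct shrinker *shrink,
 *					     struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_cache_lru, sc);
 *	}
 */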

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
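
/*
 * Sketch of a list_lru_walk_cb implementation, showing how the enum
 * lru_status values are typically used.  struct my_object, its lock and
 * refcount fields, and the dispose list passed in via @cb_arg are all
 * hypothetical:
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock,
 *			void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct my_object *obj = container_of(item, struct my_object,
 *						     lru_link);
 *
 *		if (!spin_trylock(&obj->lock))
 *			return LRU_SKIP;	// can't lock it now, try later
 *
 *		if (atomic_read(&obj->refcount)) {
 *			spin_unlock(&obj->lock);
 *			return LRU_ROTATE;	// still in use, give another pass
 *		}
 *
 *		list_lru_isolate_move(list, item, dispose);
 *		spin_unlock(&obj->lock);
 *		return LRU_REMOVED;		// taken off the lru, free later
 *	}
 */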

/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and the caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
/**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
				    int nid, struct mem_cgroup *memcg,
				    list_lru_walk_cb isolate, void *cb_arg,
				    unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}
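
/*
 * The matching ->scan_objects() side of a shrinker: walk the lru under the
 * shrink_control, collect freeable objects on a private dispose list via
 * the callback (see the my_isolate() sketch above), then free them outside
 * the lru lock.  All names are hypothetical:
 *
 *	static unsigned long my_shrink_scan(struct shrinker *shrink,
 *					    struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_cache_lru, sc, my_isolate,
 *					     &dispose);
 *		my_cache_free_list(&dispose);	// hypothetical helper
 *		return freed;
 *	}
 */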

static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
			 list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
				     &sc->nr_to_scan);
}

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
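
/*
 * For teardown-style draining (e.g. before list_lru_destroy()) the
 * all-node walk above can be looped until the lru is empty, reusing the
 * same isolate callback.  Hypothetical sketch:
 *
 *	LIST_HEAD(dispose);
 *
 *	while (list_lru_count(&my_cache_lru))
 *		list_lru_walk(&my_cache_lru, my_isolate, &dispose, ULONG_MAX);
 *	my_cache_free_list(&dispose);
 */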
#endif /* _LRU_LIST_H */