/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
   lru_cache.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


 */

#ifndef LRU_CACHE_H
#define LRU_CACHE_H

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/string.h> /* for memset */
#include <linux/seq_file.h>

/*
This header file (and its .c file; see there for the kernel-doc of the
functions) defines a helper framework to easily keep track of index:label
associations, and changes to an "active set" of objects, as well as pending
transactions, to persistently record those changes.

We use an LRU policy if it is necessary to "cool down" a region currently in
the active set before we can "heat" a previously unused region.

Because of this latter property, it is called "lru_cache".
As it actually Tracks Objects in an Active SeT, we could also call it
toast (incidentally that is what may happen to the data on the
backend storage upon next resync, if we don't get it right).

What for?

We replicate IO (more or less synchronously) to local and remote disk.

For crash recovery after replication node failure,
we need to resync all regions that have been the target of in-flight WRITE IO
(in use, or "hot", regions), as we don't know whether or not those WRITEs
have made it to stable storage.

To avoid a "full resync", we need to persistently track these regions.

This is known as a "write intent log", and can be implemented as an on-disk
(coarse or fine grained) bitmap, or other meta data.

To avoid the overhead of frequent extra writes to this meta data area,
usually the condition is softened to regions that _may_ have been the target
of in-flight WRITE IO, e.g. by only lazily clearing the on-disk write-intent
bitmap, trading frequency of meta data transactions against the amount of
(possibly unnecessary) resync traffic.

If we set a hard limit on the area that may be "hot" at any given time, we
limit the amount of resync traffic needed for crash recovery.

For recovery after replication link failure,
we need to resync all blocks that have been changed on the other replica
in the meantime, or, if both replicas have been changed independently [*],
all blocks that have been changed on either replica in the meantime.
[*] usually as a result of a cluster split-brain and insufficient protection,
but there are valid use cases to do this on purpose.

Tracking those blocks can be implemented as a "dirty bitmap".
Having it fine-grained reduces the amount of resync traffic.
It should also be persistent, to allow for reboots (or crashes)
while the replication link is down.

There are various possible implementations for persistently storing
write intent log information, three of which are mentioned here.

"Chunk dirtying"
The on-disk "dirty bitmap" may be re-used as a "write-intent" bitmap as well.
To reduce the frequency of bitmap updates for write-intent log purposes,
one could dirty "chunks" (of some size) of the (fine grained) on-disk bitmap
at a time, while keeping the in-memory "dirty" bitmap as clean as
possible, flushing it to disk again when a previously "hot" (and on-disk
dirtied as a full chunk) area "cools down" again (no IO in flight anymore,
and none expected in the near future either).

"Explicit (coarse) write intent bitmap"
Another implementation could choose a (probably coarse) explicit bitmap for
write-intent log purposes, in addition to the fine grained dirty bitmap.

"Activity log"
Yet another implementation may keep track of the hot regions by starting
with an empty set, and writing down a journal of region numbers that have
become "hot", or have "cooled down" again.

To be able to use a ring buffer for this journal of changes to the active
set, we not only record the actual changes to that set, but also record the
unchanged members of the set in a round robin fashion. To do so, we use a
fixed (but configurable) number of slots which we can identify by index, and
associate region numbers (labels) with these indices.
For each transaction recording a change to the active set, we record the
change itself (index: -old_label, +new_label), and which index is associated
with which label (index: current_label) within a certain sliding window that
is moved further over the available indices with each such transaction.

Thus, for crash recovery, if the ring buffer is sufficiently large, we can
accurately reconstruct the active set.

Sufficiently large depends only on the maximum number of active objects, and
the size of the sliding window recording "index: current_label" associations
within each transaction.

This is what we call the "activity log".

Currently we need one activity log transaction per single label change, which
does not give much benefit over the "dirty chunks of bitmap" approach, other
than potentially fewer seeks.

We plan to change the transaction format to support multiple changes per
transaction, which would then reduce several (disjoint, "random") updates to
the bitmap into one transaction to the activity log ring buffer.
*/
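
/*
 * A small worked example of the above transaction scheme.  This is an
 * illustration only: the numbers are made up, and the actual on-disk
 * transaction format is defined by the user of this framework (e.g. DRBD).
 * Assume 4 slots (indices 0..3) and a sliding window of 2
 * "index: current_label" pairs per transaction:
 *
 *	transaction N:    change "2: -7, +19",  window "0: 13", "1: 5"
 *	transaction N+1:  change "0: -13, +23", window "2: 19", "3: 11"
 *
 * Replaying both transactions in order (later entries override earlier ones)
 * reconstructs the complete association 0:23, 1:5, 2:19, 3:11, i.e. the
 * whole active set.  In general the ring buffer is "sufficiently large" once
 * it covers at least roughly nr_elements / window_size consecutive
 * transactions, plus the changes not yet covered by the window.
 */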

/* this defines an element in a tracked set
 * .colision is for hash table lookup.
 * When we process a new IO request, we know its sector, thus can deduce the
 * region number (label) easily. To do the label -> object lookup without a
 * full list walk, we use a simple hash table.
 *
 * .list is on one of three lists:
 *  in_use: currently in use (refcnt > 0, lc_number != LC_FREE)
 *     lru: unused but ready to be reused or recycled
 *          (refcnt == 0, lc_number != LC_FREE),
 *    free: unused but ready to be recycled
 *          (refcnt == 0, lc_number == LC_FREE),
 *
 * an element is said to be "in the active set",
 * if it is on either "in_use" or "lru", i.e. lc_number != LC_FREE.
 *
 * DRBD currently (May 2009) only uses 61 elements on the resync lru_cache
 * (total memory usage 2 pages), and up to 3833 elements on the act_log
 * lru_cache, totalling ~215 kB for a 64bit architecture, ~53 pages.
 *
 * We usually do not actually free these objects again, but only "recycle"
 * them, as the change "index: -old_label, +LC_FREE" would need a transaction
 * as well. This also means that using a kmem_cache to allocate the objects
 * from wastes some resources, but it avoids high order page allocations
 * in kmalloc.
 */
struct lc_element {
	struct hlist_node colision;
	struct list_head list;		/* LRU list or free list */
	unsigned refcnt;
	/* back "pointer" into lc_cache->element[index],
	 * for paranoia, and for "lc_index_of" */
	unsigned lc_index;
	/* if we want to track a larger set of objects,
	 * it needs to become an architecture independent u64 */
	unsigned lc_number;
	/* special label when on free list */
#define LC_FREE (~0U)

	/* for pending changes */
	unsigned lc_new_number;
};

struct lru_cache {
	/* the least recently used item is kept at lru->prev */
	struct list_head lru;
	struct list_head free;
	struct list_head in_use;
	struct list_head to_be_changed;

	/* the pre-created kmem cache to allocate the objects from */
	struct kmem_cache *lc_cache;

	/* size of tracked objects, used to memset(,0,) them in lc_reset */
	size_t element_size;
	/* offset of struct lc_element member in the tracked object */
	size_t element_off;

	/* number of elements (indices) */
	unsigned int nr_elements;
	/* Arbitrary limit on maximum tracked objects. Practical limit is much
	 * lower due to allocation failures, probably. For typical use cases,
	 * nr_elements should be a few thousand at most.
	 * This also limits the maximum value of lc_element.lc_index, allowing
	 * the 8 high bits of .lc_index to be overloaded with flags in the
	 * future. */
#define LC_MAX_ACTIVE	(1<<24)

	/* allow to accumulate a few (index:label) changes,
	 * but no more than max_pending_changes */
	unsigned int max_pending_changes;
	/* number of elements currently on to_be_changed list */
	unsigned int pending_changes;

	/* statistics */
	unsigned used; /* number of elements currently on in_use list */
	unsigned long hits, misses, starving, locked, changed;

	/* see below: flag-bits for lru_cache */
	unsigned long flags;


	void  *lc_private;
	const char *name;

	/* nr_elements there */
	struct hlist_head *lc_slot;
	struct lc_element **lc_element;
};
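
/*
 * Illustrative sketch of how the above is meant to be used.  The names
 * "struct app_extent" and "app_extent_cache" are hypothetical, not part of
 * this header: struct lc_element is embedded in the caller's tracked object,
 * element_size/element_off describe that embedding to lc_create(), and
 * lc_entry() (defined near the end of this file) maps an element back to
 * its containing object:
 *
 *	struct app_extent {
 *		unsigned long flags;
 *		struct lc_element lce;
 *	};
 *
 *	// app_extent_cache: a kmem_cache created by the caller for objects
 *	// of sizeof(struct app_extent); 16: max_pending_changes, 64: e_count
 *	struct lru_cache *lc =
 *		lc_create("app", app_extent_cache, 16, 64,
 *			  sizeof(struct app_extent),
 *			  offsetof(struct app_extent, lce));
 *
 *	struct app_extent *ext = lc_entry(e, struct app_extent, lce);
 */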

/* flag-bits for lru_cache */
enum {
	/* debugging aid, to catch concurrent access early.
	 * user needs to guarantee exclusive access by proper locking! */
	__LC_PARANOIA,

	/* annotate that the set is "dirty", possibly accumulating further
	 * changes, until a transaction is finally triggered */
	__LC_DIRTY,

	/* Locked, no further changes allowed.
	 * Also used to serialize changing transactions. */
	__LC_LOCKED,

	/* if we need to change the set, but currently there is no free nor
	 * unused element available, we are "starving", and must not give out
	 * further references, to guarantee that eventually some refcnt will
	 * drop to zero and we will be able to make progress again, changing
	 * the set, writing the transaction.
	 * if the statistics say we are frequently starving,
	 * nr_elements is too small. */
	__LC_STARVING,
};
#define LC_PARANOIA (1<<__LC_PARANOIA)
#define LC_DIRTY (1<<__LC_DIRTY)
#define LC_LOCKED (1<<__LC_LOCKED)
#define LC_STARVING (1<<__LC_STARVING)

extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
		unsigned max_pending_changes,
		unsigned e_count, size_t e_size, size_t e_off);
extern void lc_reset(struct lru_cache *lc);
extern void lc_destroy(struct lru_cache *lc);
extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
extern void lc_del(struct lru_cache *lc, struct lc_element *element);

extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
extern void lc_committed(struct lru_cache *lc);

struct seq_file;
extern void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);

extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
				void (*detail) (struct seq_file *, struct lc_element *));

/**
 * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set
 * @lc: the lru cache to operate on
 *
 * Allows (expects) the set to be "dirty".  Note that the reference counts and
 * order on the active and lru lists may still change.  Used to serialize
 * changing transactions.  Returns true if we acquired the lock.
 */
static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
	return !test_and_set_bit(__LC_LOCKED, &lc->flags);
}

/**
 * lc_try_lock - variant to stop lc_get() from changing the tracked set
 * @lc: the lru cache to operate on
 *
 * Note that the reference counts and order on the active and lru lists may
 * still change.  Only works on a "clean" set.  Returns true if we acquired the
 * lock, which means there are no pending changes, and any further attempt to
 * change the set will not succeed until the next lc_unlock().
 */
extern int lc_try_lock(struct lru_cache *lc);

/**
 * lc_unlock - unlock @lc, allow lc_get() to change the set again
 * @lc: the lru cache to operate on
 */
static inline void lc_unlock(struct lru_cache *lc)
{
	clear_bit(__LC_DIRTY, &lc->flags);
	clear_bit_unlock(__LC_LOCKED, &lc->flags);
}

extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);

#define lc_entry(ptr, type, member) \
	container_of(ptr, type, member)

extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i);
extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);

#endif
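
/*
 * Illustrative usage sketch (an assumption about typical use, not an API
 * contract; see lib/lru_cache.c for the authoritative kernel-doc).  The
 * caller is expected to serialize all calls on the lru_cache with its own
 * lock, and write_transaction() below stands for a hypothetical
 * caller-provided function that persists the pending
 * (index: -old_label, +new_label) changes before they take effect:
 *
 *	struct lc_element *e = lc_get(lc, enr);
 *
 *	if (!e)
 *		return -EBUSY;		// starving or locked: retry later
 *
 *	if (e->lc_number != enr) {	// the active set needs to change
 *		write_transaction(lc);	// hypothetical, caller specific
 *		lc_committed(lc);
 *	}
 *
 *	// region "enr" is now in the active set; submit the WRITE IO,
 *	// and once it has completed:
 *
 *	lc_put(lc, e);
 *
 * lc_try_lock_for_transaction()/lc_unlock() can be used by the code that
 * writes the transaction to keep lc_get() from adding further changes
 * while the current ones are being persisted.
 */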