// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
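 *
 * As a worked illustration with made-up numbers (not taken from the
 * code): suppose the eviction counter read E = 1000 when a page was
 * reclaimed and R = 1300 when that page faults back in, giving a
 * refault distance of R - E = 300.  If the active list holds
 * NR_active = 500 pages, then (R - E) <= NR_active: the page could
 * have stayed resident had the inactive list been 300 slots larger,
 * so it is a candidate for optimistic activation.  With NR_active =
 * 200 instead, the distance exceeds the active list and the refault
 * is left alone.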
 *
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim.  The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define WORKINGSET_SHIFT 1
#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 WORKINGSET_SHIFT + NODES_SHIFT + \
			 MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults.  However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault.  In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << WORKINGSET_SHIFT) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
	entry >>= WORKINGSET_SHIFT;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}
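
/*
 * A sketch of the resulting shadow entry layout, reading pack_shadow()
 * above from least to most significant bits (the exact field widths
 * depend on the configuration's NODES_SHIFT and MEM_CGROUP_ID_SHIFT):
 *
 *	bit 0                          : workingset flag (WORKINGSET_SHIFT)
 *	next NODES_SHIFT bits          : NUMA node id
 *	next MEM_CGROUP_ID_SHIFT bits  : memory cgroup id
 *	remaining value bits           : eviction counter >> bucket_order
 *
 * The topmost BITS_PER_LONG - BITS_PER_XA_VALUE bit(s) are consumed by
 * the xa_mk_value() encoding, which is why that term is part of
 * EVICTION_SHIFT.  unpack_shadow() strips the fields in the reverse
 * order.
 */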

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions.  This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion.  That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a folio from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @folio: the folio being evicted
 *
 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
 * of the evicted @folio so that a later refault can be detected.
 */
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Folio is fully exclusive and pins folio's memory cgroup pointer */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	workingset_age_nonresident(lruvec, folio_nr_pages(folio));
	return pack_shadow(memcgid, pgdat, eviction,
			   folio_test_workingset(folio));
}
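
/*
 * Taken together (a summary sketch, not additional machinery): on
 * reclaim, workingset_eviction() above snapshots the lruvec's
 * nonresident_age into a shadow entry that replaces the folio in the
 * page cache; when the same offset is faulted in again,
 * workingset_refault() below reads the current counter, recovers the
 * snapshot from the shadow entry, and compares the difference - the
 * refault distance - against the workingset size to decide whether
 * the new folio should start out active.
 */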

/**
 * workingset_refault - Evaluate the refault of a previously evicted folio.
 * @folio: The freshly allocated replacement folio.
 * @shadow: Shadow entry of the evicted folio.
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted folio in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct folio *folio, void *shadow)
{
	bool file = folio_is_file_lru(folio);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;
	long nr;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID.  It might
	 * have been deleted since the folio's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared folio.  This is
	 * impossible to tell from the available data.  However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway.  Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases.  There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old.  But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again.  However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * The activation decision for this folio is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during folio reclaim is being determined.
	 *
	 * However, the cgroup that will own the folio is the one that
	 * is actually experiencing the refault event.
	 */
	nr = folio_nr_pages(folio);
	memcg = folio_memcg(folio);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);

	mem_cgroup_flush_stats_delayed();
	/*
	 * Compare the distance to the existing workingset size.  We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset.  Whether
	 * workingset competition needs to consider anon or not depends
	 * on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}
	if (refault_distance > workingset_size)
		goto out;

	folio_set_active(folio);
	workingset_age_nonresident(lruvec, nr);
	mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr);

	/* Folio was active prior to eviction */
	if (workingset) {
		folio_set_workingset(folio);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		lru_note_cost_folio(folio);
		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @folio: Folio that is being activated.
 */
void workingset_activation(struct folio *folio)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = folio_memcg_rcu(folio);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	struct address_space *mapping;

	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be.  The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	mapping = container_of(node->array, struct address_space, i_pages);
	lockdep_assert_held(&mapping->i_pages.xa_lock);

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries.  We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list.  Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case.  Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th.  Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}
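
/*
 * A worked version of the estimate above, with example numbers
 * (assuming 4096-byte pages, XA_CHUNK_SHIFT == 6, i.e. 64 slots per
 * node, and ~7 xa_nodes fitting in a page): max_nodes = pages >> 3,
 * so one node of roughly 4096 / 7 ~= 585 bytes is kept for every 8
 * pages.  That is about 585 / 8 ~= 73 bytes of shadow nodes per page
 * of memory, and 73 / 4096 is the ~1.8% quoted in
 * count_shadow_nodes().
 */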

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	if (!spin_trylock(&mapping->host->i_lock)) {
		xa_unlock(&mapping->i_pages);
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2).  However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker, "mm-shadow");
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
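
/*
 * For illustration, with example configuration values (a 64-bit build
 * where the xarray value tag takes 1 bit, WORKINGSET_SHIFT == 1,
 * NODES_SHIFT == 10 and MEM_CGROUP_ID_SHIFT == 16): EVICTION_SHIFT is
 * 28 and timestamp_bits is 36.  A machine with 16 GiB of 4 KiB pages
 * has totalram_pages() ~= 2^22, so max_order is 22 and bucket_order
 * stays 0; only when max_order exceeds timestamp_bits are evictions
 * grouped into coarser 2^bucket_order buckets.
 */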
module_init(workingset_init);