// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

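/*
 * rmap_walk() callback for damon_pa_mkold().  Resets the access-tracking
 * state (via damon_ptep_mkold()/damon_pmdp_mkold()) of every PTE or PMD that
 * maps @folio in @vma.  Always returns true so the walk continues over the
 * remaining VMAs.
 */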
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}

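/*
 * Mark the page of the given physical address as "old", i.e. reset its
 * access-tracking state, so that a later damon_pa_young() call can tell
 * whether the page has been accessed in between.  Unmapped pages are simply
 * marked idle; mapped ones are handled through an rmap walk.
 */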
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

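/*
 * Prepare an access check for @r by picking a random sampling address inside
 * the region and resetting the access-tracking state of the page backing it.
 */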
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
		struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(ctx, r);
	}
}

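/* Result of an rmap-based access check, filled by __damon_pa_young() */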
struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};

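/*
 * rmap_walk() callback for damon_pa_young().  Checks whether any PTE or PMD
 * mapping @folio in @vma has been accessed since the last damon_pa_mkold(),
 * consulting the young bit, the page idle flag, and MMU notifiers.  Stops
 * the walk early once an access has been found.
 */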
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = HPAGE_PMD_SIZE;
#else
			WARN_ON_ONCE(1);
#endif
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	return !result->accessed;
}

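/*
 * Check whether the page of the given physical address has been accessed
 * since the last damon_pa_mkold() call on it.  Sets *page_sz to the size of
 * the mapping the result is based on (PAGE_SIZE or HPAGE_PMD_SIZE).
 */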
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			result.accessed = false;
		else
			result.accessed = true;
		folio_put(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio)) {
		folio_put(folio);
		return false;
	}

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
	folio_put(folio);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}

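/*
 * Check the access to @r's sampling address and update r->nr_accesses
 * accordingly.  The result of the last check is cached in static variables
 * so that consecutive regions whose sampling addresses fall in the same
 * (possibly huge) page reuse it instead of repeating the rmap walk.
 */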
static void __damon_pa_check_access(struct damon_ctx *ctx,
		struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	if (ALIGN_DOWN(last_addr, last_page_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

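/*
 * The 'check_accesses' monitoring callback: check every region of every
 * target and return the maximum observed nr_accesses.
 */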
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(ctx, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

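/*
 * DAMOS_PAGEOUT action: isolate the pages of the region from their LRU lists
 * and try to reclaim them.  Returns the amount of memory (in bytes) that was
 * actually reclaimed.
 */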
static unsigned long damon_pa_pageout(struct damon_region *r)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

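/*
 * DAMOS_LRU_PRIO action: mark every page of the region as accessed so that
 * the LRU logic prioritizes keeping it in memory.  Returns the number of
 * bytes the action was applied to.
 */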
static unsigned long damon_pa_mark_accessed(struct damon_region *r)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;
		mark_page_accessed(page);
		put_page(page);
		applied++;
	}
	return applied * PAGE_SIZE;
}

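/*
 * DAMOS_LRU_DEPRIO action: deactivate every page of the region, moving it to
 * the inactive LRU list.  Returns the number of bytes the action was applied
 * to.
 */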
static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
{
	unsigned long addr, applied = 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;
		deactivate_page(page);
		put_page(page);
		applied++;
	}
	return applied * PAGE_SIZE;
}

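/* The 'apply_scheme' callback: dispatch the DAMOS action for the region */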
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pa_pageout(r);
	case DAMOS_LRU_PRIO:
		return damon_pa_mark_accessed(r);
	case DAMOS_LRU_DEPRIO:
		return damon_pa_deactivate_pages(r);
	default:
		break;
	}
	return 0;
}

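/*
 * The 'get_scheme_score' callback: compute the priority of applying @scheme
 * to @r.  Cold regions score high for the reclaim-like actions
 * (DAMOS_PAGEOUT, DAMOS_LRU_DEPRIO) while hot regions score high for
 * DAMOS_LRU_PRIO.
 */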
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	case DAMOS_LRU_PRIO:
		return damon_hot_score(context, r, scheme);
	case DAMOS_LRU_DEPRIO:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

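/*
 * Register the physical address space monitoring operations
 * (DAMON_OPS_PADDR) with the DAMON core.  Callbacks that this operations set
 * does not need, such as 'init' and 'update', are left NULL.
 */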
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);