0001
0002
0003
0004
0005
0006
0007
0008 #define pr_fmt(fmt) "damon-lru-sort: " fmt
0009
0010 #include <linux/damon.h>
0011 #include <linux/ioport.h>
0012 #include <linux/module.h>
0013 #include <linux/sched.h>
0014 #include <linux/workqueue.h>
0015
0016 #ifdef MODULE_PARAM_PREFIX
0017 #undef MODULE_PARAM_PREFIX
0018 #endif
0019 #define MODULE_PARAM_PREFIX "damon_lru_sort."
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
/*
 * Enable or disable DAMON_LRU_SORT.
 *
 * Registered with custom param ops near the bottom of this file (see
 * enabled_param_ops), so writing this parameter also schedules the delayed
 * work that actually starts or stops the monitoring.
 */
static bool enabled __read_mostly;

/*
 * Make DAMON_LRU_SORT re-read the input parameters, except ``enabled``.
 *
 * Parameters updated while DAMON_LRU_SORT is running are not applied by
 * default.  Setting this as ``Y`` makes the running instance re-apply them
 * from its next watermarks check or aggregation callback (see
 * damon_lru_sort_handle_commit_inputs()); it is then reset to ``N``.
 */
static bool commit_inputs __read_mostly;
module_param(commit_inputs, bool, 0600);

/*
 * Access frequency threshold for hot memory regions identification, in
 * permil of the maximum possible access frequency.  Regions accessed at this
 * ratio or more are handled as hot (see the hot_thres computation in
 * damon_lru_sort_apply_parameters()).  500 (50%) by default.
 */
static unsigned long hot_thres_access_freq = 500;
module_param(hot_thres_access_freq, ulong, 0600);

/*
 * Minimal age of unaccessed memory regions for handling them as cold, in
 * microseconds; converted to a number of aggregation intervals in
 * damon_lru_sort_apply_parameters().  120 seconds by default.
 */
static unsigned long cold_min_age __read_mostly = 120000000;
module_param(cold_min_age, ulong, 0600);

/*
 * Limit of time for trying the LRU lists sorting, in milliseconds per
 * quota_reset_interval_ms.  The budget is split evenly between the hot and
 * the cold schemes (quota_ms / 2 each; see
 * damon_lru_sort_new_{hot,cold}_scheme()).  10 ms by default.
 */
static unsigned long quota_ms __read_mostly = 10;
module_param(quota_ms, ulong, 0600);

/*
 * The time quota charge reset interval, in milliseconds.  1 second by
 * default.
 */
static unsigned long quota_reset_interval_ms __read_mostly = 1000;
module_param(quota_reset_interval_ms, ulong, 0600);

/*
 * The watermarks check time interval, in microseconds, for the
 * free-memory-rate metric used by both schemes.  5 seconds by default.
 */
static unsigned long wmarks_interval __read_mostly = 5000000;
module_param(wmarks_interval, ulong, 0600);

/*
 * High watermark of the free memory rate, in per-thousand.  While the rate
 * is above this, the schemes stay deactivated.  200 (20%) by default.
 */
static unsigned long wmarks_high __read_mostly = 200;
module_param(wmarks_high, ulong, 0600);

/*
 * Middle watermark of the free memory rate, in per-thousand.  Falling under
 * this (while still above the low watermark) activates the schemes.
 * 150 (15%) by default.
 */
static unsigned long wmarks_mid __read_mostly = 150;
module_param(wmarks_mid, ulong, 0600);

/*
 * Low watermark of the free memory rate, in per-thousand.  Below this, the
 * schemes are deactivated.  50 (5%) by default.
 */
static unsigned long wmarks_low __read_mostly = 50;
module_param(wmarks_low, ulong, 0600);

/*
 * Sampling interval of the monitoring, in microseconds.  5 ms by default.
 */
static unsigned long sample_interval __read_mostly = 5000;
module_param(sample_interval, ulong, 0600);

/*
 * Aggregation interval of the monitoring, in microseconds.  100 ms by
 * default.
 */
static unsigned long aggr_interval __read_mostly = 100000;
module_param(aggr_interval, ulong, 0600);

/*
 * Minimum number of monitoring regions.  Lower bound of the monitoring
 * overhead/accuracy trade-off.  10 by default.
 */
static unsigned long min_nr_regions __read_mostly = 10;
module_param(min_nr_regions, ulong, 0600);

/*
 * Maximum number of monitoring regions.  Upper bound of the monitoring
 * overhead/accuracy trade-off.  1000 by default.
 */
static unsigned long max_nr_regions __read_mostly = 1000;
module_param(max_nr_regions, ulong, 0600);

/*
 * Start of the target memory region, in physical address.  If both this and
 * monitor_region_end are zero, the biggest System RAM resource is discovered
 * and used instead (see damon_lru_sort_apply_parameters()).
 */
static unsigned long monitor_region_start __read_mostly;
module_param(monitor_region_start, ulong, 0600);

/*
 * End of the target memory region, in physical address.  See the comment of
 * monitor_region_start above.
 */
static unsigned long monitor_region_end __read_mostly;
module_param(monitor_region_end, ulong, 0600);

/*
 * PID of the DAMON worker thread (read-only).  -1 while DAMON_LRU_SORT is
 * not running; set in damon_lru_sort_turn().
 */
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);

/*
 * The statistics parameters below are read-only and updated from the
 * after-aggregation callback (damon_lru_sort_after_aggregation()).
 */

/* Number of hot memory regions that tried to be LRU-sorted. */
static unsigned long nr_lru_sort_tried_hot_regions __read_mostly;
module_param(nr_lru_sort_tried_hot_regions, ulong, 0400);

/* Total bytes of hot memory regions that tried to be LRU-sorted. */
static unsigned long bytes_lru_sort_tried_hot_regions __read_mostly;
module_param(bytes_lru_sort_tried_hot_regions, ulong, 0400);

/* Number of hot memory regions that successfully got LRU-sorted. */
static unsigned long nr_lru_sorted_hot_regions __read_mostly;
module_param(nr_lru_sorted_hot_regions, ulong, 0400);

/* Total bytes of hot memory regions that successfully got LRU-sorted. */
static unsigned long bytes_lru_sorted_hot_regions __read_mostly;
module_param(bytes_lru_sorted_hot_regions, ulong, 0400);

/* Number of times the quota limit of the hot scheme has exceeded. */
static unsigned long nr_hot_quota_exceeds __read_mostly;
module_param(nr_hot_quota_exceeds, ulong, 0400);

/* Number of cold memory regions that tried to be LRU-sorted. */
static unsigned long nr_lru_sort_tried_cold_regions __read_mostly;
module_param(nr_lru_sort_tried_cold_regions, ulong, 0400);

/* Total bytes of cold memory regions that tried to be LRU-sorted. */
static unsigned long bytes_lru_sort_tried_cold_regions __read_mostly;
module_param(bytes_lru_sort_tried_cold_regions, ulong, 0400);

/* Number of cold memory regions that successfully got LRU-sorted. */
static unsigned long nr_lru_sorted_cold_regions __read_mostly;
module_param(nr_lru_sorted_cold_regions, ulong, 0400);

/* Total bytes of cold memory regions that successfully got LRU-sorted. */
static unsigned long bytes_lru_sorted_cold_regions __read_mostly;
module_param(bytes_lru_sorted_cold_regions, ulong, 0400);

/* Number of times the quota limit of the cold scheme has exceeded. */
static unsigned long nr_cold_quota_exceeds __read_mostly;
module_param(nr_cold_quota_exceeds, ulong, 0400);
0256
/* The monitoring context and its single monitoring target of this module. */
static struct damon_ctx *ctx;
static struct damon_target *target;

/* Result carrier of walk_system_ram(): the biggest System RAM span found. */
struct damon_lru_sort_ram_walk_arg {
	unsigned long start;
	unsigned long end;
};
0264
0265 static int walk_system_ram(struct resource *res, void *arg)
0266 {
0267 struct damon_lru_sort_ram_walk_arg *a = arg;
0268
0269 if (a->end - a->start < resource_size(res)) {
0270 a->start = res->start;
0271 a->end = res->end;
0272 }
0273 return 0;
0274 }
0275
0276
0277
0278
0279
0280 static bool get_monitoring_region(unsigned long *start, unsigned long *end)
0281 {
0282 struct damon_lru_sort_ram_walk_arg arg = {};
0283
0284 walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
0285 if (arg.end <= arg.start)
0286 return false;
0287
0288 *start = arg.start;
0289 *end = arg.end;
0290 return true;
0291 }
0292
0293
/*
 * Create a scheme that marks regions accessed at @hot_thres frequency or
 * more as accessed on their LRU lists (DAMOS_LRU_PRIO).
 */
static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
{
	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = wmarks_interval,
		.high = wmarks_high,
		.mid = wmarks_mid,
		.low = wmarks_low,
	};
	struct damos_quota quota = {
		/*
		 * Do not try LRU-lists sorting of hot pages for more than
		 * half of quota_ms milliseconds within
		 * quota_reset_interval_ms (the other half is for the cold
		 * scheme).
		 */
		.ms = quota_ms / 2,
		.sz = 0,
		.reset_interval = quota_reset_interval_ms,
		/* Within the quota, prioritize more frequently accessed regions. */
		.weight_sz = 0,
		.weight_nr_accesses = 1,
		.weight_age = 0,
	};
	struct damos *scheme = damon_new_scheme(
			/* Find regions of PAGE_SIZE or larger size, */
			PAGE_SIZE, ULONG_MAX,
			/* accessed at the threshold frequency or more, */
			hot_thres, UINT_MAX,
			/* of any age, */
			0, UINT_MAX,
			/* and prioritize those on the LRU lists, */
			DAMOS_LRU_PRIO,
			/* under the time quota above, */
			&quota,
			/* (de)activated according to the free memory rate. */
			&wmarks);

	return scheme;
}
0332
0333
/*
 * Create a scheme that marks regions unaccessed for @cold_thres aggregation
 * intervals or more as unaccessed on their LRU lists (DAMOS_LRU_DEPRIO).
 */
static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
{
	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = wmarks_interval,
		.high = wmarks_high,
		.mid = wmarks_mid,
		.low = wmarks_low,
	};
	struct damos_quota quota = {
		/*
		 * Do not try LRU-lists sorting of cold pages for more than
		 * half of quota_ms milliseconds within
		 * quota_reset_interval_ms (the other half is for the hot
		 * scheme).
		 */
		.ms = quota_ms / 2,
		.sz = 0,
		.reset_interval = quota_reset_interval_ms,
		/* Within the quota, prioritize regions unaccessed for longer. */
		.weight_sz = 0,
		.weight_nr_accesses = 0,
		.weight_age = 1,
	};
	struct damos *scheme = damon_new_scheme(
			/* Find regions of PAGE_SIZE or larger size, */
			PAGE_SIZE, ULONG_MAX,
			/* not accessed at all, */
			0, 0,
			/* for the threshold age or longer, */
			cold_thres, UINT_MAX,
			/* and deprioritize those on the LRU lists, */
			DAMOS_LRU_DEPRIO,
			/* under the time quota above, */
			&quota,
			/* (de)activated according to the free memory rate. */
			&wmarks);

	return scheme;
}
0373
0374 static int damon_lru_sort_apply_parameters(void)
0375 {
0376 struct damos *scheme, *next_scheme;
0377 struct damon_addr_range addr_range;
0378 unsigned int hot_thres, cold_thres;
0379 int err = 0;
0380
0381 err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
0382 min_nr_regions, max_nr_regions);
0383 if (err)
0384 return err;
0385
0386
0387 damon_for_each_scheme_safe(scheme, next_scheme, ctx)
0388 damon_destroy_scheme(scheme);
0389
0390
0391 hot_thres = aggr_interval / sample_interval * hot_thres_access_freq /
0392 1000;
0393 scheme = damon_lru_sort_new_hot_scheme(hot_thres);
0394 if (!scheme)
0395 return -ENOMEM;
0396 damon_add_scheme(ctx, scheme);
0397
0398 cold_thres = cold_min_age / aggr_interval;
0399 scheme = damon_lru_sort_new_cold_scheme(cold_thres);
0400 if (!scheme)
0401 return -ENOMEM;
0402 damon_add_scheme(ctx, scheme);
0403
0404 if (monitor_region_start > monitor_region_end)
0405 return -EINVAL;
0406 if (!monitor_region_start && !monitor_region_end &&
0407 !get_monitoring_region(&monitor_region_start,
0408 &monitor_region_end))
0409 return -EINVAL;
0410 addr_range.start = monitor_region_start;
0411 addr_range.end = monitor_region_end;
0412 return damon_set_regions(target, &addr_range, 1);
0413 }
0414
0415 static int damon_lru_sort_turn(bool on)
0416 {
0417 int err;
0418
0419 if (!on) {
0420 err = damon_stop(&ctx, 1);
0421 if (!err)
0422 kdamond_pid = -1;
0423 return err;
0424 }
0425
0426 err = damon_lru_sort_apply_parameters();
0427 if (err)
0428 return err;
0429
0430 err = damon_start(&ctx, 1, true);
0431 if (err)
0432 return err;
0433 kdamond_pid = ctx->kdamond->pid;
0434 return 0;
0435 }
0436
/*
 * Tentative definition; the real initialization is the
 * DECLARE_DELAYED_WORK() below.  NOTE(review): nothing between here and that
 * definition references the name, so this line looks droppable — verify
 * before removing.
 */
static struct delayed_work damon_lru_sort_timer;
0438 static void damon_lru_sort_timer_fn(struct work_struct *work)
0439 {
0440 static bool last_enabled;
0441 bool now_enabled;
0442
0443 now_enabled = enabled;
0444 if (last_enabled != now_enabled) {
0445 if (!damon_lru_sort_turn(now_enabled))
0446 last_enabled = now_enabled;
0447 else
0448 enabled = last_enabled;
0449 }
0450 }
static DECLARE_DELAYED_WORK(damon_lru_sort_timer, damon_lru_sort_timer_fn);

/* Set once module init completed; guards ``enabled`` writes made earlier. */
static bool damon_lru_sort_initialized;
0454
0455 static int damon_lru_sort_enabled_store(const char *val,
0456 const struct kernel_param *kp)
0457 {
0458 int rc = param_set_bool(val, kp);
0459
0460 if (rc < 0)
0461 return rc;
0462
0463 if (!damon_lru_sort_initialized)
0464 return rc;
0465
0466 schedule_delayed_work(&damon_lru_sort_timer, 0);
0467
0468 return 0;
0469 }
0470
/* Custom param ops so that writing ``enabled`` triggers the state change. */
static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_lru_sort_enabled_store,
	.get = param_get_bool,
};

module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled,
	"Enable or disable DAMON_LRU_SORT (default: disabled)");
0479
0480 static int damon_lru_sort_handle_commit_inputs(void)
0481 {
0482 int err;
0483
0484 if (!commit_inputs)
0485 return 0;
0486
0487 err = damon_lru_sort_apply_parameters();
0488 commit_inputs = false;
0489 return err;
0490 }
0491
0492 static int damon_lru_sort_after_aggregation(struct damon_ctx *c)
0493 {
0494 struct damos *s;
0495
0496
0497 damon_for_each_scheme(s, c) {
0498 if (s->action == DAMOS_LRU_PRIO) {
0499 nr_lru_sort_tried_hot_regions = s->stat.nr_tried;
0500 bytes_lru_sort_tried_hot_regions = s->stat.sz_tried;
0501 nr_lru_sorted_hot_regions = s->stat.nr_applied;
0502 bytes_lru_sorted_hot_regions = s->stat.sz_applied;
0503 nr_hot_quota_exceeds = s->stat.qt_exceeds;
0504 } else if (s->action == DAMOS_LRU_DEPRIO) {
0505 nr_lru_sort_tried_cold_regions = s->stat.nr_tried;
0506 bytes_lru_sort_tried_cold_regions = s->stat.sz_tried;
0507 nr_lru_sorted_cold_regions = s->stat.nr_applied;
0508 bytes_lru_sorted_cold_regions = s->stat.sz_applied;
0509 nr_cold_quota_exceeds = s->stat.qt_exceeds;
0510 }
0511 }
0512
0513 return damon_lru_sort_handle_commit_inputs();
0514 }
0515
/*
 * DAMON after-watermarks-check callback: honor pending commit_inputs
 * requests even while the schemes are deactivated by the watermarks.
 */
static int damon_lru_sort_after_wmarks_check(struct damon_ctx *c)
{
	return damon_lru_sort_handle_commit_inputs();
}
0520
0521 static int __init damon_lru_sort_init(void)
0522 {
0523 ctx = damon_new_ctx();
0524 if (!ctx)
0525 return -ENOMEM;
0526
0527 if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
0528 damon_destroy_ctx(ctx);
0529 return -EINVAL;
0530 }
0531
0532 ctx->callback.after_wmarks_check = damon_lru_sort_after_wmarks_check;
0533 ctx->callback.after_aggregation = damon_lru_sort_after_aggregation;
0534
0535 target = damon_new_target();
0536 if (!target) {
0537 damon_destroy_ctx(ctx);
0538 return -ENOMEM;
0539 }
0540 damon_add_target(ctx, target);
0541
0542 schedule_delayed_work(&damon_lru_sort_timer, 0);
0543
0544 damon_lru_sort_initialized = true;
0545 return 0;
0546 }
0547
0548 module_init(damon_lru_sort_init);