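// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Debugfs Interface
 */
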
#define pr_fmt(fmt) "damon-dbgfs: " fmt

#include <linux/damon.h>
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page_idle.h>
#include <linux/slab.h>

static struct damon_ctx **dbgfs_ctxs;
static int dbgfs_nr_ctxs;
static struct dentry **dbgfs_dirs;
static DEFINE_MUTEX(damon_dbgfs_lock);

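/*
 * Returns non-empty string on success, negative error code otherwise.
 */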
static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;

	/* We do not accept continuous write */
	if (*ppos)
		return ERR_PTR(-EINVAL);

	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return ERR_PTR(-ENOMEM);

	ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count);
	if (ret != count) {
		kfree(kbuf);
		return ERR_PTR(-EIO);
	}
	kbuf[ret] = '\0';

	return kbuf;
}

static ssize_t dbgfs_attrs_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char kbuf[128];
	int ret;

	mutex_lock(&ctx->kdamond_lock);
	ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
			ctx->sample_interval, ctx->aggr_interval,
			ctx->ops_update_interval, ctx->min_nr_regions,
			ctx->max_nr_regions);
	mutex_unlock(&ctx->kdamond_lock);

	return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
}

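/*
 * The 'attrs' debugfs file takes five space-separated unsigned long values,
 * passed to damon_set_attrs() in this order: sampling interval, aggregation
 * interval, operations update interval, minimum number of regions, and
 * maximum number of regions.  The values below are only illustrative:
 *
 *	# echo "5000 100000 1000000 10 1000" > attrs
 */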
static ssize_t dbgfs_attrs_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	unsigned long s, a, r, minr, maxr;
	char *kbuf;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
				&s, &a, &r, &minr, &maxr) != 5) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = damon_set_attrs(ctx, s, a, r, minr, maxr);
	if (!ret)
		ret = count;
unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
out:
	kfree(kbuf);
	return ret;
}

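/*
 * Return corresponding dbgfs' scheme action value (int) for the given
 * damos_action if the given damos_action value is valid and supported by
 * dbgfs, negative error code otherwise.
 */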
static int damos_action_to_dbgfs_scheme_action(enum damos_action action)
{
	switch (action) {
	case DAMOS_WILLNEED:
		return 0;
	case DAMOS_COLD:
		return 1;
	case DAMOS_PAGEOUT:
		return 2;
	case DAMOS_HUGEPAGE:
		return 3;
	case DAMOS_NOHUGEPAGE:
		return 4;
	case DAMOS_STAT:
		return 5;
	default:
		return -EINVAL;
	}
}

static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damos *s;
	int written = 0;
	int rc;

	damon_for_each_scheme(s, c) {
		rc = scnprintf(&buf[written], len - written,
				"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
				s->min_sz_region, s->max_sz_region,
				s->min_nr_accesses, s->max_nr_accesses,
				s->min_age_region, s->max_age_region,
				damos_action_to_dbgfs_scheme_action(s->action),
				s->quota.ms, s->quota.sz,
				s->quota.reset_interval,
				s->quota.weight_sz,
				s->quota.weight_nr_accesses,
				s->quota.weight_age,
				s->wmarks.metric, s->wmarks.interval,
				s->wmarks.high, s->wmarks.mid, s->wmarks.low,
				s->stat.nr_tried, s->stat.sz_tried,
				s->stat.nr_applied, s->stat.sz_applied,
				s->stat.qt_exceeds);
		if (!rc)
			return -ENOMEM;

		written += rc;
	}
	return written;
}

static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	len = sprint_schemes(ctx, kbuf, count);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes)
{
	ssize_t i;

	for (i = 0; i < nr_schemes; i++)
		kfree(schemes[i]);
	kfree(schemes);
}

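/*
 * Return corresponding damos_action for the given dbgfs input for a scheme
 * action if the input is valid, negative error code otherwise.
 */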
static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action)
{
	switch (dbgfs_action) {
	case 0:
		return DAMOS_WILLNEED;
	case 1:
		return DAMOS_COLD;
	case 2:
		return DAMOS_PAGEOUT;
	case 3:
		return DAMOS_HUGEPAGE;
	case 4:
		return DAMOS_NOHUGEPAGE;
	case 5:
		return DAMOS_STAT;
	default:
		return -EINVAL;
	}
}

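/*
 * Converts a string into an array of struct damos pointers.
 *
 * Each scheme in the input is described by eighteen space-separated values,
 * in the order of the sscanf() below: min/max region size, min/max number of
 * accesses, min/max region age, action, quota (time, size, reset interval),
 * quota weights (size, accesses, age), and watermarks (metric, interval,
 * high, mid, low).
 *
 * Returns the array of schemes if the conversion succeeds, or NULL otherwise.
 */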
static struct damos **str_to_schemes(const char *str, ssize_t len,
		ssize_t *nr_schemes)
{
	struct damos *scheme, **schemes;
	const int max_nr_schemes = 256;
	int pos = 0, parsed, ret;
	unsigned long min_sz, max_sz;
	unsigned int min_nr_a, max_nr_a, min_age, max_age;
	unsigned int action_input;
	enum damos_action action;

	schemes = kmalloc_array(max_nr_schemes, sizeof(scheme),
			GFP_KERNEL);
	if (!schemes)
		return NULL;

	*nr_schemes = 0;
	while (pos < len && *nr_schemes < max_nr_schemes) {
		struct damos_quota quota = {};
		struct damos_watermarks wmarks;

		ret = sscanf(&str[pos],
				"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
				&min_sz, &max_sz, &min_nr_a, &max_nr_a,
				&min_age, &max_age, &action_input, &quota.ms,
				&quota.sz, &quota.reset_interval,
				&quota.weight_sz, &quota.weight_nr_accesses,
				&quota.weight_age, &wmarks.metric,
				&wmarks.interval, &wmarks.high, &wmarks.mid,
				&wmarks.low, &parsed);
		if (ret != 18)
			break;
		action = dbgfs_scheme_action_to_damos_action(action_input);
		if ((int)action < 0)
			goto fail;

		if (min_sz > max_sz || min_nr_a > max_nr_a || min_age > max_age)
			goto fail;

		if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
		    wmarks.mid < wmarks.low)
			goto fail;

		pos += parsed;
		scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
				min_age, max_age, action, &quota, &wmarks);
		if (!scheme)
			goto fail;

		schemes[*nr_schemes] = scheme;
		*nr_schemes += 1;
	}
	return schemes;
fail:
	free_schemes_arr(schemes, *nr_schemes);
	return NULL;
}

static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	struct damos **schemes;
	ssize_t nr_schemes = 0, ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	schemes = str_to_schemes(kbuf, count, &nr_schemes);
	if (!schemes) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	ret = damon_set_schemes(ctx, schemes, nr_schemes);
	if (!ret) {
		ret = count;
		nr_schemes = 0;
	}

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	free_schemes_arr(schemes, nr_schemes);
out:
	kfree(kbuf);
	return ret;
}

static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len)
{
	struct damon_target *t;
	int id;
	int written = 0;
	int rc;

	damon_for_each_target(t, ctx) {
		if (damon_target_has_pid(ctx))
			/* Show pid numbers to debugfs users */
			id = pid_vnr(t->pid);
		else
			/* Show 42 for physical address space, just for fun */
			id = 42;

		rc = scnprintf(&buf[written], len - written, "%d ", id);
		if (!rc)
			return -ENOMEM;
		written += rc;
	}
	if (written)
		written -= 1;
	written += scnprintf(&buf[written], len - written, "\n");
	return written;
}

static ssize_t dbgfs_target_ids_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	ssize_t len;
	char ids_buf[320];

	mutex_lock(&ctx->kdamond_lock);
	len = sprint_target_ids(ctx, ids_buf, 320);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, count, ppos, ids_buf, len);
}

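/*
 * Converts a string into an array of integers.
 *
 * Returns the array if the conversion succeeds, or NULL otherwise.
 */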
static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints)
{
	int *array;
	const int max_nr_ints = 32;
	int nr;
	int pos = 0, parsed, ret;

	*nr_ints = 0;
	array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;
	while (*nr_ints < max_nr_ints && pos < len) {
		ret = sscanf(&str[pos], "%d%n", &nr, &parsed);
		pos += parsed;
		if (ret != 1)
			break;
		array[*nr_ints] = nr;
		*nr_ints += 1;
	}

	return array;
}

static void dbgfs_put_pids(struct pid **pids, int nr_pids)
{
	int i;

	for (i = 0; i < nr_pids; i++)
		put_pid(pids[i]);
}

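/*
 * Converts a string into an array of struct pid pointers.
 *
 * Returns the array if the conversion succeeds, or NULL otherwise.
 */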
static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids)
{
	int *ints;
	ssize_t nr_ints;
	struct pid **pids;

	*nr_pids = 0;

	ints = str_to_ints(str, len, &nr_ints);
	if (!ints)
		return NULL;

	pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL);
	if (!pids)
		goto out;

	for (; *nr_pids < nr_ints; (*nr_pids)++) {
		pids[*nr_pids] = find_get_pid(ints[*nr_pids]);
		if (!pids[*nr_pids]) {
			dbgfs_put_pids(pids, *nr_pids);
			kfree(ints);
			kfree(pids);
			return NULL;
		}
	}

out:
	kfree(ints);
	return pids;
}

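/*
 * dbgfs_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @nr_targets:	number of targets
 * @pids:	array of target pids (size is same to @nr_targets)
 *
 * This function should not be called while the kdamond is running.  On
 * failure, the pid references in @pids are put if the context uses pid-based
 * targets.
 *
 * Return: 0 on success, negative error code otherwise.
 */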
static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets,
		struct pid **pids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_for_each_target_safe(t, next, ctx) {
		if (damon_target_has_pid(ctx))
			put_pid(t->pid);
		damon_destroy_target(t);
	}

	for (i = 0; i < nr_targets; i++) {
		t = damon_new_target();
		if (!t) {
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			if (damon_target_has_pid(ctx))
				dbgfs_put_pids(pids, nr_targets);
			return -ENOMEM;
		}
		if (damon_target_has_pid(ctx))
			t->pid = pids[i];
		damon_add_target(ctx, t);
	}

	return 0;
}

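/*
 * The 'target_ids' debugfs file accepts either a space-separated list of pid
 * numbers for virtual address space monitoring, or the keyword "paddr" for
 * physical address space monitoring.  Example values are only illustrative:
 *
 *	# echo "1234 5678" > target_ids
 *	# echo "paddr" > target_ids
 */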
static ssize_t dbgfs_target_ids_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	bool id_is_pid = true;
	char *kbuf;
	struct pid **target_pids = NULL;
	ssize_t nr_targets;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	if (!strncmp(kbuf, "paddr\n", count)) {
		id_is_pid = false;
		nr_targets = 1;
	}

	if (id_is_pid) {
		target_pids = str_to_pids(kbuf, count, &nr_targets);
		if (!target_pids) {
			ret = -ENOMEM;
			goto out;
		}
	}

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		if (id_is_pid)
			dbgfs_put_pids(target_pids, nr_targets);
		ret = -EBUSY;
		goto unlock_out;
	}

	/* remove previously set targets */
	dbgfs_set_targets(ctx, 0, NULL);
	if (!nr_targets) {
		ret = count;
		goto unlock_out;
	}

	/* Configure the context for the address space type */
	if (id_is_pid)
		ret = damon_select_ops(ctx, DAMON_OPS_VADDR);
	else
		ret = damon_select_ops(ctx, DAMON_OPS_PADDR);
	if (ret)
		goto unlock_out;

	ret = dbgfs_set_targets(ctx, nr_targets, target_pids);
	if (!ret)
		ret = count;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(target_pids);
out:
	kfree(kbuf);
	return ret;
}

static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r;
	int target_idx = 0;
	int written = 0;
	int rc;

	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			rc = scnprintf(&buf[written], len - written,
					"%d %lu %lu\n",
					target_idx, r->ar.start, r->ar.end);
			if (!rc)
				return -ENOMEM;
			written += rc;
		}
		target_idx++;
	}
	return written;
}

static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		mutex_unlock(&ctx->kdamond_lock);
		len = -EBUSY;
		goto out;
	}

	len = sprint_init_regions(ctx, kbuf, count);
	mutex_unlock(&ctx->kdamond_lock);
	if (len < 0)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static int add_init_region(struct damon_ctx *c, int target_idx,
		struct damon_addr_range *ar)
{
	struct damon_target *t;
	struct damon_region *r, *prev;
	unsigned long idx = 0;
	int rc = -EINVAL;

	if (ar->start >= ar->end)
		return -EINVAL;

	damon_for_each_target(t, c) {
		if (idx++ == target_idx) {
			r = damon_new_region(ar->start, ar->end);
			if (!r)
				return -ENOMEM;
			damon_add_region(r, t);
			if (damon_nr_regions(t) > 1) {
				prev = damon_prev_region(r);
				if (prev->ar.end > r->ar.start) {
					damon_destroy_region(r, t);
					return -EINVAL;
				}
			}
			rc = 0;
		}
	}
	return rc;
}

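/*
 * Each line of the 'init_regions' input specifies one region as three
 * space-separated values: the index of the target (in the order of the
 * 'target_ids' file), the start address, and the end address.  Regions of a
 * target must be given in address order and must not overlap.
 */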
static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len)
{
	struct damon_target *t;
	struct damon_region *r, *next;
	int pos = 0, parsed, ret;
	int target_idx;
	struct damon_addr_range ar;
	int err;

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	while (pos < len) {
		ret = sscanf(&str[pos], "%d %lu %lu%n",
				&target_idx, &ar.start, &ar.end, &parsed);
		if (ret != 3)
			break;
		err = add_init_region(c, target_idx, &ar);
		if (err)
			goto fail;
		pos += parsed;
	}

	return 0;

fail:
	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}
	return err;
}

static ssize_t dbgfs_init_regions_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t ret = count;
	int err;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ret = -EBUSY;
		goto unlock_out;
	}

	err = set_init_regions(ctx, kbuf, ret);
	if (err)
		ret = err;

unlock_out:
	mutex_unlock(&ctx->kdamond_lock);
	kfree(kbuf);
	return ret;
}

static ssize_t dbgfs_kdamond_pid_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct damon_ctx *ctx = file->private_data;
	char *kbuf;
	ssize_t len;

	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
	if (!kbuf)
		return -ENOMEM;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid);
	else
		len = scnprintf(kbuf, count, "none\n");
	mutex_unlock(&ctx->kdamond_lock);
	if (!len)
		goto out;
	len = simple_read_from_buffer(buf, count, ppos, kbuf, len);

out:
	kfree(kbuf);
	return len;
}

static int damon_dbgfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return nonseekable_open(inode, file);
}

static const struct file_operations attrs_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_attrs_read,
	.write = dbgfs_attrs_write,
};

static const struct file_operations schemes_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_schemes_read,
	.write = dbgfs_schemes_write,
};

static const struct file_operations target_ids_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_target_ids_read,
	.write = dbgfs_target_ids_write,
};

static const struct file_operations init_regions_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_init_regions_read,
	.write = dbgfs_init_regions_write,
};

static const struct file_operations kdamond_pid_fops = {
	.open = damon_dbgfs_open,
	.read = dbgfs_kdamond_pid_read,
};

static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx)
{
	const char * const file_names[] = {"attrs", "schemes", "target_ids",
		"init_regions", "kdamond_pid"};
	const struct file_operations *fops[] = {&attrs_fops, &schemes_fops,
		&target_ids_fops, &init_regions_fops, &kdamond_pid_fops};
	int i;

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]);
}

static void dbgfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	if (!damon_target_has_pid(ctx))
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}

static struct damon_ctx *dbgfs_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;

	if (damon_select_ops(ctx, DAMON_OPS_VADDR) &&
			damon_select_ops(ctx, DAMON_OPS_PADDR)) {
		damon_destroy_ctx(ctx);
		return NULL;
	}
	ctx->callback.before_terminate = dbgfs_before_terminate;
	return ctx;
}

static void dbgfs_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_ctx(ctx);
}

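/*
 * Make a context of @name and create a debugfs directory for it.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Returns 0 on success, negative error code otherwise.
 */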
static int dbgfs_mk_context(char *name)
{
	struct dentry *root, **new_dirs, *new_dir;
	struct damon_ctx **new_ctxs, *new_ctx;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_ctxs)
		return -ENOMEM;
	dbgfs_ctxs = new_ctxs;

	new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) *
			(dbgfs_nr_ctxs + 1), GFP_KERNEL);
	if (!new_dirs)
		return -ENOMEM;
	dbgfs_dirs = new_dirs;

	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	new_dir = debugfs_create_dir(name, root);
	/* Below check is required for a potential duplicated name case */
	if (IS_ERR(new_dir))
		return PTR_ERR(new_dir);
	dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

	new_ctx = dbgfs_new_ctx();
	if (!new_ctx) {
		debugfs_remove(new_dir);
		dbgfs_dirs[dbgfs_nr_ctxs] = NULL;
		return -ENOMEM;
	}

	dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx;
	dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs],
			dbgfs_ctxs[dbgfs_nr_ctxs]);
	dbgfs_nr_ctxs++;

	return 0;
}

static ssize_t dbgfs_mk_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	char *ctx_name;
	ssize_t ret;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_mk_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}

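/*
 * Remove a context of @name and its debugfs directory.
 *
 * This function should be called while holding damon_dbgfs_lock.
 *
 * Return 0 on success, negative error code otherwise.
 */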
static int dbgfs_rm_context(char *name)
{
	struct dentry *root, *dir, **new_dirs;
	struct damon_ctx **new_ctxs;
	int i, j;
	int ret = 0;

	if (damon_nr_running_ctxs())
		return -EBUSY;

	root = dbgfs_dirs[0];
	if (!root)
		return -ENOENT;

	dir = debugfs_lookup(name, root);
	if (!dir)
		return -ENOENT;

	new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
			GFP_KERNEL);
	if (!new_dirs) {
		ret = -ENOMEM;
		goto out_dput;
	}

	new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
			GFP_KERNEL);
	if (!new_ctxs) {
		ret = -ENOMEM;
		goto out_new_dirs;
	}

	for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
		if (dbgfs_dirs[i] == dir) {
			debugfs_remove(dbgfs_dirs[i]);
			dbgfs_destroy_ctx(dbgfs_ctxs[i]);
			continue;
		}
		new_dirs[j] = dbgfs_dirs[i];
		new_ctxs[j++] = dbgfs_ctxs[i];
	}

	kfree(dbgfs_dirs);
	kfree(dbgfs_ctxs);

	dbgfs_dirs = new_dirs;
	dbgfs_ctxs = new_ctxs;
	dbgfs_nr_ctxs--;

	goto out_dput;

out_new_dirs:
	kfree(new_dirs);
out_dput:
	dput(dir);
	return ret;
}

static ssize_t dbgfs_rm_context_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t ret;
	char *ctx_name;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ctx_name = kmalloc(count + 1, GFP_KERNEL);
	if (!ctx_name) {
		kfree(kbuf);
		return -ENOMEM;
	}

	/* Trim white space */
	if (sscanf(kbuf, "%s", ctx_name) != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&damon_dbgfs_lock);
	ret = dbgfs_rm_context(ctx_name);
	if (!ret)
		ret = count;
	mutex_unlock(&damon_dbgfs_lock);

out:
	kfree(kbuf);
	kfree(ctx_name);
	return ret;
}

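/*
 * The 'monitor_on' debugfs file reads back "on" or "off" and accepts the
 * same two keywords for writes.  Writing "on" starts monitoring with all
 * contexts (and fails if any context has no monitoring target); writing
 * "off" stops it.
 */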
static ssize_t dbgfs_monitor_on_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{
	char monitor_on_buf[5];
	bool monitor_on = damon_nr_running_ctxs() != 0;
	int len;

	len = scnprintf(monitor_on_buf, 5, monitor_on ? "on\n" : "off\n");

	return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len);
}

static ssize_t dbgfs_monitor_on_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t ret;
	char *kbuf;

	kbuf = user_input_str(buf, count, ppos);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/* Remove white space */
	if (sscanf(kbuf, "%s", kbuf) != 1) {
		kfree(kbuf);
		return -EINVAL;
	}

	mutex_lock(&damon_dbgfs_lock);
	if (!strncmp(kbuf, "on", count)) {
		int i;

		for (i = 0; i < dbgfs_nr_ctxs; i++) {
			if (damon_targets_empty(dbgfs_ctxs[i])) {
				kfree(kbuf);
				mutex_unlock(&damon_dbgfs_lock);
				return -EINVAL;
			}
		}
		ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true);
	} else if (!strncmp(kbuf, "off", count)) {
		ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&damon_dbgfs_lock);

	if (!ret)
		ret = count;
	kfree(kbuf);
	return ret;
}

static const struct file_operations mk_contexts_fops = {
	.write = dbgfs_mk_context_write,
};

static const struct file_operations rm_contexts_fops = {
	.write = dbgfs_rm_context_write,
};

static const struct file_operations monitor_on_fops = {
	.read = dbgfs_monitor_on_read,
	.write = dbgfs_monitor_on_write,
};

static int __init __damon_dbgfs_init(void)
{
	struct dentry *dbgfs_root;
	const char * const file_names[] = {"mk_contexts", "rm_contexts",
		"monitor_on"};
	const struct file_operations *fops[] = {&mk_contexts_fops,
		&rm_contexts_fops, &monitor_on_fops};
	int i;

	dbgfs_root = debugfs_create_dir("damon", NULL);

	for (i = 0; i < ARRAY_SIZE(file_names); i++)
		debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL,
				fops[i]);
	dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);

	dbgfs_dirs = kmalloc_array(1, sizeof(dbgfs_root), GFP_KERNEL);
	if (!dbgfs_dirs) {
		debugfs_remove(dbgfs_root);
		return -ENOMEM;
	}
	dbgfs_dirs[0] = dbgfs_root;

	return 0;
}

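/*
 * Functions for the initialization
 */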
static int __init damon_dbgfs_init(void)
{
	int rc = -ENOMEM;

	mutex_lock(&damon_dbgfs_lock);
	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
	if (!dbgfs_ctxs)
		goto out;
	dbgfs_ctxs[0] = dbgfs_new_ctx();
	if (!dbgfs_ctxs[0]) {
		kfree(dbgfs_ctxs);
		goto out;
	}
	dbgfs_nr_ctxs = 1;

	rc = __damon_dbgfs_init();
	if (rc) {
		kfree(dbgfs_ctxs[0]);
		kfree(dbgfs_ctxs);
		pr_err("%s: dbgfs init failed\n", __func__);
	}

out:
	mutex_unlock(&damon_dbgfs_lock);
	return rc;
}

module_init(damon_dbgfs_init);

#include "dbgfs-test.h"