0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * DAMON sysfs Interface
0004  *
0005  * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
0006  */
0007
0008 #include <linux/damon.h>
0009 #include <linux/kobject.h>
0010 #include <linux/pid.h>
0011 #include <linux/sched.h>
0012 #include <linux/slab.h>
0013
0014 static DEFINE_MUTEX(damon_sysfs_lock);
0015
0016
0017
0018
0019
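/*
 * unsigned long range directory
 */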
0020 struct damon_sysfs_ul_range {
0021 struct kobject kobj;
0022 unsigned long min;
0023 unsigned long max;
0024 };
0025
0026 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
0027 unsigned long min,
0028 unsigned long max)
0029 {
0030 struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
0031 GFP_KERNEL);
0032
0033 if (!range)
0034 return NULL;
0035 range->kobj = (struct kobject){};
0036 range->min = min;
0037 range->max = max;
0038
0039 return range;
0040 }
0041
0042 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
0043 char *buf)
0044 {
0045 struct damon_sysfs_ul_range *range = container_of(kobj,
0046 struct damon_sysfs_ul_range, kobj);
0047
0048 return sysfs_emit(buf, "%lu\n", range->min);
0049 }
0050
0051 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
0052 const char *buf, size_t count)
0053 {
0054 struct damon_sysfs_ul_range *range = container_of(kobj,
0055 struct damon_sysfs_ul_range, kobj);
0056 unsigned long min;
0057 int err;
0058
0059 err = kstrtoul(buf, 0, &min);
0060 if (err)
0061 return -EINVAL;
0062
0063 range->min = min;
0064 return count;
0065 }
0066
0067 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
0068 char *buf)
0069 {
0070 struct damon_sysfs_ul_range *range = container_of(kobj,
0071 struct damon_sysfs_ul_range, kobj);
0072
0073 return sysfs_emit(buf, "%lu\n", range->max);
0074 }
0075
0076 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
0077 const char *buf, size_t count)
0078 {
0079 struct damon_sysfs_ul_range *range = container_of(kobj,
0080 struct damon_sysfs_ul_range, kobj);
0081 unsigned long max;
0082 int err;
0083
0084 err = kstrtoul(buf, 0, &max);
0085 if (err)
0086 return -EINVAL;
0087
0088 range->max = max;
0089 return count;
0090 }
0091
0092 static void damon_sysfs_ul_range_release(struct kobject *kobj)
0093 {
0094 kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
0095 }
0096
0097 static struct kobj_attribute damon_sysfs_ul_range_min_attr =
0098 __ATTR_RW_MODE(min, 0600);
0099
0100 static struct kobj_attribute damon_sysfs_ul_range_max_attr =
0101 __ATTR_RW_MODE(max, 0600);
0102
0103 static struct attribute *damon_sysfs_ul_range_attrs[] = {
0104 &damon_sysfs_ul_range_min_attr.attr,
0105 &damon_sysfs_ul_range_max_attr.attr,
0106 NULL,
0107 };
0108 ATTRIBUTE_GROUPS(damon_sysfs_ul_range);
0109
0110 static struct kobj_type damon_sysfs_ul_range_ktype = {
0111 .release = damon_sysfs_ul_range_release,
0112 .sysfs_ops = &kobj_sysfs_ops,
0113 .default_groups = damon_sysfs_ul_range_groups,
0114 };
0115
0116
0117
0118
0119
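/*
 * scheme/stats directory
 */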
0120 struct damon_sysfs_stats {
0121 struct kobject kobj;
0122 unsigned long nr_tried;
0123 unsigned long sz_tried;
0124 unsigned long nr_applied;
0125 unsigned long sz_applied;
0126 unsigned long qt_exceeds;
0127 };
0128
0129 static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
0130 {
0131 return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
0132 }
0133
0134 static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
0135 char *buf)
0136 {
0137 struct damon_sysfs_stats *stats = container_of(kobj,
0138 struct damon_sysfs_stats, kobj);
0139
0140 return sysfs_emit(buf, "%lu\n", stats->nr_tried);
0141 }
0142
0143 static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
0144 char *buf)
0145 {
0146 struct damon_sysfs_stats *stats = container_of(kobj,
0147 struct damon_sysfs_stats, kobj);
0148
0149 return sysfs_emit(buf, "%lu\n", stats->sz_tried);
0150 }
0151
0152 static ssize_t nr_applied_show(struct kobject *kobj,
0153 struct kobj_attribute *attr, char *buf)
0154 {
0155 struct damon_sysfs_stats *stats = container_of(kobj,
0156 struct damon_sysfs_stats, kobj);
0157
0158 return sysfs_emit(buf, "%lu\n", stats->nr_applied);
0159 }
0160
0161 static ssize_t sz_applied_show(struct kobject *kobj,
0162 struct kobj_attribute *attr, char *buf)
0163 {
0164 struct damon_sysfs_stats *stats = container_of(kobj,
0165 struct damon_sysfs_stats, kobj);
0166
0167 return sysfs_emit(buf, "%lu\n", stats->sz_applied);
0168 }
0169
0170 static ssize_t qt_exceeds_show(struct kobject *kobj,
0171 struct kobj_attribute *attr, char *buf)
0172 {
0173 struct damon_sysfs_stats *stats = container_of(kobj,
0174 struct damon_sysfs_stats, kobj);
0175
0176 return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
0177 }
0178
0179 static void damon_sysfs_stats_release(struct kobject *kobj)
0180 {
0181 kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
0182 }
0183
0184 static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
0185 __ATTR_RO_MODE(nr_tried, 0400);
0186
0187 static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
0188 __ATTR_RO_MODE(sz_tried, 0400);
0189
0190 static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
0191 __ATTR_RO_MODE(nr_applied, 0400);
0192
0193 static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
0194 __ATTR_RO_MODE(sz_applied, 0400);
0195
0196 static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
0197 __ATTR_RO_MODE(qt_exceeds, 0400);
0198
0199 static struct attribute *damon_sysfs_stats_attrs[] = {
0200 &damon_sysfs_stats_nr_tried_attr.attr,
0201 &damon_sysfs_stats_sz_tried_attr.attr,
0202 &damon_sysfs_stats_nr_applied_attr.attr,
0203 &damon_sysfs_stats_sz_applied_attr.attr,
0204 &damon_sysfs_stats_qt_exceeds_attr.attr,
0205 NULL,
0206 };
0207 ATTRIBUTE_GROUPS(damon_sysfs_stats);
0208
0209 static struct kobj_type damon_sysfs_stats_ktype = {
0210 .release = damon_sysfs_stats_release,
0211 .sysfs_ops = &kobj_sysfs_ops,
0212 .default_groups = damon_sysfs_stats_groups,
0213 };
0214
0215
0216
0217
0218
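/*
 * watermarks directory
 */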
0219 struct damon_sysfs_watermarks {
0220 struct kobject kobj;
0221 enum damos_wmark_metric metric;
0222 unsigned long interval_us;
0223 unsigned long high;
0224 unsigned long mid;
0225 unsigned long low;
0226 };
0227
0228 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
0229 enum damos_wmark_metric metric, unsigned long interval_us,
0230 unsigned long high, unsigned long mid, unsigned long low)
0231 {
0232 struct damon_sysfs_watermarks *watermarks = kmalloc(
0233 sizeof(*watermarks), GFP_KERNEL);
0234
0235 if (!watermarks)
0236 return NULL;
0237 watermarks->kobj = (struct kobject){};
0238 watermarks->metric = metric;
0239 watermarks->interval_us = interval_us;
0240 watermarks->high = high;
0241 watermarks->mid = mid;
0242 watermarks->low = low;
0243 return watermarks;
0244 }
0245
0246
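/* Should match with enum damos_wmark_metric */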
0247 static const char * const damon_sysfs_wmark_metric_strs[] = {
0248 "none",
0249 "free_mem_rate",
0250 };
0251
0252 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
0253 char *buf)
0254 {
0255 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0256 struct damon_sysfs_watermarks, kobj);
0257
0258 return sysfs_emit(buf, "%s\n",
0259 damon_sysfs_wmark_metric_strs[watermarks->metric]);
0260 }
0261
0262 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
0263 const char *buf, size_t count)
0264 {
0265 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0266 struct damon_sysfs_watermarks, kobj);
0267 enum damos_wmark_metric metric;
0268
0269 for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
0270 if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
0271 watermarks->metric = metric;
0272 return count;
0273 }
0274 }
0275 return -EINVAL;
0276 }
0277
0278 static ssize_t interval_us_show(struct kobject *kobj,
0279 struct kobj_attribute *attr, char *buf)
0280 {
0281 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0282 struct damon_sysfs_watermarks, kobj);
0283
0284 return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
0285 }
0286
0287 static ssize_t interval_us_store(struct kobject *kobj,
0288 struct kobj_attribute *attr, const char *buf, size_t count)
0289 {
0290 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0291 struct damon_sysfs_watermarks, kobj);
0292 int err = kstrtoul(buf, 0, &watermarks->interval_us);
0293
0294 if (err)
0295 return -EINVAL;
0296 return count;
0297 }
0298
0299 static ssize_t high_show(struct kobject *kobj,
0300 struct kobj_attribute *attr, char *buf)
0301 {
0302 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0303 struct damon_sysfs_watermarks, kobj);
0304
0305 return sysfs_emit(buf, "%lu\n", watermarks->high);
0306 }
0307
0308 static ssize_t high_store(struct kobject *kobj,
0309 struct kobj_attribute *attr, const char *buf, size_t count)
0310 {
0311 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0312 struct damon_sysfs_watermarks, kobj);
0313 int err = kstrtoul(buf, 0, &watermarks->high);
0314
0315 if (err)
0316 return -EINVAL;
0317 return count;
0318 }
0319
0320 static ssize_t mid_show(struct kobject *kobj,
0321 struct kobj_attribute *attr, char *buf)
0322 {
0323 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0324 struct damon_sysfs_watermarks, kobj);
0325
0326 return sysfs_emit(buf, "%lu\n", watermarks->mid);
0327 }
0328
0329 static ssize_t mid_store(struct kobject *kobj,
0330 struct kobj_attribute *attr, const char *buf, size_t count)
0331 {
0332 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0333 struct damon_sysfs_watermarks, kobj);
0334 int err = kstrtoul(buf, 0, &watermarks->mid);
0335
0336 if (err)
0337 return -EINVAL;
0338 return count;
0339 }
0340
0341 static ssize_t low_show(struct kobject *kobj,
0342 struct kobj_attribute *attr, char *buf)
0343 {
0344 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0345 struct damon_sysfs_watermarks, kobj);
0346
0347 return sysfs_emit(buf, "%lu\n", watermarks->low);
0348 }
0349
0350 static ssize_t low_store(struct kobject *kobj,
0351 struct kobj_attribute *attr, const char *buf, size_t count)
0352 {
0353 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
0354 struct damon_sysfs_watermarks, kobj);
0355 int err = kstrtoul(buf, 0, &watermarks->low);
0356
0357 if (err)
0358 return -EINVAL;
0359 return count;
0360 }
0361
0362 static void damon_sysfs_watermarks_release(struct kobject *kobj)
0363 {
0364 kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
0365 }
0366
0367 static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
0368 __ATTR_RW_MODE(metric, 0600);
0369
0370 static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
0371 __ATTR_RW_MODE(interval_us, 0600);
0372
0373 static struct kobj_attribute damon_sysfs_watermarks_high_attr =
0374 __ATTR_RW_MODE(high, 0600);
0375
0376 static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
0377 __ATTR_RW_MODE(mid, 0600);
0378
0379 static struct kobj_attribute damon_sysfs_watermarks_low_attr =
0380 __ATTR_RW_MODE(low, 0600);
0381
0382 static struct attribute *damon_sysfs_watermarks_attrs[] = {
0383 &damon_sysfs_watermarks_metric_attr.attr,
0384 &damon_sysfs_watermarks_interval_us_attr.attr,
0385 &damon_sysfs_watermarks_high_attr.attr,
0386 &damon_sysfs_watermarks_mid_attr.attr,
0387 &damon_sysfs_watermarks_low_attr.attr,
0388 NULL,
0389 };
0390 ATTRIBUTE_GROUPS(damon_sysfs_watermarks);
0391
0392 static struct kobj_type damon_sysfs_watermarks_ktype = {
0393 .release = damon_sysfs_watermarks_release,
0394 .sysfs_ops = &kobj_sysfs_ops,
0395 .default_groups = damon_sysfs_watermarks_groups,
0396 };
0397
0398
0399
0400
0401
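/*
 * quotas/weights directory
 */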
0402 struct damon_sysfs_weights {
0403 struct kobject kobj;
0404 unsigned int sz;
0405 unsigned int nr_accesses;
0406 unsigned int age;
0407 };
0408
0409 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
0410 unsigned int nr_accesses, unsigned int age)
0411 {
0412 struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
0413 GFP_KERNEL);
0414
0415 if (!weights)
0416 return NULL;
0417 weights->kobj = (struct kobject){};
0418 weights->sz = sz;
0419 weights->nr_accesses = nr_accesses;
0420 weights->age = age;
0421 return weights;
0422 }
0423
0424 static ssize_t sz_permil_show(struct kobject *kobj,
0425 struct kobj_attribute *attr, char *buf)
0426 {
0427 struct damon_sysfs_weights *weights = container_of(kobj,
0428 struct damon_sysfs_weights, kobj);
0429
0430 return sysfs_emit(buf, "%u\n", weights->sz);
0431 }
0432
0433 static ssize_t sz_permil_store(struct kobject *kobj,
0434 struct kobj_attribute *attr, const char *buf, size_t count)
0435 {
0436 struct damon_sysfs_weights *weights = container_of(kobj,
0437 struct damon_sysfs_weights, kobj);
0438 int err = kstrtouint(buf, 0, &weights->sz);
0439
0440 if (err)
0441 return -EINVAL;
0442 return count;
0443 }
0444
0445 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
0446 struct kobj_attribute *attr, char *buf)
0447 {
0448 struct damon_sysfs_weights *weights = container_of(kobj,
0449 struct damon_sysfs_weights, kobj);
0450
0451 return sysfs_emit(buf, "%u\n", weights->nr_accesses);
0452 }
0453
0454 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
0455 struct kobj_attribute *attr, const char *buf, size_t count)
0456 {
0457 struct damon_sysfs_weights *weights = container_of(kobj,
0458 struct damon_sysfs_weights, kobj);
0459 int err = kstrtouint(buf, 0, &weights->nr_accesses);
0460
0461 if (err)
0462 return -EINVAL;
0463 return count;
0464 }
0465
0466 static ssize_t age_permil_show(struct kobject *kobj,
0467 struct kobj_attribute *attr, char *buf)
0468 {
0469 struct damon_sysfs_weights *weights = container_of(kobj,
0470 struct damon_sysfs_weights, kobj);
0471
0472 return sysfs_emit(buf, "%u\n", weights->age);
0473 }
0474
0475 static ssize_t age_permil_store(struct kobject *kobj,
0476 struct kobj_attribute *attr, const char *buf, size_t count)
0477 {
0478 struct damon_sysfs_weights *weights = container_of(kobj,
0479 struct damon_sysfs_weights, kobj);
0480 int err = kstrtouint(buf, 0, &weights->age);
0481
0482 if (err)
0483 return -EINVAL;
0484 return count;
0485 }
0486
0487 static void damon_sysfs_weights_release(struct kobject *kobj)
0488 {
0489 kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
0490 }
0491
0492 static struct kobj_attribute damon_sysfs_weights_sz_attr =
0493 __ATTR_RW_MODE(sz_permil, 0600);
0494
0495 static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
0496 __ATTR_RW_MODE(nr_accesses_permil, 0600);
0497
0498 static struct kobj_attribute damon_sysfs_weights_age_attr =
0499 __ATTR_RW_MODE(age_permil, 0600);
0500
0501 static struct attribute *damon_sysfs_weights_attrs[] = {
0502 &damon_sysfs_weights_sz_attr.attr,
0503 &damon_sysfs_weights_nr_accesses_attr.attr,
0504 &damon_sysfs_weights_age_attr.attr,
0505 NULL,
0506 };
0507 ATTRIBUTE_GROUPS(damon_sysfs_weights);
0508
0509 static struct kobj_type damon_sysfs_weights_ktype = {
0510 .release = damon_sysfs_weights_release,
0511 .sysfs_ops = &kobj_sysfs_ops,
0512 .default_groups = damon_sysfs_weights_groups,
0513 };
0514
0515
0516
0517
0518
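/*
 * quotas directory
 */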
0519 struct damon_sysfs_quotas {
0520 struct kobject kobj;
0521 struct damon_sysfs_weights *weights;
0522 unsigned long ms;
0523 unsigned long sz;
0524 unsigned long reset_interval_ms;
0525 };
0526
0527 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
0528 {
0529 return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
0530 }
0531
0532 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
0533 {
0534 struct damon_sysfs_weights *weights;
0535 int err;
0536
0537 weights = damon_sysfs_weights_alloc(0, 0, 0);
0538 if (!weights)
0539 return -ENOMEM;
0540
0541 err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
0542 &quotas->kobj, "weights");
0543 if (err)
0544 kobject_put(&weights->kobj);
0545 else
0546 quotas->weights = weights;
0547 return err;
0548 }
0549
0550 static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
0551 {
0552 kobject_put(&quotas->weights->kobj);
0553 }
0554
0555 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
0556 char *buf)
0557 {
0558 struct damon_sysfs_quotas *quotas = container_of(kobj,
0559 struct damon_sysfs_quotas, kobj);
0560
0561 return sysfs_emit(buf, "%lu\n", quotas->ms);
0562 }
0563
0564 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
0565 const char *buf, size_t count)
0566 {
0567 struct damon_sysfs_quotas *quotas = container_of(kobj,
0568 struct damon_sysfs_quotas, kobj);
0569 int err = kstrtoul(buf, 0, &quotas->ms);
0570
0571 if (err)
0572 return -EINVAL;
0573 return count;
0574 }
0575
0576 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
0577 char *buf)
0578 {
0579 struct damon_sysfs_quotas *quotas = container_of(kobj,
0580 struct damon_sysfs_quotas, kobj);
0581
0582 return sysfs_emit(buf, "%lu\n", quotas->sz);
0583 }
0584
0585 static ssize_t bytes_store(struct kobject *kobj,
0586 struct kobj_attribute *attr, const char *buf, size_t count)
0587 {
0588 struct damon_sysfs_quotas *quotas = container_of(kobj,
0589 struct damon_sysfs_quotas, kobj);
0590 int err = kstrtoul(buf, 0, &quotas->sz);
0591
0592 if (err)
0593 return -EINVAL;
0594 return count;
0595 }
0596
0597 static ssize_t reset_interval_ms_show(struct kobject *kobj,
0598 struct kobj_attribute *attr, char *buf)
0599 {
0600 struct damon_sysfs_quotas *quotas = container_of(kobj,
0601 struct damon_sysfs_quotas, kobj);
0602
0603 return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
0604 }
0605
0606 static ssize_t reset_interval_ms_store(struct kobject *kobj,
0607 struct kobj_attribute *attr, const char *buf, size_t count)
0608 {
0609 struct damon_sysfs_quotas *quotas = container_of(kobj,
0610 struct damon_sysfs_quotas, kobj);
0611 int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
0612
0613 if (err)
0614 return -EINVAL;
0615 return count;
0616 }
0617
0618 static void damon_sysfs_quotas_release(struct kobject *kobj)
0619 {
0620 kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
0621 }
0622
0623 static struct kobj_attribute damon_sysfs_quotas_ms_attr =
0624 __ATTR_RW_MODE(ms, 0600);
0625
0626 static struct kobj_attribute damon_sysfs_quotas_sz_attr =
0627 __ATTR_RW_MODE(bytes, 0600);
0628
0629 static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
0630 __ATTR_RW_MODE(reset_interval_ms, 0600);
0631
0632 static struct attribute *damon_sysfs_quotas_attrs[] = {
0633 &damon_sysfs_quotas_ms_attr.attr,
0634 &damon_sysfs_quotas_sz_attr.attr,
0635 &damon_sysfs_quotas_reset_interval_ms_attr.attr,
0636 NULL,
0637 };
0638 ATTRIBUTE_GROUPS(damon_sysfs_quotas);
0639
0640 static struct kobj_type damon_sysfs_quotas_ktype = {
0641 .release = damon_sysfs_quotas_release,
0642 .sysfs_ops = &kobj_sysfs_ops,
0643 .default_groups = damon_sysfs_quotas_groups,
0644 };
0645
0646
0647
0648
0649
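/*
 * access_pattern directory
 */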
0650 struct damon_sysfs_access_pattern {
0651 struct kobject kobj;
0652 struct damon_sysfs_ul_range *sz;
0653 struct damon_sysfs_ul_range *nr_accesses;
0654 struct damon_sysfs_ul_range *age;
0655 };
0656
0657 static
0658 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
0659 {
0660 struct damon_sysfs_access_pattern *access_pattern =
0661 kmalloc(sizeof(*access_pattern), GFP_KERNEL);
0662
0663 if (!access_pattern)
0664 return NULL;
0665 access_pattern->kobj = (struct kobject){};
0666 return access_pattern;
0667 }
0668
0669 static int damon_sysfs_access_pattern_add_range_dir(
0670 struct damon_sysfs_access_pattern *access_pattern,
0671 struct damon_sysfs_ul_range **range_dir_ptr,
0672 char *name)
0673 {
0674 struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
0675 int err;
0676
0677 if (!range)
0678 return -ENOMEM;
0679 err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
0680 &access_pattern->kobj, name);
0681 if (err)
0682 kobject_put(&range->kobj);
0683 else
0684 *range_dir_ptr = range;
0685 return err;
0686 }
0687
0688 static int damon_sysfs_access_pattern_add_dirs(
0689 struct damon_sysfs_access_pattern *access_pattern)
0690 {
0691 int err;
0692
0693 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
0694 &access_pattern->sz, "sz");
0695 if (err)
0696 goto put_sz_out;
0697
0698 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
0699 &access_pattern->nr_accesses, "nr_accesses");
0700 if (err)
0701 goto put_nr_accesses_sz_out;
0702
0703 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
0704 &access_pattern->age, "age");
0705 if (err)
0706 goto put_age_nr_accesses_sz_out;
0707 return 0;
0708
0709 put_age_nr_accesses_sz_out:
0710 kobject_put(&access_pattern->age->kobj);
0711 access_pattern->age = NULL;
0712 put_nr_accesses_sz_out:
0713 kobject_put(&access_pattern->nr_accesses->kobj);
0714 access_pattern->nr_accesses = NULL;
0715 put_sz_out:
0716 kobject_put(&access_pattern->sz->kobj);
0717 access_pattern->sz = NULL;
0718 return err;
0719 }
0720
0721 static void damon_sysfs_access_pattern_rm_dirs(
0722 struct damon_sysfs_access_pattern *access_pattern)
0723 {
0724 kobject_put(&access_pattern->sz->kobj);
0725 kobject_put(&access_pattern->nr_accesses->kobj);
0726 kobject_put(&access_pattern->age->kobj);
0727 }
0728
0729 static void damon_sysfs_access_pattern_release(struct kobject *kobj)
0730 {
0731 kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
0732 }
0733
0734 static struct attribute *damon_sysfs_access_pattern_attrs[] = {
0735 NULL,
0736 };
0737 ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);
0738
0739 static struct kobj_type damon_sysfs_access_pattern_ktype = {
0740 .release = damon_sysfs_access_pattern_release,
0741 .sysfs_ops = &kobj_sysfs_ops,
0742 .default_groups = damon_sysfs_access_pattern_groups,
0743 };
0744
0745
0746
0747
0748
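/*
 * scheme directory
 */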
0749 struct damon_sysfs_scheme {
0750 struct kobject kobj;
0751 enum damos_action action;
0752 struct damon_sysfs_access_pattern *access_pattern;
0753 struct damon_sysfs_quotas *quotas;
0754 struct damon_sysfs_watermarks *watermarks;
0755 struct damon_sysfs_stats *stats;
0756 };
0757
0758
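/* Should match with enum damos_action */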
0759 static const char * const damon_sysfs_damos_action_strs[] = {
0760 "willneed",
0761 "cold",
0762 "pageout",
0763 "hugepage",
0764 "nohugepage",
0765 "lru_prio",
0766 "lru_deprio",
0767 "stat",
0768 };
0769
0770 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
0771 enum damos_action action)
0772 {
0773 struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
0774 GFP_KERNEL);
0775
0776 if (!scheme)
0777 return NULL;
0778 scheme->kobj = (struct kobject){};
0779 scheme->action = action;
0780 return scheme;
0781 }
0782
0783 static int damon_sysfs_scheme_set_access_pattern(
0784 struct damon_sysfs_scheme *scheme)
0785 {
0786 struct damon_sysfs_access_pattern *access_pattern;
0787 int err;
0788
0789 access_pattern = damon_sysfs_access_pattern_alloc();
0790 if (!access_pattern)
0791 return -ENOMEM;
0792 err = kobject_init_and_add(&access_pattern->kobj,
0793 &damon_sysfs_access_pattern_ktype, &scheme->kobj,
0794 "access_pattern");
0795 if (err)
0796 goto out;
0797 err = damon_sysfs_access_pattern_add_dirs(access_pattern);
0798 if (err)
0799 goto out;
0800 scheme->access_pattern = access_pattern;
0801 return 0;
0802
0803 out:
0804 kobject_put(&access_pattern->kobj);
0805 return err;
0806 }
0807
0808 static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
0809 {
0810 struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
0811 int err;
0812
0813 if (!quotas)
0814 return -ENOMEM;
0815 err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
0816 &scheme->kobj, "quotas");
0817 if (err)
0818 goto out;
0819 err = damon_sysfs_quotas_add_dirs(quotas);
0820 if (err)
0821 goto out;
0822 scheme->quotas = quotas;
0823 return 0;
0824
0825 out:
0826 kobject_put(&quotas->kobj);
0827 return err;
0828 }
0829
0830 static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
0831 {
0832 struct damon_sysfs_watermarks *watermarks =
0833 damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
0834 int err;
0835
0836 if (!watermarks)
0837 return -ENOMEM;
0838 err = kobject_init_and_add(&watermarks->kobj,
0839 &damon_sysfs_watermarks_ktype, &scheme->kobj,
0840 "watermarks");
0841 if (err)
0842 kobject_put(&watermarks->kobj);
0843 else
0844 scheme->watermarks = watermarks;
0845 return err;
0846 }
0847
0848 static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
0849 {
0850 struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
0851 int err;
0852
0853 if (!stats)
0854 return -ENOMEM;
0855 err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
0856 &scheme->kobj, "stats");
0857 if (err)
0858 kobject_put(&stats->kobj);
0859 else
0860 scheme->stats = stats;
0861 return err;
0862 }
0863
0864 static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
0865 {
0866 int err;
0867
0868 err = damon_sysfs_scheme_set_access_pattern(scheme);
0869 if (err)
0870 return err;
0871 err = damon_sysfs_scheme_set_quotas(scheme);
0872 if (err)
0873 goto put_access_pattern_out;
0874 err = damon_sysfs_scheme_set_watermarks(scheme);
0875 if (err)
0876 goto put_quotas_access_pattern_out;
0877 err = damon_sysfs_scheme_set_stats(scheme);
0878 if (err)
0879 goto put_watermarks_quotas_access_pattern_out;
0880 return 0;
0881
0882 put_watermarks_quotas_access_pattern_out:
0883 kobject_put(&scheme->watermarks->kobj);
0884 scheme->watermarks = NULL;
0885 put_quotas_access_pattern_out:
0886 kobject_put(&scheme->quotas->kobj);
0887 scheme->quotas = NULL;
0888 put_access_pattern_out:
0889 kobject_put(&scheme->access_pattern->kobj);
0890 scheme->access_pattern = NULL;
0891 return err;
0892 }
0893
0894 static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
0895 {
0896 damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
0897 kobject_put(&scheme->access_pattern->kobj);
0898 damon_sysfs_quotas_rm_dirs(scheme->quotas);
0899 kobject_put(&scheme->quotas->kobj);
0900 kobject_put(&scheme->watermarks->kobj);
0901 kobject_put(&scheme->stats->kobj);
0902 }
0903
0904 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
0905 char *buf)
0906 {
0907 struct damon_sysfs_scheme *scheme = container_of(kobj,
0908 struct damon_sysfs_scheme, kobj);
0909
0910 return sysfs_emit(buf, "%s\n",
0911 damon_sysfs_damos_action_strs[scheme->action]);
0912 }
0913
0914 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
0915 const char *buf, size_t count)
0916 {
0917 struct damon_sysfs_scheme *scheme = container_of(kobj,
0918 struct damon_sysfs_scheme, kobj);
0919 enum damos_action action;
0920
0921 for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
0922 if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
0923 scheme->action = action;
0924 return count;
0925 }
0926 }
0927 return -EINVAL;
0928 }
0929
0930 static void damon_sysfs_scheme_release(struct kobject *kobj)
0931 {
0932 kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
0933 }
0934
0935 static struct kobj_attribute damon_sysfs_scheme_action_attr =
0936 __ATTR_RW_MODE(action, 0600);
0937
0938 static struct attribute *damon_sysfs_scheme_attrs[] = {
0939 &damon_sysfs_scheme_action_attr.attr,
0940 NULL,
0941 };
0942 ATTRIBUTE_GROUPS(damon_sysfs_scheme);
0943
0944 static struct kobj_type damon_sysfs_scheme_ktype = {
0945 .release = damon_sysfs_scheme_release,
0946 .sysfs_ops = &kobj_sysfs_ops,
0947 .default_groups = damon_sysfs_scheme_groups,
0948 };
0949
0950
0951
0952
0953
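/*
 * schemes directory
 */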
0954 struct damon_sysfs_schemes {
0955 struct kobject kobj;
0956 struct damon_sysfs_scheme **schemes_arr;
0957 int nr;
0958 };
0959
0960 static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
0961 {
0962 return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
0963 }
0964
0965 static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
0966 {
0967 struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
0968 int i;
0969
0970 for (i = 0; i < schemes->nr; i++) {
0971 damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
0972 kobject_put(&schemes_arr[i]->kobj);
0973 }
0974 schemes->nr = 0;
0975 kfree(schemes_arr);
0976 schemes->schemes_arr = NULL;
0977 }
0978
0979 static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
0980 int nr_schemes)
0981 {
0982 struct damon_sysfs_scheme **schemes_arr, *scheme;
0983 int err, i;
0984
0985 damon_sysfs_schemes_rm_dirs(schemes);
0986 if (!nr_schemes)
0987 return 0;
0988
0989 schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
0990 GFP_KERNEL | __GFP_NOWARN);
0991 if (!schemes_arr)
0992 return -ENOMEM;
0993 schemes->schemes_arr = schemes_arr;
0994
0995 for (i = 0; i < nr_schemes; i++) {
0996 scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
0997 if (!scheme) {
0998 damon_sysfs_schemes_rm_dirs(schemes);
0999 return -ENOMEM;
1000 }
1001
1002 err = kobject_init_and_add(&scheme->kobj,
1003 &damon_sysfs_scheme_ktype, &schemes->kobj,
1004 "%d", i);
1005 if (err)
1006 goto out;
1007 err = damon_sysfs_scheme_add_dirs(scheme);
1008 if (err)
1009 goto out;
1010
1011 schemes_arr[i] = scheme;
1012 schemes->nr++;
1013 }
1014 return 0;
1015
1016 out:
1017 damon_sysfs_schemes_rm_dirs(schemes);
1018 kobject_put(&scheme->kobj);
1019 return err;
1020 }
1021
1022 static ssize_t nr_schemes_show(struct kobject *kobj,
1023 struct kobj_attribute *attr, char *buf)
1024 {
1025 struct damon_sysfs_schemes *schemes = container_of(kobj,
1026 struct damon_sysfs_schemes, kobj);
1027
1028 return sysfs_emit(buf, "%d\n", schemes->nr);
1029 }
1030
1031 static ssize_t nr_schemes_store(struct kobject *kobj,
1032 struct kobj_attribute *attr, const char *buf, size_t count)
1033 {
1034 struct damon_sysfs_schemes *schemes = container_of(kobj,
1035 struct damon_sysfs_schemes, kobj);
1036 int nr, err = kstrtoint(buf, 0, &nr);
1037
1038 if (err)
1039 return err;
1040 if (nr < 0)
1041 return -EINVAL;
1042
1043 if (!mutex_trylock(&damon_sysfs_lock))
1044 return -EBUSY;
1045 err = damon_sysfs_schemes_add_dirs(schemes, nr);
1046 mutex_unlock(&damon_sysfs_lock);
1047 if (err)
1048 return err;
1049 return count;
1050 }
1051
1052 static void damon_sysfs_schemes_release(struct kobject *kobj)
1053 {
1054 kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
1055 }
1056
1057 static struct kobj_attribute damon_sysfs_schemes_nr_attr =
1058 __ATTR_RW_MODE(nr_schemes, 0600);
1059
1060 static struct attribute *damon_sysfs_schemes_attrs[] = {
1061 &damon_sysfs_schemes_nr_attr.attr,
1062 NULL,
1063 };
1064 ATTRIBUTE_GROUPS(damon_sysfs_schemes);
1065
1066 static struct kobj_type damon_sysfs_schemes_ktype = {
1067 .release = damon_sysfs_schemes_release,
1068 .sysfs_ops = &kobj_sysfs_ops,
1069 .default_groups = damon_sysfs_schemes_groups,
1070 };
1071
1072
1073
1074
1075
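/*
 * region directory (a single monitoring target address range)
 */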
1076 struct damon_sysfs_region {
1077 struct kobject kobj;
1078 unsigned long start;
1079 unsigned long end;
1080 };
1081
1082 static struct damon_sysfs_region *damon_sysfs_region_alloc(
1083 unsigned long start,
1084 unsigned long end)
1085 {
1086 struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1087 GFP_KERNEL);
1088
1089 if (!region)
1090 return NULL;
1091 region->kobj = (struct kobject){};
1092 region->start = start;
1093 region->end = end;
1094 return region;
1095 }
1096
1097 static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
1098 char *buf)
1099 {
1100 struct damon_sysfs_region *region = container_of(kobj,
1101 struct damon_sysfs_region, kobj);
1102
1103 return sysfs_emit(buf, "%lu\n", region->start);
1104 }
1105
1106 static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
1107 const char *buf, size_t count)
1108 {
1109 struct damon_sysfs_region *region = container_of(kobj,
1110 struct damon_sysfs_region, kobj);
1111 int err = kstrtoul(buf, 0, &region->start);
1112
1113 if (err)
1114 return -EINVAL;
1115 return count;
1116 }
1117
1118 static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
1119 char *buf)
1120 {
1121 struct damon_sysfs_region *region = container_of(kobj,
1122 struct damon_sysfs_region, kobj);
1123
1124 return sysfs_emit(buf, "%lu\n", region->end);
1125 }
1126
1127 static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
1128 const char *buf, size_t count)
1129 {
1130 struct damon_sysfs_region *region = container_of(kobj,
1131 struct damon_sysfs_region, kobj);
1132 int err = kstrtoul(buf, 0, &region->end);
1133
1134 if (err)
1135 return -EINVAL;
1136 return count;
1137 }
1138
1139 static void damon_sysfs_region_release(struct kobject *kobj)
1140 {
1141 kfree(container_of(kobj, struct damon_sysfs_region, kobj));
1142 }
1143
1144 static struct kobj_attribute damon_sysfs_region_start_attr =
1145 __ATTR_RW_MODE(start, 0600);
1146
1147 static struct kobj_attribute damon_sysfs_region_end_attr =
1148 __ATTR_RW_MODE(end, 0600);
1149
1150 static struct attribute *damon_sysfs_region_attrs[] = {
1151 &damon_sysfs_region_start_attr.attr,
1152 &damon_sysfs_region_end_attr.attr,
1153 NULL,
1154 };
1155 ATTRIBUTE_GROUPS(damon_sysfs_region);
1156
1157 static struct kobj_type damon_sysfs_region_ktype = {
1158 .release = damon_sysfs_region_release,
1159 .sysfs_ops = &kobj_sysfs_ops,
1160 .default_groups = damon_sysfs_region_groups,
1161 };
1162
1163
1164
1165
1166
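/*
 * regions directory
 */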
1167 struct damon_sysfs_regions {
1168 struct kobject kobj;
1169 struct damon_sysfs_region **regions_arr;
1170 int nr;
1171 };
1172
1173 static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
1174 {
1175 return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
1176 }
1177
1178 static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
1179 {
1180 struct damon_sysfs_region **regions_arr = regions->regions_arr;
1181 int i;
1182
1183 for (i = 0; i < regions->nr; i++)
1184 kobject_put(&regions_arr[i]->kobj);
1185 regions->nr = 0;
1186 kfree(regions_arr);
1187 regions->regions_arr = NULL;
1188 }
1189
1190 static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
1191 int nr_regions)
1192 {
1193 struct damon_sysfs_region **regions_arr, *region;
1194 int err, i;
1195
1196 damon_sysfs_regions_rm_dirs(regions);
1197 if (!nr_regions)
1198 return 0;
1199
1200 regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
1201 GFP_KERNEL | __GFP_NOWARN);
1202 if (!regions_arr)
1203 return -ENOMEM;
1204 regions->regions_arr = regions_arr;
1205
1206 for (i = 0; i < nr_regions; i++) {
1207 region = damon_sysfs_region_alloc(0, 0);
1208 if (!region) {
1209 damon_sysfs_regions_rm_dirs(regions);
1210 return -ENOMEM;
1211 }
1212
1213 err = kobject_init_and_add(&region->kobj,
1214 &damon_sysfs_region_ktype, &regions->kobj,
1215 "%d", i);
1216 if (err) {
1217 kobject_put(&region->kobj);
1218 damon_sysfs_regions_rm_dirs(regions);
1219 return err;
1220 }
1221
1222 regions_arr[i] = region;
1223 regions->nr++;
1224 }
1225 return 0;
1226 }
1227
1228 static ssize_t nr_regions_show(struct kobject *kobj,
1229 struct kobj_attribute *attr, char *buf)
1230 {
1231 struct damon_sysfs_regions *regions = container_of(kobj,
1232 struct damon_sysfs_regions, kobj);
1233
1234 return sysfs_emit(buf, "%d\n", regions->nr);
1235 }
1236
1237 static ssize_t nr_regions_store(struct kobject *kobj,
1238 struct kobj_attribute *attr, const char *buf, size_t count)
1239 {
1240 struct damon_sysfs_regions *regions = container_of(kobj,
1241 struct damon_sysfs_regions, kobj);
1242 int nr, err = kstrtoint(buf, 0, &nr);
1243
1244 if (err)
1245 return err;
1246 if (nr < 0)
1247 return -EINVAL;
1248
1249 if (!mutex_trylock(&damon_sysfs_lock))
1250 return -EBUSY;
1251 err = damon_sysfs_regions_add_dirs(regions, nr);
1252 mutex_unlock(&damon_sysfs_lock);
1253 if (err)
1254 return err;
1255
1256 return count;
1257 }
1258
1259 static void damon_sysfs_regions_release(struct kobject *kobj)
1260 {
1261 kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
1262 }
1263
1264 static struct kobj_attribute damon_sysfs_regions_nr_attr =
1265 __ATTR_RW_MODE(nr_regions, 0600);
1266
1267 static struct attribute *damon_sysfs_regions_attrs[] = {
1268 &damon_sysfs_regions_nr_attr.attr,
1269 NULL,
1270 };
1271 ATTRIBUTE_GROUPS(damon_sysfs_regions);
1272
1273 static struct kobj_type damon_sysfs_regions_ktype = {
1274 .release = damon_sysfs_regions_release,
1275 .sysfs_ops = &kobj_sysfs_ops,
1276 .default_groups = damon_sysfs_regions_groups,
1277 };
1278
1279
1280
1281
1282
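/*
 * target directory
 */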
1283 struct damon_sysfs_target {
1284 struct kobject kobj;
1285 struct damon_sysfs_regions *regions;
1286 int pid;
1287 };
1288
1289 static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
1290 {
1291 return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
1292 }
1293
1294 static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
1295 {
1296 struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
1297 int err;
1298
1299 if (!regions)
1300 return -ENOMEM;
1301
1302 err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
1303 &target->kobj, "regions");
1304 if (err)
1305 kobject_put(&regions->kobj);
1306 else
1307 target->regions = regions;
1308 return err;
1309 }
1310
1311 static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
1312 {
1313 damon_sysfs_regions_rm_dirs(target->regions);
1314 kobject_put(&target->regions->kobj);
1315 }
1316
1317 static ssize_t pid_target_show(struct kobject *kobj,
1318 struct kobj_attribute *attr, char *buf)
1319 {
1320 struct damon_sysfs_target *target = container_of(kobj,
1321 struct damon_sysfs_target, kobj);
1322
1323 return sysfs_emit(buf, "%d\n", target->pid);
1324 }
1325
1326 static ssize_t pid_target_store(struct kobject *kobj,
1327 struct kobj_attribute *attr, const char *buf, size_t count)
1328 {
1329 struct damon_sysfs_target *target = container_of(kobj,
1330 struct damon_sysfs_target, kobj);
1331 int err = kstrtoint(buf, 0, &target->pid);
1332
1333 if (err)
1334 return -EINVAL;
1335 return count;
1336 }
1337
1338 static void damon_sysfs_target_release(struct kobject *kobj)
1339 {
1340 kfree(container_of(kobj, struct damon_sysfs_target, kobj));
1341 }
1342
1343 static struct kobj_attribute damon_sysfs_target_pid_attr =
1344 __ATTR_RW_MODE(pid_target, 0600);
1345
1346 static struct attribute *damon_sysfs_target_attrs[] = {
1347 &damon_sysfs_target_pid_attr.attr,
1348 NULL,
1349 };
1350 ATTRIBUTE_GROUPS(damon_sysfs_target);
1351
1352 static struct kobj_type damon_sysfs_target_ktype = {
1353 .release = damon_sysfs_target_release,
1354 .sysfs_ops = &kobj_sysfs_ops,
1355 .default_groups = damon_sysfs_target_groups,
1356 };
1357
1358
1359
1360
1361
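/*
 * targets directory
 */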
1362 struct damon_sysfs_targets {
1363 struct kobject kobj;
1364 struct damon_sysfs_target **targets_arr;
1365 int nr;
1366 };
1367
1368 static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
1369 {
1370 return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
1371 }
1372
1373 static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
1374 {
1375 struct damon_sysfs_target **targets_arr = targets->targets_arr;
1376 int i;
1377
1378 for (i = 0; i < targets->nr; i++) {
1379 damon_sysfs_target_rm_dirs(targets_arr[i]);
1380 kobject_put(&targets_arr[i]->kobj);
1381 }
1382 targets->nr = 0;
1383 kfree(targets_arr);
1384 targets->targets_arr = NULL;
1385 }
1386
1387 static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
1388 int nr_targets)
1389 {
1390 struct damon_sysfs_target **targets_arr, *target;
1391 int err, i;
1392
1393 damon_sysfs_targets_rm_dirs(targets);
1394 if (!nr_targets)
1395 return 0;
1396
1397 targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
1398 GFP_KERNEL | __GFP_NOWARN);
1399 if (!targets_arr)
1400 return -ENOMEM;
1401 targets->targets_arr = targets_arr;
1402
1403 for (i = 0; i < nr_targets; i++) {
1404 target = damon_sysfs_target_alloc();
1405 if (!target) {
1406 damon_sysfs_targets_rm_dirs(targets);
1407 return -ENOMEM;
1408 }
1409
1410 err = kobject_init_and_add(&target->kobj,
1411 &damon_sysfs_target_ktype, &targets->kobj,
1412 "%d", i);
1413 if (err)
1414 goto out;
1415
1416 err = damon_sysfs_target_add_dirs(target);
1417 if (err)
1418 goto out;
1419
1420 targets_arr[i] = target;
1421 targets->nr++;
1422 }
1423 return 0;
1424
1425 out:
1426 damon_sysfs_targets_rm_dirs(targets);
1427 kobject_put(&target->kobj);
1428 return err;
1429 }
1430
1431 static ssize_t nr_targets_show(struct kobject *kobj,
1432 struct kobj_attribute *attr, char *buf)
1433 {
1434 struct damon_sysfs_targets *targets = container_of(kobj,
1435 struct damon_sysfs_targets, kobj);
1436
1437 return sysfs_emit(buf, "%d\n", targets->nr);
1438 }
1439
1440 static ssize_t nr_targets_store(struct kobject *kobj,
1441 struct kobj_attribute *attr, const char *buf, size_t count)
1442 {
1443 struct damon_sysfs_targets *targets = container_of(kobj,
1444 struct damon_sysfs_targets, kobj);
1445 int nr, err = kstrtoint(buf, 0, &nr);
1446
1447 if (err)
1448 return err;
1449 if (nr < 0)
1450 return -EINVAL;
1451
1452 if (!mutex_trylock(&damon_sysfs_lock))
1453 return -EBUSY;
1454 err = damon_sysfs_targets_add_dirs(targets, nr);
1455 mutex_unlock(&damon_sysfs_lock);
1456 if (err)
1457 return err;
1458
1459 return count;
1460 }
1461
1462 static void damon_sysfs_targets_release(struct kobject *kobj)
1463 {
1464 kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
1465 }
1466
1467 static struct kobj_attribute damon_sysfs_targets_nr_attr =
1468 __ATTR_RW_MODE(nr_targets, 0600);
1469
1470 static struct attribute *damon_sysfs_targets_attrs[] = {
1471 &damon_sysfs_targets_nr_attr.attr,
1472 NULL,
1473 };
1474 ATTRIBUTE_GROUPS(damon_sysfs_targets);
1475
1476 static struct kobj_type damon_sysfs_targets_ktype = {
1477 .release = damon_sysfs_targets_release,
1478 .sysfs_ops = &kobj_sysfs_ops,
1479 .default_groups = damon_sysfs_targets_groups,
1480 };
1481
1482
1483
1484
1485
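/*
 * intervals directory
 */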
1486 struct damon_sysfs_intervals {
1487 struct kobject kobj;
1488 unsigned long sample_us;
1489 unsigned long aggr_us;
1490 unsigned long update_us;
1491 };
1492
1493 static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
1494 unsigned long sample_us, unsigned long aggr_us,
1495 unsigned long update_us)
1496 {
1497 struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
1498 GFP_KERNEL);
1499
1500 if (!intervals)
1501 return NULL;
1502
1503 intervals->kobj = (struct kobject){};
1504 intervals->sample_us = sample_us;
1505 intervals->aggr_us = aggr_us;
1506 intervals->update_us = update_us;
1507 return intervals;
1508 }
1509
1510 static ssize_t sample_us_show(struct kobject *kobj,
1511 struct kobj_attribute *attr, char *buf)
1512 {
1513 struct damon_sysfs_intervals *intervals = container_of(kobj,
1514 struct damon_sysfs_intervals, kobj);
1515
1516 return sysfs_emit(buf, "%lu\n", intervals->sample_us);
1517 }
1518
1519 static ssize_t sample_us_store(struct kobject *kobj,
1520 struct kobj_attribute *attr, const char *buf, size_t count)
1521 {
1522 struct damon_sysfs_intervals *intervals = container_of(kobj,
1523 struct damon_sysfs_intervals, kobj);
1524 unsigned long us;
1525 int err = kstrtoul(buf, 0, &us);
1526
1527 if (err)
1528 return -EINVAL;
1529
1530 intervals->sample_us = us;
1531 return count;
1532 }
1533
1534 static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
1535 char *buf)
1536 {
1537 struct damon_sysfs_intervals *intervals = container_of(kobj,
1538 struct damon_sysfs_intervals, kobj);
1539
1540 return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
1541 }
1542
1543 static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
1544 const char *buf, size_t count)
1545 {
1546 struct damon_sysfs_intervals *intervals = container_of(kobj,
1547 struct damon_sysfs_intervals, kobj);
1548 unsigned long us;
1549 int err = kstrtoul(buf, 0, &us);
1550
1551 if (err)
1552 return -EINVAL;
1553
1554 intervals->aggr_us = us;
1555 return count;
1556 }
1557
1558 static ssize_t update_us_show(struct kobject *kobj,
1559 struct kobj_attribute *attr, char *buf)
1560 {
1561 struct damon_sysfs_intervals *intervals = container_of(kobj,
1562 struct damon_sysfs_intervals, kobj);
1563
1564 return sysfs_emit(buf, "%lu\n", intervals->update_us);
1565 }
1566
1567 static ssize_t update_us_store(struct kobject *kobj,
1568 struct kobj_attribute *attr, const char *buf, size_t count)
1569 {
1570 struct damon_sysfs_intervals *intervals = container_of(kobj,
1571 struct damon_sysfs_intervals, kobj);
1572 unsigned long us;
1573 int err = kstrtoul(buf, 0, &us);
1574
1575 if (err)
1576 return -EINVAL;
1577
1578 intervals->update_us = us;
1579 return count;
1580 }
1581
1582 static void damon_sysfs_intervals_release(struct kobject *kobj)
1583 {
1584 kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
1585 }
1586
1587 static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
1588 __ATTR_RW_MODE(sample_us, 0600);
1589
1590 static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
1591 __ATTR_RW_MODE(aggr_us, 0600);
1592
1593 static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
1594 __ATTR_RW_MODE(update_us, 0600);
1595
1596 static struct attribute *damon_sysfs_intervals_attrs[] = {
1597 &damon_sysfs_intervals_sample_us_attr.attr,
1598 &damon_sysfs_intervals_aggr_us_attr.attr,
1599 &damon_sysfs_intervals_update_us_attr.attr,
1600 NULL,
1601 };
1602 ATTRIBUTE_GROUPS(damon_sysfs_intervals);
1603
1604 static struct kobj_type damon_sysfs_intervals_ktype = {
1605 .release = damon_sysfs_intervals_release,
1606 .sysfs_ops = &kobj_sysfs_ops,
1607 .default_groups = damon_sysfs_intervals_groups,
1608 };
1609
1610
1611
1612
1613
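/*
 * monitoring_attrs directory
 */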
1614 struct damon_sysfs_attrs {
1615 struct kobject kobj;
1616 struct damon_sysfs_intervals *intervals;
1617 struct damon_sysfs_ul_range *nr_regions_range;
1618 };
1619
1620 static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
1621 {
1622 struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
1623
1624 if (!attrs)
1625 return NULL;
1626 attrs->kobj = (struct kobject){};
1627 return attrs;
1628 }
1629
1630 static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
1631 {
1632 struct damon_sysfs_intervals *intervals;
1633 struct damon_sysfs_ul_range *nr_regions_range;
1634 int err;
1635
1636 intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
1637 if (!intervals)
1638 return -ENOMEM;
1639
1640 err = kobject_init_and_add(&intervals->kobj,
1641 &damon_sysfs_intervals_ktype, &attrs->kobj,
1642 "intervals");
1643 if (err)
1644 goto put_intervals_out;
1645 attrs->intervals = intervals;
1646
1647 nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
1648 if (!nr_regions_range) {
1649 err = -ENOMEM;
1650 goto put_intervals_out;
1651 }
1652
1653 err = kobject_init_and_add(&nr_regions_range->kobj,
1654 &damon_sysfs_ul_range_ktype, &attrs->kobj,
1655 "nr_regions");
1656 if (err)
1657 goto put_nr_regions_intervals_out;
1658 attrs->nr_regions_range = nr_regions_range;
1659 return 0;
1660
1661 put_nr_regions_intervals_out:
1662 kobject_put(&nr_regions_range->kobj);
1663 attrs->nr_regions_range = NULL;
1664 put_intervals_out:
1665 kobject_put(&intervals->kobj);
1666 attrs->intervals = NULL;
1667 return err;
1668 }
1669
1670 static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
1671 {
1672 kobject_put(&attrs->nr_regions_range->kobj);
1673 kobject_put(&attrs->intervals->kobj);
1674 }
1675
1676 static void damon_sysfs_attrs_release(struct kobject *kobj)
1677 {
1678 kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
1679 }
1680
1681 static struct attribute *damon_sysfs_attrs_attrs[] = {
1682 NULL,
1683 };
1684 ATTRIBUTE_GROUPS(damon_sysfs_attrs);
1685
1686 static struct kobj_type damon_sysfs_attrs_ktype = {
1687 .release = damon_sysfs_attrs_release,
1688 .sysfs_ops = &kobj_sysfs_ops,
1689 .default_groups = damon_sysfs_attrs_groups,
1690 };
1691
1692
1693
1694
1695
1696
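/*
 * context directory
 */

/* This should match with enum damon_ops_id */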
1697 static const char * const damon_sysfs_ops_strs[] = {
1698 "vaddr",
1699 "fvaddr",
1700 "paddr",
1701 };
1702
1703 struct damon_sysfs_context {
1704 struct kobject kobj;
1705 enum damon_ops_id ops_id;
1706 struct damon_sysfs_attrs *attrs;
1707 struct damon_sysfs_targets *targets;
1708 struct damon_sysfs_schemes *schemes;
1709 };
1710
1711 static struct damon_sysfs_context *damon_sysfs_context_alloc(
1712 enum damon_ops_id ops_id)
1713 {
1714 struct damon_sysfs_context *context = kmalloc(sizeof(*context),
1715 GFP_KERNEL);
1716
1717 if (!context)
1718 return NULL;
1719 context->kobj = (struct kobject){};
1720 context->ops_id = ops_id;
1721 return context;
1722 }
1723
1724 static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
1725 {
1726 struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
1727 int err;
1728
1729 if (!attrs)
1730 return -ENOMEM;
1731 err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
1732 &context->kobj, "monitoring_attrs");
1733 if (err)
1734 goto out;
1735 err = damon_sysfs_attrs_add_dirs(attrs);
1736 if (err)
1737 goto out;
1738 context->attrs = attrs;
1739 return 0;
1740
1741 out:
1742 kobject_put(&attrs->kobj);
1743 return err;
1744 }
1745
1746 static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
1747 {
1748 struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
1749 int err;
1750
1751 if (!targets)
1752 return -ENOMEM;
1753 err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
1754 &context->kobj, "targets");
1755 if (err) {
1756 kobject_put(&targets->kobj);
1757 return err;
1758 }
1759 context->targets = targets;
1760 return 0;
1761 }
1762
1763 static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
1764 {
1765 struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
1766 int err;
1767
1768 if (!schemes)
1769 return -ENOMEM;
1770 err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
1771 &context->kobj, "schemes");
1772 if (err) {
1773 kobject_put(&schemes->kobj);
1774 return err;
1775 }
1776 context->schemes = schemes;
1777 return 0;
1778 }
1779
1780 static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
1781 {
1782 int err;
1783
1784 err = damon_sysfs_context_set_attrs(context);
1785 if (err)
1786 return err;
1787
1788 err = damon_sysfs_context_set_targets(context);
1789 if (err)
1790 goto put_attrs_out;
1791
1792 err = damon_sysfs_context_set_schemes(context);
1793 if (err)
1794 goto put_targets_attrs_out;
1795 return 0;
1796
1797 put_targets_attrs_out:
1798 kobject_put(&context->targets->kobj);
1799 context->targets = NULL;
1800 put_attrs_out:
1801 kobject_put(&context->attrs->kobj);
1802 context->attrs = NULL;
1803 return err;
1804 }
1805
1806 static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
1807 {
1808 damon_sysfs_attrs_rm_dirs(context->attrs);
1809 kobject_put(&context->attrs->kobj);
1810 damon_sysfs_targets_rm_dirs(context->targets);
1811 kobject_put(&context->targets->kobj);
1812 damon_sysfs_schemes_rm_dirs(context->schemes);
1813 kobject_put(&context->schemes->kobj);
1814 }
1815
1816 static ssize_t avail_operations_show(struct kobject *kobj,
1817 struct kobj_attribute *attr, char *buf)
1818 {
1819 enum damon_ops_id id;
1820 int len = 0;
1821
1822 for (id = 0; id < NR_DAMON_OPS; id++) {
1823 if (!damon_is_registered_ops(id))
1824 continue;
1825 len += sysfs_emit_at(buf, len, "%s\n",
1826 damon_sysfs_ops_strs[id]);
1827 }
1828 return len;
1829 }
1830
1831 static ssize_t operations_show(struct kobject *kobj,
1832 struct kobj_attribute *attr, char *buf)
1833 {
1834 struct damon_sysfs_context *context = container_of(kobj,
1835 struct damon_sysfs_context, kobj);
1836
1837 return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
1838 }
1839
1840 static ssize_t operations_store(struct kobject *kobj,
1841 struct kobj_attribute *attr, const char *buf, size_t count)
1842 {
1843 struct damon_sysfs_context *context = container_of(kobj,
1844 struct damon_sysfs_context, kobj);
1845 enum damon_ops_id id;
1846
1847 for (id = 0; id < NR_DAMON_OPS; id++) {
1848 if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1849 context->ops_id = id;
1850 return count;
1851 }
1852 }
1853 return -EINVAL;
1854 }
1855
1856 static void damon_sysfs_context_release(struct kobject *kobj)
1857 {
1858 kfree(container_of(kobj, struct damon_sysfs_context, kobj));
1859 }
1860
1861 static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
1862 __ATTR_RO_MODE(avail_operations, 0400);
1863
1864 static struct kobj_attribute damon_sysfs_context_operations_attr =
1865 __ATTR_RW_MODE(operations, 0600);
1866
1867 static struct attribute *damon_sysfs_context_attrs[] = {
1868 &damon_sysfs_context_avail_operations_attr.attr,
1869 &damon_sysfs_context_operations_attr.attr,
1870 NULL,
1871 };
1872 ATTRIBUTE_GROUPS(damon_sysfs_context);
1873
1874 static struct kobj_type damon_sysfs_context_ktype = {
1875 .release = damon_sysfs_context_release,
1876 .sysfs_ops = &kobj_sysfs_ops,
1877 .default_groups = damon_sysfs_context_groups,
1878 };
1879
1880
1881
1882
1883
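/*
 * contexts directory
 */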
1884 struct damon_sysfs_contexts {
1885 struct kobject kobj;
1886 struct damon_sysfs_context **contexts_arr;
1887 int nr;
1888 };
1889
1890 static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1891 {
1892 return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1893 }
1894
1895 static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
1896 {
1897 struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
1898 int i;
1899
1900 for (i = 0; i < contexts->nr; i++) {
1901 damon_sysfs_context_rm_dirs(contexts_arr[i]);
1902 kobject_put(&contexts_arr[i]->kobj);
1903 }
1904 contexts->nr = 0;
1905 kfree(contexts_arr);
1906 contexts->contexts_arr = NULL;
1907 }
1908
1909 static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
1910 int nr_contexts)
1911 {
1912 struct damon_sysfs_context **contexts_arr, *context;
1913 int err, i;
1914
1915 damon_sysfs_contexts_rm_dirs(contexts);
1916 if (!nr_contexts)
1917 return 0;
1918
1919 contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
1920 GFP_KERNEL | __GFP_NOWARN);
1921 if (!contexts_arr)
1922 return -ENOMEM;
1923 contexts->contexts_arr = contexts_arr;
1924
1925 for (i = 0; i < nr_contexts; i++) {
1926 context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
1927 if (!context) {
1928 damon_sysfs_contexts_rm_dirs(contexts);
1929 return -ENOMEM;
1930 }
1931
1932 err = kobject_init_and_add(&context->kobj,
1933 &damon_sysfs_context_ktype, &contexts->kobj,
1934 "%d", i);
1935 if (err)
1936 goto out;
1937
1938 err = damon_sysfs_context_add_dirs(context);
1939 if (err)
1940 goto out;
1941
1942 contexts_arr[i] = context;
1943 contexts->nr++;
1944 }
1945 return 0;
1946
1947 out:
1948 damon_sysfs_contexts_rm_dirs(contexts);
1949 kobject_put(&context->kobj);
1950 return err;
1951 }
1952
1953 static ssize_t nr_contexts_show(struct kobject *kobj,
1954 struct kobj_attribute *attr, char *buf)
1955 {
1956 struct damon_sysfs_contexts *contexts = container_of(kobj,
1957 struct damon_sysfs_contexts, kobj);
1958
1959 return sysfs_emit(buf, "%d\n", contexts->nr);
1960 }
1961
1962 static ssize_t nr_contexts_store(struct kobject *kobj,
1963 struct kobj_attribute *attr, const char *buf, size_t count)
1964 {
1965 struct damon_sysfs_contexts *contexts = container_of(kobj,
1966 struct damon_sysfs_contexts, kobj);
1967 int nr, err;
1968
1969 err = kstrtoint(buf, 0, &nr);
1970 if (err)
1971 return err;
1972
1973 if (nr < 0 || 1 < nr)
1974 return -EINVAL;
1975
1976 if (!mutex_trylock(&damon_sysfs_lock))
1977 return -EBUSY;
1978 err = damon_sysfs_contexts_add_dirs(contexts, nr);
1979 mutex_unlock(&damon_sysfs_lock);
1980 if (err)
1981 return err;
1982
1983 return count;
1984 }
1985
1986 static void damon_sysfs_contexts_release(struct kobject *kobj)
1987 {
1988 kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
1989 }
1990
1991 static struct kobj_attribute damon_sysfs_contexts_nr_attr
1992 = __ATTR_RW_MODE(nr_contexts, 0600);
1993
1994 static struct attribute *damon_sysfs_contexts_attrs[] = {
1995 &damon_sysfs_contexts_nr_attr.attr,
1996 NULL,
1997 };
1998 ATTRIBUTE_GROUPS(damon_sysfs_contexts);
1999
2000 static struct kobj_type damon_sysfs_contexts_ktype = {
2001 .release = damon_sysfs_contexts_release,
2002 .sysfs_ops = &kobj_sysfs_ops,
2003 .default_groups = damon_sysfs_contexts_groups,
2004 };
2005
2006
2007
2008
2009
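/*
 * kdamond directory
 */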
2010 struct damon_sysfs_kdamond {
2011 struct kobject kobj;
2012 struct damon_sysfs_contexts *contexts;
2013 struct damon_ctx *damon_ctx;
2014 };
2015
2016 static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
2017 {
2018 return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
2019 }
2020
2021 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
2022 {
2023 struct damon_sysfs_contexts *contexts;
2024 int err;
2025
2026 contexts = damon_sysfs_contexts_alloc();
2027 if (!contexts)
2028 return -ENOMEM;
2029
2030 err = kobject_init_and_add(&contexts->kobj,
2031 &damon_sysfs_contexts_ktype, &kdamond->kobj,
2032 "contexts");
2033 if (err) {
2034 kobject_put(&contexts->kobj);
2035 return err;
2036 }
2037 kdamond->contexts = contexts;
2038
2039 return err;
2040 }
2041
2042 static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
2043 {
2044 damon_sysfs_contexts_rm_dirs(kdamond->contexts);
2045 kobject_put(&kdamond->contexts->kobj);
2046 }
2047
2048 static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
2049 {
2050 bool running;
2051
2052 mutex_lock(&ctx->kdamond_lock);
2053 running = ctx->kdamond != NULL;
2054 mutex_unlock(&ctx->kdamond_lock);
2055 return running;
2056 }
2057
2058 /*
2059 * enum damon_sysfs_cmd - Commands for a specific kdamond.
2060 */
2061 enum damon_sysfs_cmd {
2062 /* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
2063 DAMON_SYSFS_CMD_ON,
2064 /* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
2065 DAMON_SYSFS_CMD_OFF,
2066 /* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
2067 DAMON_SYSFS_CMD_COMMIT,
2068 /*
2069 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update the sysfs files for the
2070 * per-scheme statistics.
2071 */
2072 DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
2073 /*
2074 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
2075 */
2076 NR_DAMON_SYSFS_CMDS,
2077 };
2078
2079 /* Should match with enum damon_sysfs_cmd */
2080 static const char * const damon_sysfs_cmd_strs[] = {
2081 "on",
2082 "off",
2083 "commit",
2084 "update_schemes_stats",
2085 };
2086
2087 /*
2088 * struct damon_sysfs_cmd_request - A request to the DAMON callback.
2089 * @cmd:     The command that needs to be handled by the callback.
2090 * @kdamond: The kobject wrapper associated with the kdamond thread.
2091 *
2092 * This structure represents a sysfs command request that needs to access
2093 * DAMON context-internal data.  Because such data can be safely accessed
2094 * from DAMON callbacks without additional synchronization, the request is
2095 * handled by the DAMON callback rather than by the sysfs handler itself.
2096 * A non-NULL @kdamond means the request is valid and waiting to be handled.
2097 */
2098 struct damon_sysfs_cmd_request {
2099 enum damon_sysfs_cmd cmd;
2100 struct damon_sysfs_kdamond *kdamond;
2101 };
2102
2103 /* Current DAMON callback request.  Protected by damon_sysfs_lock. */
2104 static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
2105
2106 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
2107 char *buf)
2108 {
2109 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2110 struct damon_sysfs_kdamond, kobj);
2111 struct damon_ctx *ctx = kdamond->damon_ctx;
2112 bool running;
2113
2114 if (!ctx)
2115 running = false;
2116 else
2117 running = damon_sysfs_ctx_running(ctx);
2118
2119 return sysfs_emit(buf, "%s\n", running ?
2120 damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
2121 damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
2122 }
2123
2124 static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
2125 struct damon_sysfs_attrs *sys_attrs)
2126 {
2127 struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
2128 struct damon_sysfs_ul_range *sys_nr_regions =
2129 sys_attrs->nr_regions_range;
2130
2131 return damon_set_attrs(ctx, sys_intervals->sample_us,
2132 sys_intervals->aggr_us, sys_intervals->update_us,
2133 sys_nr_regions->min, sys_nr_regions->max);
2134 }
2135
2136 static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
2137 {
2138 struct damon_target *t, *next;
2139
2140 damon_for_each_target_safe(t, next, ctx) {
2141 if (damon_target_has_pid(ctx))
2142 put_pid(t->pid);
2143 damon_destroy_target(t);
2144 }
2145 }
2146
2147 static int damon_sysfs_set_regions(struct damon_target *t,
2148 struct damon_sysfs_regions *sysfs_regions)
2149 {
2150 struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
2151 sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
2152 int i, err = -EINVAL;
2153
2154 if (!ranges)
2155 return -ENOMEM;
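/* Each region must have start <= end, and regions must be sorted and must not overlap */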
2156 for (i = 0; i < sysfs_regions->nr; i++) {
2157 struct damon_sysfs_region *sys_region =
2158 sysfs_regions->regions_arr[i];
2159
2160 if (sys_region->start > sys_region->end)
2161 goto out;
2162
2163 ranges[i].start = sys_region->start;
2164 ranges[i].end = sys_region->end;
2165 if (i == 0)
2166 continue;
2167 if (ranges[i - 1].end > ranges[i].start)
2168 goto out;
2169 }
2170 err = damon_set_regions(t, ranges, sysfs_regions->nr);
2171 out:
2172 kfree(ranges);
2173 return err;
2175 }
2176
2177 static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
2178 struct damon_ctx *ctx)
2179 {
2180 struct damon_target *t = damon_new_target();
2181 int err = -EINVAL;
2182
2183 if (!t)
2184 return -ENOMEM;
2185 damon_add_target(ctx, t);
2186 if (damon_target_has_pid(ctx)) {
2187 t->pid = find_get_pid(sys_target->pid);
2188 if (!t->pid)
2189 goto destroy_targets_out;
2190 }
2191 err = damon_sysfs_set_regions(t, sys_target->regions);
2192 if (err)
2193 goto destroy_targets_out;
2194 return 0;
2195
2196 destroy_targets_out:
2197 damon_sysfs_destroy_targets(ctx);
2198 return err;
2199 }
2200
2201 /*
2202 * Search a target in a context that corresponds to the sysfs target input.
2203 *
2204 * Return: pointer to the target if found, NULL if not found, or an error
2205 * pointer if the search failed.
2206 */
2207 static struct damon_target *damon_sysfs_existing_target(
2208 struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
2209 {
2210 struct pid *pid;
2211 struct damon_target *t;
2212
2213 if (!damon_target_has_pid(ctx)) {
2214 /* Up to only one target for paddr could exist */
2215 damon_for_each_target(t, ctx)
2216 return t;
2217 return NULL;
2218 }
2219
2220 /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
2221 pid = find_get_pid(sys_target->pid);
2222 if (!pid)
2223 return ERR_PTR(-EINVAL);
2224 damon_for_each_target(t, ctx) {
2225 if (t->pid == pid) {
2226 put_pid(pid);
2227 return t;
2228 }
2229 }
2230 put_pid(pid);
2231 return NULL;
2232 }
2233
2234 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2235 struct damon_sysfs_targets *sysfs_targets)
2236 {
2237 int i, err;
2238
2239 /* Multiple physical address space monitoring targets make no sense */
2240 if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
2241 return -EINVAL;
2242
2243 for (i = 0; i < sysfs_targets->nr; i++) {
2244 struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
2245 struct damon_target *t = damon_sysfs_existing_target(st, ctx);
2246
2247 if (IS_ERR(t))
2248 return PTR_ERR(t);
2249 if (!t)
2250 err = damon_sysfs_add_target(st, ctx);
2251 else
2252 err = damon_sysfs_set_regions(t, st->regions);
2253 if (err)
2254 return err;
2255 }
2256 return 0;
2257 }
2258
2259 static struct damos *damon_sysfs_mk_scheme(
2260 struct damon_sysfs_scheme *sysfs_scheme)
2261 {
2262 struct damon_sysfs_access_pattern *pattern =
2263 sysfs_scheme->access_pattern;
2264 struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
2265 struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
2266 struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
2267 struct damos_quota quota = {
2268 .ms = sysfs_quotas->ms,
2269 .sz = sysfs_quotas->sz,
2270 .reset_interval = sysfs_quotas->reset_interval_ms,
2271 .weight_sz = sysfs_weights->sz,
2272 .weight_nr_accesses = sysfs_weights->nr_accesses,
2273 .weight_age = sysfs_weights->age,
2274 };
2275 struct damos_watermarks wmarks = {
2276 .metric = sysfs_wmarks->metric,
2277 .interval = sysfs_wmarks->interval_us,
2278 .high = sysfs_wmarks->high,
2279 .mid = sysfs_wmarks->mid,
2280 .low = sysfs_wmarks->low,
2281 };
2282
2283 return damon_new_scheme(pattern->sz->min, pattern->sz->max,
2284 pattern->nr_accesses->min, pattern->nr_accesses->max,
2285 pattern->age->min, pattern->age->max,
2286 sysfs_scheme->action, &quota, &wmarks);
2287 }
2288
2289 static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
2290 struct damon_sysfs_schemes *sysfs_schemes)
2291 {
2292 int i;
2293
2294 for (i = 0; i < sysfs_schemes->nr; i++) {
2295 struct damos *scheme, *next;
2296
2297 scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
2298 if (!scheme) {
2299 damon_for_each_scheme_safe(scheme, next, ctx)
2300 damon_destroy_scheme(scheme);
2301 return -ENOMEM;
2302 }
2303 damon_add_scheme(ctx, scheme);
2304 }
2305 return 0;
2306 }
2307
2308 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
2309 {
2310 struct damon_target *t, *next;
2311
2312 if (ctx->ops.id != DAMON_OPS_VADDR && ctx->ops.id != DAMON_OPS_FVADDR)
2313 return;
2314
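/* Drop the pid references taken when the targets were added */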
2315 mutex_lock(&ctx->kdamond_lock);
2316 damon_for_each_target_safe(t, next, ctx) {
2317 put_pid(t->pid);
2318 damon_destroy_target(t);
2319 }
2320 mutex_unlock(&ctx->kdamond_lock);
2321 }
2322
2323 /*
2324 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
2325 * @kdamond: The kobject wrapper associated with the kdamond thread.
2326 *
2327 * This function reads the schemes stats of the specific kdamond and updates
2328 * the corresponding values for the sysfs files.  It should be called from
2329 * DAMON callbacks while holding damon_sysfs_lock, to safely access the
2330 * DAMON context-internal data and the DAMON sysfs variables.
2331 */
2332 static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
2333 {
2334 struct damon_ctx *ctx = kdamond->damon_ctx;
2335 struct damon_sysfs_schemes *sysfs_schemes;
2336 struct damos *scheme;
2337 int schemes_idx = 0;
2338
2339 if (!ctx)
2340 return -EINVAL;
2341 sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
2342 damon_for_each_scheme(scheme, ctx) {
2343 struct damon_sysfs_stats *sysfs_stats;
2344
2345 sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
2346 sysfs_stats->nr_tried = scheme->stat.nr_tried;
2347 sysfs_stats->sz_tried = scheme->stat.sz_tried;
2348 sysfs_stats->nr_applied = scheme->stat.nr_applied;
2349 sysfs_stats->sz_applied = scheme->stat.sz_applied;
2350 sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
2351 }
2352 return 0;
2353 }
2354
2355 static inline bool damon_sysfs_kdamond_running(
2356 struct damon_sysfs_kdamond *kdamond)
2357 {
2358 return kdamond->damon_ctx &&
2359 damon_sysfs_ctx_running(kdamond->damon_ctx);
2360 }
2361
2362 static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
2363 struct damon_sysfs_context *sys_ctx)
2364 {
2365 int err;
2366
2367 err = damon_select_ops(ctx, sys_ctx->ops_id);
2368 if (err)
2369 return err;
2370 err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2371 if (err)
2372 return err;
2373 err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2374 if (err)
2375 return err;
2376 return damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2377 }
2378
2379 /*
2380 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
2381 * @kdamond: The kobject wrapper for the associated kdamond.
2382 *
2383 * If the sysfs input is wrong, the kdamond will be terminated.
2384 */
2385 static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
2386 {
2387 if (!damon_sysfs_kdamond_running(kdamond))
2388 return -EINVAL;
2389
2390 if (kdamond->contexts->nr != 1)
2391 return -EINVAL;
2392
2393 return damon_sysfs_apply_inputs(kdamond->damon_ctx,
2394 kdamond->contexts->contexts_arr[0]);
2395 }
2396
2397 /*
2398 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
2399 * @c: The DAMON context of the callback.
2400 *
2401 * This function is periodically called back from the kdamond thread for @c.
2402 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
2403 */
2404 static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
2405 {
2406 struct damon_sysfs_kdamond *kdamond;
2407 int err = 0;
2408
2409 /* avoid deadlock due to concurrent state_store('off') */
2410 if (!mutex_trylock(&damon_sysfs_lock))
2411 return 0;
2412 kdamond = damon_sysfs_cmd_request.kdamond;
2413 if (!kdamond || kdamond->damon_ctx != c)
2414 goto out;
2415 switch (damon_sysfs_cmd_request.cmd) {
2416 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
2417 err = damon_sysfs_upd_schemes_stats(kdamond);
2418 break;
2419 case DAMON_SYSFS_CMD_COMMIT:
2420 err = damon_sysfs_commit_input(kdamond);
2421 break;
2422 default:
2423 break;
2424 }
2425 /* Mark the request as invalid (handled) now. */
2426 damon_sysfs_cmd_request.kdamond = NULL;
2427 out:
2428 mutex_unlock(&damon_sysfs_lock);
2429 return err;
2430 }
2431
2432 static struct damon_ctx *damon_sysfs_build_ctx(
2433 struct damon_sysfs_context *sys_ctx)
2434 {
2435 struct damon_ctx *ctx = damon_new_ctx();
2436 int err;
2437
2438 if (!ctx)
2439 return ERR_PTR(-ENOMEM);
2440
2441 err = damon_sysfs_apply_inputs(ctx, sys_ctx);
2442 if (err) {
2443 damon_destroy_ctx(ctx);
2444 return ERR_PTR(err);
2445 }
2446
2447 ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
2448 ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
2449 ctx->callback.before_terminate = damon_sysfs_before_terminate;
2450 return ctx;
2451 }
2452
2453 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
2454 {
2455 struct damon_ctx *ctx;
2456 int err;
2457
2458 if (kdamond->damon_ctx &&
2459 damon_sysfs_ctx_running(kdamond->damon_ctx))
2460 return -EBUSY;
2461 if (damon_sysfs_cmd_request.kdamond == kdamond)
2462 return -EBUSY;
2463
2464 if (kdamond->contexts->nr != 1)
2465 return -EINVAL;
2466
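/*
* Free a context kept from a previous termination (see the comment in
* damon_sysfs_turn_damon_off()) before building a new one.
*/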
2467 if (kdamond->damon_ctx)
2468 damon_destroy_ctx(kdamond->damon_ctx);
2469 kdamond->damon_ctx = NULL;
2470
2471 ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
2472 if (IS_ERR(ctx))
2473 return PTR_ERR(ctx);
2474 err = damon_start(&ctx, 1, false);
2475 if (err) {
2476 damon_destroy_ctx(ctx);
2477 return err;
2478 }
2479 kdamond->damon_ctx = ctx;
2480 return err;
2481 }
2482
2483 static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
2484 {
2485 if (!kdamond->damon_ctx)
2486 return -EINVAL;
2487 return damon_stop(&kdamond->damon_ctx, 1);
2488 /*
2489 * To let users read the final monitoring results of an already turned-off
2490 * DAMON, kdamond->damon_ctx is not freed here.  It is freed on the next
2491 * damon_sysfs_turn_damon_on() or in damon_sysfs_kdamond_release().
2492 */
2493 }
2494
2495 /*
2496 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
2497 * @cmd:     The command to handle.
2498 * @kdamond: The kobject wrapper for the associated kdamond.
2499 *
2500 * This function handles a DAMON sysfs command for a kdamond.  For commands
2501 * that need to access running DAMON context-internal data, it requests
2502 * handling of the command to the DAMON callback
2503 * (damon_sysfs_cmd_request_callback()) and then waits until the request is
2504 * handled, or until the target kdamond has stopped.
2505 *
2506 * Return: 0 on success, negative error code otherwise.
2507 */
2508 static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
2509 struct damon_sysfs_kdamond *kdamond)
2510 {
2511 bool need_wait = true;
2512
2513 /* Handle commands that don't need to access DAMON context-internal data */
2514 switch (cmd) {
2515 case DAMON_SYSFS_CMD_ON:
2516 return damon_sysfs_turn_damon_on(kdamond);
2517 case DAMON_SYSFS_CMD_OFF:
2518 return damon_sysfs_turn_damon_off(kdamond);
2519 default:
2520 break;
2521 }
2522
2523 /* Pass the command to the DAMON callback for safe DAMON context access */
2524 if (damon_sysfs_cmd_request.kdamond)
2525 return -EBUSY;
2526 if (!damon_sysfs_kdamond_running(kdamond))
2527 return -EINVAL;
2528 damon_sysfs_cmd_request.cmd = cmd;
2529 damon_sysfs_cmd_request.kdamond = kdamond;
2530
2531 /*
2532 * wait until damon_sysfs_cmd_request_callback() handles the request
2533 * from the kdamond context
2534 */
2535 mutex_unlock(&damon_sysfs_lock);
2536 while (need_wait) {
2537 schedule_timeout_idle(msecs_to_jiffies(100));
2538 if (!mutex_trylock(&damon_sysfs_lock))
2539 continue;
2540 if (!damon_sysfs_cmd_request.kdamond) {
2541 /* damon_sysfs_cmd_request_callback() handled the request */
2542 need_wait = false;
2543 } else if (!damon_sysfs_kdamond_running(kdamond)) {
2544 /* kdamond has already finished */
2545 need_wait = false;
2546 damon_sysfs_cmd_request.kdamond = NULL;
2547 }
2548 mutex_unlock(&damon_sysfs_lock);
2549 }
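/* Re-take the lock dropped above; the caller (state_store()) expects it to be held */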
2550 mutex_lock(&damon_sysfs_lock);
2551 return 0;
2552 }
2553
2554 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
2555 const char *buf, size_t count)
2556 {
2557 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2558 struct damon_sysfs_kdamond, kobj);
2559 enum damon_sysfs_cmd cmd;
2560 ssize_t ret = -EINVAL;
2561
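/* Return -EBUSY instead of blocking if another holder has damon_sysfs_lock */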
2562 if (!mutex_trylock(&damon_sysfs_lock))
2563 return -EBUSY;
2564 for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
2565 if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
2566 ret = damon_sysfs_handle_cmd(cmd, kdamond);
2567 break;
2568 }
2569 }
2570 mutex_unlock(&damon_sysfs_lock);
2571 if (!ret)
2572 ret = count;
2573 return ret;
2574 }
2575
2576 static ssize_t pid_show(struct kobject *kobj,
2577 struct kobj_attribute *attr, char *buf)
2578 {
2579 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2580 struct damon_sysfs_kdamond, kobj);
2581 struct damon_ctx *ctx;
2582 int pid;
2583
2584 if (!mutex_trylock(&damon_sysfs_lock))
2585 return -EBUSY;
2586 ctx = kdamond->damon_ctx;
2587 if (!ctx) {
2588 pid = -1;
2589 goto out;
2590 }
2591 mutex_lock(&ctx->kdamond_lock);
2592 if (!ctx->kdamond)
2593 pid = -1;
2594 else
2595 pid = ctx->kdamond->pid;
2596 mutex_unlock(&ctx->kdamond_lock);
2597 out:
2598 mutex_unlock(&damon_sysfs_lock);
2599 return sysfs_emit(buf, "%d\n", pid);
2600 }
2601
2602 static void damon_sysfs_kdamond_release(struct kobject *kobj)
2603 {
2604 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2605 struct damon_sysfs_kdamond, kobj);
2606
2607 if (kdamond->damon_ctx)
2608 damon_destroy_ctx(kdamond->damon_ctx);
2609 kfree(kdamond);
2610 }
2611
2612 static struct kobj_attribute damon_sysfs_kdamond_state_attr =
2613 __ATTR_RW_MODE(state, 0600);
2614
2615 static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
2616 __ATTR_RO_MODE(pid, 0400);
2617
2618 static struct attribute *damon_sysfs_kdamond_attrs[] = {
2619 &damon_sysfs_kdamond_state_attr.attr,
2620 &damon_sysfs_kdamond_pid_attr.attr,
2621 NULL,
2622 };
2623 ATTRIBUTE_GROUPS(damon_sysfs_kdamond);
2624
2625 static struct kobj_type damon_sysfs_kdamond_ktype = {
2626 .release = damon_sysfs_kdamond_release,
2627 .sysfs_ops = &kobj_sysfs_ops,
2628 .default_groups = damon_sysfs_kdamond_groups,
2629 };
2630
2631 /*
2632 * kdamonds directory
2633 */
2634
2635 struct damon_sysfs_kdamonds {
2636 struct kobject kobj;
2637 struct damon_sysfs_kdamond **kdamonds_arr;
2638 int nr;
2639 };
2640
2641 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2642 {
2643 return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
2644 }
2645
2646 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2647 {
2648 struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2649 int i;
2650
2651 for (i = 0; i < kdamonds->nr; i++) {
2652 damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2653 kobject_put(&kdamonds_arr[i]->kobj);
2654 }
2655 kdamonds->nr = 0;
2656 kfree(kdamonds_arr);
2657 kdamonds->kdamonds_arr = NULL;
2658 }
2659
2660 static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
2661 int nr_kdamonds)
2662 {
2663 int nr_running_ctxs = 0;
2664 int i;
2665
2666 for (i = 0; i < nr_kdamonds; i++) {
2667 struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
2668
2669 if (!ctx)
2670 continue;
2671 mutex_lock(&ctx->kdamond_lock);
2672 if (ctx->kdamond)
2673 nr_running_ctxs++;
2674 mutex_unlock(&ctx->kdamond_lock);
2675 }
2676 return nr_running_ctxs;
2677 }
2678
2679 static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
2680 int nr_kdamonds)
2681 {
2682 struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
2683 int err, i;
2684
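/* Refuse to resize while any kdamond is running or a sysfs command is pending */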
2685 if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
2686 return -EBUSY;
2687
2688 for (i = 0; i < kdamonds->nr; i++) {
2689 if (damon_sysfs_cmd_request.kdamond ==
2690 kdamonds->kdamonds_arr[i])
2691 return -EBUSY;
2692 }
2693
2694 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2695 if (!nr_kdamonds)
2696 return 0;
2697
2698 kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
2699 GFP_KERNEL | __GFP_NOWARN);
2700 if (!kdamonds_arr)
2701 return -ENOMEM;
2702 kdamonds->kdamonds_arr = kdamonds_arr;
2703
2704 for (i = 0; i < nr_kdamonds; i++) {
2705 kdamond = damon_sysfs_kdamond_alloc();
2706 if (!kdamond) {
2707 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2708 return -ENOMEM;
2709 }
2710
2711 err = kobject_init_and_add(&kdamond->kobj,
2712 &damon_sysfs_kdamond_ktype, &kdamonds->kobj,
2713 "%d", i);
2714 if (err)
2715 goto out;
2716
2717 err = damon_sysfs_kdamond_add_dirs(kdamond);
2718 if (err)
2719 goto out;
2720
2721 kdamonds_arr[i] = kdamond;
2722 kdamonds->nr++;
2723 }
2724 return 0;
2725
2726 out:
2727 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2728 kobject_put(&kdamond->kobj);
2729 return err;
2730 }
2731
2732 static ssize_t nr_kdamonds_show(struct kobject *kobj,
2733 struct kobj_attribute *attr, char *buf)
2734 {
2735 struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2736 struct damon_sysfs_kdamonds, kobj);
2737
2738 return sysfs_emit(buf, "%d\n", kdamonds->nr);
2739 }
2740
2741 static ssize_t nr_kdamonds_store(struct kobject *kobj,
2742 struct kobj_attribute *attr, const char *buf, size_t count)
2743 {
2744 struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2745 struct damon_sysfs_kdamonds, kobj);
2746 int nr, err;
2747
2748 err = kstrtoint(buf, 0, &nr);
2749 if (err)
2750 return err;
2751 if (nr < 0)
2752 return -EINVAL;
2753
2754 if (!mutex_trylock(&damon_sysfs_lock))
2755 return -EBUSY;
2756 err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2757 mutex_unlock(&damon_sysfs_lock);
2758 if (err)
2759 return err;
2760
2761 return count;
2762 }
2763
2764 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2765 {
2766 kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2767 }
2768
2769 static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
2770 __ATTR_RW_MODE(nr_kdamonds, 0600);
2771
2772 static struct attribute *damon_sysfs_kdamonds_attrs[] = {
2773 &damon_sysfs_kdamonds_nr_attr.attr,
2774 NULL,
2775 };
2776 ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);
2777
2778 static struct kobj_type damon_sysfs_kdamonds_ktype = {
2779 .release = damon_sysfs_kdamonds_release,
2780 .sysfs_ops = &kobj_sysfs_ops,
2781 .default_groups = damon_sysfs_kdamonds_groups,
2782 };
2783
2784 /*
2785 * damon user interface directory
2786 */
2787
2788 struct damon_sysfs_ui_dir {
2789 struct kobject kobj;
2790 struct damon_sysfs_kdamonds *kdamonds;
2791 };
2792
2793 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2794 {
2795 return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2796 }
2797
2798 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2799 {
2800 struct damon_sysfs_kdamonds *kdamonds;
2801 int err;
2802
2803 kdamonds = damon_sysfs_kdamonds_alloc();
2804 if (!kdamonds)
2805 return -ENOMEM;
2806
2807 err = kobject_init_and_add(&kdamonds->kobj,
2808 &damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2809 "kdamonds");
2810 if (err) {
2811 kobject_put(&kdamonds->kobj);
2812 return err;
2813 }
2814 ui_dir->kdamonds = kdamonds;
2815 return err;
2816 }
2817
2818 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2819 {
2820 kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2821 }
2822
2823 static struct attribute *damon_sysfs_ui_dir_attrs[] = {
2824 NULL,
2825 };
2826 ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);
2827
2828 static struct kobj_type damon_sysfs_ui_dir_ktype = {
2829 .release = damon_sysfs_ui_dir_release,
2830 .sysfs_ops = &kobj_sysfs_ops,
2831 .default_groups = damon_sysfs_ui_dir_groups,
2832 };
2833
2834 static int __init damon_sysfs_init(void)
2835 {
2836 struct kobject *damon_sysfs_root;
2837 struct damon_sysfs_ui_dir *admin;
2838 int err;
2839
2840 damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
2841 if (!damon_sysfs_root)
2842 return -ENOMEM;
2843
2844 admin = damon_sysfs_ui_dir_alloc();
2845 if (!admin) {
2846 kobject_put(damon_sysfs_root);
2847 return -ENOMEM;
2848 }
2849 err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
2850 damon_sysfs_root, "admin");
2851 if (err)
2852 goto out;
2853 err = damon_sysfs_ui_dir_add_dirs(admin);
2854 if (err)
2855 goto out;
2856 return 0;
2857
2858 out:
2859 kobject_put(&admin->kobj);
2860 kobject_put(damon_sysfs_root);
2861 return err;
2862 }
2863 subsys_initcall(damon_sysfs_init);