// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* list of entries for the schemata file */
LIST_HEAD(resctrl_schema_all);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;

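/*
 * Helpers for the "last command status" buffer surfaced through
 * info/last_cmd_status: cleared at the start of each write operation,
 * appended to on failure, always under rdtgroup_mutex.
 */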
void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static void closid_init(void)
{
	struct resctrl_schema *s;
	u32 rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	list_for_each_entry(s, &resctrl_schema_all, list)
		rdt_min_closid = min(rdt_min_closid, s->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}

/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}

/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}

static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgrp_mode
 *
 * Return: string representation of valid mode
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static const struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static const struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask,
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}

static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}

static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}

static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (!cpumask_empty(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}

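/*
 * Handler for the "cpus" and "cpus_list" files: parse the requested mask,
 * reject offline CPUs, then hand off to the control or monitor group
 * update helper above.
 */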
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (!cpumask_empty(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}

/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference taken during mkdir.
 *
 * Return: void
 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
	kernfs_put(rdtgrp->kn);
	kfree(rdtgrp);
}

static void _update_task_closid_rmid(void *task)
{
	/*
	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
	 * Otherwise, the MSR is updated when the task is scheduled in.
	 */
	if (task == current)
		resctrl_sched_in();
}

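/*
 * If @t is running, interrupt it on its CPU so that it loads its new
 * closid/rmid immediately; otherwise the values take effect when the
 * task is next scheduled in.
 */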
static void update_task_closid_rmid(struct task_struct *t)
{
	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
	else
		_update_task_closid_rmid(t);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	/* If the task is already in rdtgrp, no need to move the task. */
	if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
	     tsk->rmid == rdtgrp->mon.rmid) ||
	    (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
	     tsk->closid == rdtgrp->mon.parent->closid))
		return 0;

	/*
	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
	 * updated by them.
	 *
	 * For ctrl_mon groups, move both closid and rmid.
	 * For monitor groups, can move the tasks only from
	 * their parent CTRL group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP) {
		WRITE_ONCE(tsk->closid, rdtgrp->closid);
		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
	} else if (rdtgrp->type == RDTMON_GROUP) {
		if (rdtgrp->mon.parent->closid == tsk->closid) {
			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
		} else {
			rdt_last_cmd_puts("Can't move task to different control group\n");
			return -EINVAL;
		}
	}

	/*
	 * Ensure the task's closid and rmid are written before determining if
	 * the task is current that will decide if it will be interrupted.
	 */
	barrier();

	/*
	 * By now, the task's closid and rmid are set. If the task is current
	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
	 * group go into effect. If the task is not current, the MSR will be
	 * updated when the task is scheduled in.
	 */
	update_task_closid_rmid(tsk);

	return 0;
}

static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
		(r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
		(r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3)  res:/
 *     mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)  res:group0
 *     mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5) res:group0
 *    mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!static_branch_unlikely(&rdt_enable_key)) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (rdtg->closid != tsk->closid)
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdtg->kn->name);
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (tsk->rmid != crg->mon.rmid)
				continue;
			seq_printf(s, "%s", crg->kn->name);
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}

	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif

static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;

	seq_printf(seq, "%u\n", s->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}

/*
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_resource *r = s->res;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 ctrl_val;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++) {
			if (!closid_allocated(i))
				continue;
			ctrl_val = resctrl_arch_get_config(r, dom, i,
							   s->conf_type);
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= ctrl_val;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= ctrl_val;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	seq_printf(seq, "%u\n", resctrl_cqm_threshold * hw_res->mon_scale);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
		seq_puts(seq, "per-thread\n");
	else
		seq_puts(seq, "max\n");

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_hw_resource *hw_res;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	hw_res = resctrl_to_arch_res(of->kn->parent->priv);
	resctrl_cqm_threshold = bytes / hw_res->mon_scale;

	return nbytes;
}

/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
	switch (my_type) {
	case CDP_CODE:
		return CDP_DATA;
	case CDP_DATA:
		return CDP_CODE;
	default:
	case CDP_NONE:
		return CDP_NONE;
	}
}

/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @type: CDP type of @r.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	for (i = 0; i < closids_supported(); i++) {
		ctrl_b = resctrl_arch_get_config(r, d, i, type);
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CDP compatible resource share
 * the same unique closid. If CDP is enabled, the overlap check should be
 * performed on the CDP resources sharing the closid as well as on the
 * resource itself.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	struct rdt_resource *r = s->res;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
				    exclusive))
		return true;

	if (!resctrl_arch_get_cdp_enabled(r->rid))
		return false;
	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}

/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 * @rdtgrp: Resource group identified through its closid.
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and/or hardware.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct resctrl_schema *s;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;
	u32 ctrl;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			ctrl = resctrl_arch_get_config(r, d, closid,
						       s->conf_type);
			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

/*
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}

/*
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->s->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = resctrl_arch_get_config(r, d,
							       rdtgrp->closid,
							       schema->conf_type);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	/*
	 * Platform specific which (if any) capabilities are provided by
	 * thread_throttle_mode. Defer "fflags" initialization to platform
	 * discovery.
	 */
	{
		.name		= "thread_throttle_mode",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_thread_throttle_mode_show,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};

static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}

static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
{
	struct rftype *rfts, *rft;
	int len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			return rft;
	}

	return NULL;
}

void __init thread_throttle_mode_init(void)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
	if (!rft)
		return;

	rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
}

/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, priv);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}

static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	/* loop over enabled controls, these are all alloc_enabled */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}

static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

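/* True if the hardware's MBA delay values are on a linear scale. */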
static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
}

static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->domains, list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}

/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	if (!r->cdp_capable)
		return;

	if (r->rid == RDT_RESOURCE_L2)
		l2_qos_cfg_update(&hw_res->cdp_enabled);

	if (r->rid == RDT_RESOURCE_L3)
		l3_qos_cfg_update(&hw_res->cdp_enabled);
}

/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		setup_default_ctrlval(r, hw_dom->ctrl_val, hw_dom->mbps_val);
	}

	return 0;
}

static int cdp_enable(int level)
{
	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
	int ret;

	if (!r_l->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret)
		rdt_resources_all[level].cdp_enabled = true;

	return ret;
}

static void cdp_disable(int level)
{
	struct rdt_hw_resource *r_hw = &rdt_resources_all[level];

	if (r_hw->cdp_enabled) {
		set_cache_qos_cfg(level, false);
		r_hw->cdp_enabled = false;
	}
}

int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[l];

	if (!hw_res->r_resctrl.cdp_capable)
		return -EINVAL;

	if (enable)
		return cdp_enable(l);

	cdp_disable(l);

	return 0;
}

static void cdp_disable_all(void)
{
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to get the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * use rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

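/*
 * Counterpart to rdtgroup_kn_lock_live(): drop the mutex, the waitcount
 * reference and the kernfs active protection. If the group was deleted
 * while a caller held it, free it here.
 */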
void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		rdtgroup_remove(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}

static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

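/* Apply the cdp/cdpl2/mba_MBps mount options before building the tree. */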
static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);

	if (!ret && ctx->enable_cdpl3)
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

	return ret;
}

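/*
 * Create one schema list entry per schemata file line. With CDP enabled,
 * a cache resource contributes two entries (CODE and DATA) and its CLOSID
 * space is halved.
 */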
static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
{
	struct resctrl_schema *s;
	const char *suffix = "";
	int ret, cl;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->res = r;
	s->num_closid = resctrl_arch_get_num_closid(r);
	if (resctrl_arch_get_cdp_enabled(r->rid))
		s->num_closid /= 2;

	s->conf_type = type;
	switch (type) {
	case CDP_CODE:
		suffix = "CODE";
		break;
	case CDP_DATA:
		suffix = "DATA";
		break;
	case CDP_NONE:
		suffix = "";
		break;
	}

	ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
	if (ret >= sizeof(s->name)) {
		kfree(s);
		return -EINVAL;
	}

	cl = strlen(s->name);

	/*
	 * If CDP is supported by this resource, but not enabled,
	 * include the suffix. This ensures the tabular format of the
	 * schemata file does not change between mounts of the filesystem.
	 */
	if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
		cl += 4;

	if (cl > max_name_width)
		max_name_width = cl;

	INIT_LIST_HEAD(&s->list);
	list_add(&s->list, &resctrl_schema_all);

	return 0;
}

static int schemata_list_create(void)
{
	struct rdt_resource *r;
	int ret = 0;

	for_each_alloc_enabled_rdt_resource(r) {
		if (resctrl_arch_get_cdp_enabled(r->rid)) {
			ret = schemata_list_add(r, CDP_CODE);
			if (ret)
				break;

			ret = schemata_list_add(r, CDP_DATA);
		} else {
			ret = schemata_list_add(r, CDP_NONE);
		}

		if (ret)
			break;
	}

	return ret;
}

static void schemata_list_destroy(void)
{
	struct resctrl_schema *s, *tmp;

	list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
		list_del(&s->list);
		kfree(s);
	}
}

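/*
 * Mount-time setup: build the schemata list and the info/, mon_groups/ and
 * mon_data/ directories, then flip the static keys that enable resctrl
 * processing on context switch.
 */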
static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		ret = -EBUSY;
		goto out;
	}

	ret = rdt_enable_ctx(ctx);
	if (ret < 0)
		goto out_cdp;

	ret = schemata_list_create();
	if (ret) {
		schemata_list_destroy();
		goto out_mba;
	}

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_schemata_free;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  &rdtgroup_default, "mon_groups",
					  &kn_mongrp);
		if (ret < 0)
			goto out_info;

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret < 0)
			goto out_mongrp;
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret)
		goto out_mondata;

	ret = kernfs_get_tree(fc);
	if (ret < 0)
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_schemata_free:
	schemata_list_destroy();
out_mba:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_fs_parameters[] = {
	fsparam_flag("cdp",		Opt_cdp),
	fsparam_flag("cdpl2",		Opt_cdpl2),
	fsparam_flag("mba_MBps",	Opt_mba_mbps),
	{}
};

static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
	}

	return -EINVAL;
}

static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free		= rdt_fs_context_free,
	.parse_param	= rdt_parse_param,
	.get_tree	= rdt_get_tree,
};

static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.root = rdt_root;
	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}

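/*
 * Write the architectural default control value into every CLOSID of every
 * domain of @r, using one CPU per domain for the MSR updates.
 */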
static int reset_all_ctrls(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = hw_res->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < hw_res->num_closid; i++)
			hw_dom->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the systems are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			WRITE_ONCE(t->closid, to->closid);
			WRITE_ONCE(t->rmid, to->mon.rmid);

			/*
			 * If the task is on a CPU, set the CPU in the mask.
			 * The detection is inaccurate as tasks might move or
			 * schedule before the smp function call takes place.
			 * In such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
				cpumask_set_cpu(task_cpu(t), mask);
		}
	}
	read_unlock(&tasklist_lock);
}

static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);

		if (atomic_read(&sentry->waitcount) != 0)
			sentry->flags = RDT_DELETED;
		else
			rdtgroup_remove(sentry);
	}
}

/*
 * Forcibly remove all of subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);

		if (atomic_read(&rdtgrp->waitcount) != 0)
			rdtgrp->flags = RDT_DELETED;
		else
			rdtgroup_remove(rdtgrp);
	}
	/* Notify online CPUs to update per cpu rmid */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}

static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_enabled_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	schemata_list_destroy();
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

static struct file_system_type rdt_fs_type = {
	.name			= "resctrl",
	.init_fs_context	= rdt_init_fs_context,
	.parameters		= rdt_fs_parameters,
	.kill_sb		= rdt_kill_sb,
};

static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}

/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}

static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
2563
2564 /*
2565  * Add all subdirectories of mon_data for "ctrl_mon" groups
2566  * and "monitor" groups with given domain id.
2567  */
2568 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
2569 struct rdt_domain *d)
2570 {
2571 struct kernfs_node *parent_kn;
2572 struct rdtgroup *prgrp, *crgrp;
2573 struct list_head *head;
2574
2575 if (!r->mon_enabled)
2576 return;
2577
2578 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2579 parent_kn = prgrp->mon.mon_data_kn;
2580 mkdir_mondata_subdir(parent_kn, d, r, prgrp);
2581
2582 head = &prgrp->mon.crdtgrp_list;
2583 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
2584 parent_kn = crgrp->mon.mon_data_kn;
2585 mkdir_mondata_subdir(parent_kn, d, r, crgrp);
2586 }
2587 }
2588 }
2589
2590 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
2591 struct rdt_resource *r,
2592 struct rdtgroup *prgrp)
2593 {
2594 struct rdt_domain *dom;
2595 int ret;
2596
2597 list_for_each_entry(dom, &r->domains, list) {
2598 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
2599 if (ret)
2600 return ret;
2601 }
2602
2603 return 0;
2604 }
2605
2606 /*
2607  * This creates a directory mon_data which contains the monitored data.
2608  *
2609  * mon_data has one directory for each domain which are named
2610  * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
2611  * with L3 domain looks as below:
2612  * ./mon_data:
2613  * mon_L3_00
2614  * mon_L3_01
2615  * mon_L3_02
2616  * ...
2617  *
2618  * Each domain directory has one file per event:
2619  * ./mon_L3_00/:
2620  * llc_occupancy
2621  *
2622  */
2623 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
2624 struct rdtgroup *prgrp,
2625 struct kernfs_node **dest_kn)
2626 {
2627 struct rdt_resource *r;
2628 struct kernfs_node *kn;
2629 int ret;
2630
2631 /*
2632  * Create the mon_data directory first.
2633  */
2634 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
2635 if (ret)
2636 return ret;
2637
2638 if (dest_kn)
2639 *dest_kn = kn;
2640
2641 /*
2642  * Create the subdirectories for each domain. Note that all events
2643  * in a domain like L3 are grouped into a resource whose domain is L3.
2644  */
2645 for_each_mon_enabled_rdt_resource(r) {
2646 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
2647 if (ret)
2648 goto out_destroy;
2649 }
2650
2651 return 0;
2652
2653 out_destroy:
2654 kernfs_remove(kn);
2655 return ret;
2656 }
2657
2658 /**
2659  * cbm_ensure_valid - Enforce validity on provided CBM
2660  * @_val:	Candidate CBM
2661  * @r:		RDT resource to which the CBM belongs
2662  *
2663  * The provided CBM may contain non-contiguous set bits, which the
2664  * underlying hardware does not necessarily support. Sanitize the mask
2665  * by keeping only the first contiguous run of set bits: find the
2666  * lowest set bit, find the first zero bit above it, and clear every
2667  * bit from that zero bit upwards.
2668  *
2669  * An all-zero input is returned unchanged.
2670  *
2671  * Return: the sanitized, contiguous CBM.
2672  */
2673 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
2674 {
2675 unsigned int cbm_len = r->cache.cbm_len;
2676 unsigned long first_bit, zero_bit;
2677 unsigned long val = _val;
2678
2679 if (!val)
2680 return 0;
2681
2682 first_bit = find_first_bit(&val, cbm_len);
2683 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
2684
2685
2686 bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
2687 return (u32)val;
2688 }
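
/*
 * Worked example with illustrative values: for cbm_len = 8 and
 * _val = 0xB3 (1011 0011), first_bit is 0 and zero_bit is 2, so
 * bitmap_clear() clears bits 2-7 and the function returns
 * 0x3 (0000 0011), the contiguous run of set bits starting at the
 * lowest set bit.
 */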
2689
2690 /*
2691  * Initialize cache resources per RDT domain
2692  *
2693  * Set the RDT domain up to start off with all usable allocations. That is,
2694  * all shareable and unused bits. All-zero CBM is invalid.
2695  */
2696 static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
2697 u32 closid)
2698 {
2699 enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
2700 enum resctrl_conf_type t = s->conf_type;
2701 struct resctrl_staged_config *cfg;
2702 struct rdt_resource *r = s->res;
2703 u32 used_b = 0, unused_b = 0;
2704 unsigned long tmp_cbm;
2705 enum rdtgrp_mode mode;
2706 u32 peer_ctl, ctrl_val;
2707 int i;
2708
2709 cfg = &d->staged_config[t];
2710 cfg->have_new_ctrl = false;
2711 cfg->new_ctrl = r->cache.shareable_bits;
2712 used_b = r->cache.shareable_bits;
2713 for (i = 0; i < closids_supported(); i++) {
2714 if (closid_allocated(i) && i != closid) {
2715 mode = rdtgroup_mode_by_closid(i);
2716 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
2717 /*
2718  * ctrl values for locksetup aren't relevant
2719  * until the schemata is written, and the mode
2720  * becomes RDT_MODE_PSEUDO_LOCKED.
2721  */
2722 continue;
2723
2724 /*
2725  * If CDP is active include peer domain's
2726  * usage to ensure there is no overlap with an exclusive group.
2727  */
2728 if (resctrl_arch_get_cdp_enabled(r->rid))
2729 peer_ctl = resctrl_arch_get_config(r, d, i,
2730 peer_type);
2731 else
2732 peer_ctl = 0;
2733 ctrl_val = resctrl_arch_get_config(r, d, i,
2734 s->conf_type);
2735 used_b |= ctrl_val | peer_ctl;
2736 if (mode == RDT_MODE_SHAREABLE)
2737 cfg->new_ctrl |= ctrl_val | peer_ctl;
2738 }
2739 }
2740 if (d->plr && d->plr->cbm > 0)
2741 used_b |= d->plr->cbm;
2742 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
2743 unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
2744 cfg->new_ctrl |= unused_b;
2745
2746 /*
2747  * Force the initial CBM to be valid, user can modify it later.
2748  */
2749 cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
2750
2751 /*
2752  * Copy the u32 CBM to an unsigned long so bitmap_weight() stays in bounds.
2753  */
2754 tmp_cbm = cfg->new_ctrl;
2755 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
2756 rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
2757 return -ENOSPC;
2758 }
2759 cfg->have_new_ctrl = true;
2760
2761 return 0;
2762 }
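
/*
 * Worked example with illustrative values: assume cbm_len = 8, CDP
 * disabled (so peer_ctl is 0), shareable_bits = 0xC0, and one other
 * allocated CLOSID in exclusive mode whose CBM is 0x0F. Then used_b
 * ends up 0xCF, unused_b is 0x30, and new_ctrl = 0xC0 | 0x30 = 0xF0,
 * which cbm_ensure_valid() leaves unchanged because its set bits are
 * already contiguous.
 */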
2763
2764 /*
2765  * Initialize cache resources with default values.
2766  *
2767  * A new RDT group is being created on an allocation capable (CAT)
2768  * resource. When creating the group, default cache allocation values
2769  * must be set so that the group does not interfere with resources
2770  * currently used by other groups.
2771  *
2772  * This is done separately for every cache domain of the resource.
2773  */
2774 static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
2775 {
2776 struct rdt_domain *d;
2777 int ret;
2778
2779 list_for_each_entry(d, &s->res->domains, list) {
2780 ret = __init_one_rdt_domain(d, s, closid);
2781 if (ret < 0)
2782 return ret;
2783 }
2784
2785 return 0;
2786 }
2787
2788 /* Initialize MBA resource with default values. */
2789 static void rdtgroup_init_mba(struct rdt_resource *r)
2790 {
2791 struct resctrl_staged_config *cfg;
2792 struct rdt_domain *d;
2793
2794 list_for_each_entry(d, &r->domains, list) {
2795 cfg = &d->staged_config[CDP_NONE];
2796 cfg->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
2797 cfg->have_new_ctrl = true;
2798 }
2799 }
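
/*
 * For example: in plain MBA mode the staged value is r->default_ctrl,
 * the hardware's maximum (no-throttling) bandwidth setting, while with
 * the software controller (mba_MBps mount option) it is MBA_MAX_MBPS,
 * an effectively unlimited MBps target.
 */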
2800
2801 /* Initialize the RDT group's allocations. */
2802 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
2803 {
2804 struct resctrl_schema *s;
2805 struct rdt_resource *r;
2806 int ret;
2807
2808 list_for_each_entry(s, &resctrl_schema_all, list) {
2809 r = s->res;
2810 if (r->rid == RDT_RESOURCE_MBA) {
2811 rdtgroup_init_mba(r);
2812 } else {
2813 ret = rdtgroup_init_cat(s, rdtgrp->closid);
2814 if (ret < 0)
2815 return ret;
2816 }
2817
2818 ret = resctrl_arch_update_domains(r, rdtgrp->closid);
2819 if (ret < 0) {
2820 rdt_last_cmd_puts("Failed to initialize allocations\n");
2821 return ret;
2822 }
2823
2824 }
2825
2826 rdtgrp->mode = RDT_MODE_SHAREABLE;
2827
2828 return 0;
2829 }
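
/*
 * Illustrative end result (actual values are system dependent): after a
 * successful mkdir the new group's schemata file could read, e.g.
 *
 *   L3:0=ff0;1=ff0
 *   MB:0=100;1=100
 *
 * i.e. every cache portion not claimed exclusively elsewhere, and
 * unthrottled memory bandwidth, in every domain.
 */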
2830
2831 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
2832 const char *name, umode_t mode,
2833 enum rdt_group_type rtype, struct rdtgroup **r)
2834 {
2835 struct rdtgroup *prdtgrp, *rdtgrp;
2836 struct kernfs_node *kn;
2837 uint files = 0;
2838 int ret;
2839
2840 prdtgrp = rdtgroup_kn_lock_live(parent_kn);
2841 if (!prdtgrp) {
2842 ret = -ENODEV;
2843 goto out_unlock;
2844 }
2845
2846 if (rtype == RDTMON_GROUP &&
2847 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2848 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
2849 ret = -EINVAL;
2850 rdt_last_cmd_puts("Pseudo-locking in progress\n");
2851 goto out_unlock;
2852 }
2853
2854 /* allocate the rdtgroup. */
2855 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
2856 if (!rdtgrp) {
2857 ret = -ENOSPC;
2858 rdt_last_cmd_puts("Kernel out of memory\n");
2859 goto out_unlock;
2860 }
2861 *r = rdtgrp;
2862 rdtgrp->mon.parent = prdtgrp;
2863 rdtgrp->type = rtype;
2864 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
2865
2866 /* kernfs creates the directory for rdtgrp */
2867 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
2868 if (IS_ERR(kn)) {
2869 ret = PTR_ERR(kn);
2870 rdt_last_cmd_puts("kernfs create error\n");
2871 goto out_free_rgrp;
2872 }
2873 rdtgrp->kn = kn;
2874
2875 /*
2876  * kernfs_remove() will drop the reference count on "kn" which
2877  * will free it. But we still need it to stick around for the
2878  * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
2879  * which will be dropped by kernfs_put() in rdtgroup_remove().
2880  */
2881 kernfs_get(kn);
2882
2883 ret = rdtgroup_kn_set_ugid(kn);
2884 if (ret) {
2885 rdt_last_cmd_puts("kernfs perm error\n");
2886 goto out_destroy;
2887 }
2888
2889 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
2890 ret = rdtgroup_add_files(kn, files);
2891 if (ret) {
2892 rdt_last_cmd_puts("kernfs fill error\n");
2893 goto out_destroy;
2894 }
2895
2896 if (rdt_mon_capable) {
2897 ret = alloc_rmid();
2898 if (ret < 0) {
2899 rdt_last_cmd_puts("Out of RMIDs\n");
2900 goto out_destroy;
2901 }
2902 rdtgrp->mon.rmid = ret;
2903
2904 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
2905 if (ret) {
2906 rdt_last_cmd_puts("kernfs subdir error\n");
2907 goto out_idfree;
2908 }
2909 }
2910 kernfs_activate(kn);
2911
2912 /*
2913  * The caller unlocks the parent_kn upon success.
2914  */
2915 return 0;
2916
2917 out_idfree:
2918 free_rmid(rdtgrp->mon.rmid);
2919 out_destroy:
2920 kernfs_put(rdtgrp->kn);
2921 kernfs_remove(rdtgrp->kn);
2922 out_free_rgrp:
2923 kfree(rdtgrp);
2924 out_unlock:
2925 rdtgroup_kn_unlock(parent_kn);
2926 return ret;
2927 }
2928
2929 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
2930 {
2931 kernfs_remove(rgrp->kn);
2932 free_rmid(rgrp->mon.rmid);
2933 rdtgroup_remove(rgrp);
2934 }
2935
2936 /*
2937  * Create a monitor group under "mon_groups" directory of a control
2938  * and monitor group (ctrl_mon). This is a resource group
2939  * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
2940  */
2941 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
2942 const char *name, umode_t mode)
2943 {
2944 struct rdtgroup *rdtgrp, *prgrp;
2945 int ret;
2946
2947 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
2948 if (ret)
2949 return ret;
2950
2951 prgrp = rdtgrp->mon.parent;
2952 rdtgrp->closid = prgrp->closid;
2953
2954 /*
2955  * Add the rdtgrp to the list of rdtgrps the parent
2956  * ctrl_mon group has to track.
2957  */
2958 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
2959
2960 rdtgroup_kn_unlock(parent_kn);
2961 return ret;
2962 }
2963
2964 /*
2965  * These are rdtgroups created under the root directory. Can be used
2966  * to allocate and monitor resources.
2967  */
2968 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
2969 const char *name, umode_t mode)
2970 {
2971 struct rdtgroup *rdtgrp;
2972 struct kernfs_node *kn;
2973 u32 closid;
2974 int ret;
2975
2976 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
2977 if (ret)
2978 return ret;
2979
2980 kn = rdtgrp->kn;
2981 ret = closid_alloc();
2982 if (ret < 0) {
2983 rdt_last_cmd_puts("Out of CLOSIDs\n");
2984 goto out_common_fail;
2985 }
2986 closid = ret;
2987 ret = 0;
2988
2989 rdtgrp->closid = closid;
2990 ret = rdtgroup_init_alloc(rdtgrp);
2991 if (ret < 0)
2992 goto out_id_free;
2993
2994 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
2995
2996 if (rdt_mon_capable) {
2997 /*
2998  * Create an empty mon_groups directory to hold the subset
2999  * of tasks and cpus to monitor.
3000  */
3001 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
3002 if (ret) {
3003 rdt_last_cmd_puts("kernfs subdir error\n");
3004 goto out_del_list;
3005 }
3006 }
3007
3008 goto out_unlock;
3009
3010 out_del_list:
3011 list_del(&rdtgrp->rdtgroup_list);
3012 out_id_free:
3013 closid_free(closid);
3014 out_common_fail:
3015 mkdir_rdt_prepare_clean(rdtgrp);
3016 out_unlock:
3017 rdtgroup_kn_unlock(parent_kn);
3018 return ret;
3019 }
3020
3021 /*
3022  * We allow creating mon groups only within a directory called "mon_groups"
3023  * which is present in every ctrl_mon group. Check if this is a valid
3024  * "mon_groups" directory.
3025  *
3026  * 1. The directory should be named "mon_groups".
3027  * 2. The mon group itself should "not" be named "mon_groups".
3028  *   This makes sure "mon_groups" directory always has a ctrl_mon group
3029  *   as parent.
3030  */
3031 static bool is_mon_groups(struct kernfs_node *kn, const char *name)
3032 {
3033 return (!strcmp(kn->name, "mon_groups") &&
3034 strcmp(name, "mon_groups"));
3035 }
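
/*
 * For example: mkdir "m1" inside ".../ctrl_grp/mon_groups" passes this
 * check, while mkdir "mon_groups" inside an existing "mon_groups"
 * directory is rejected and falls through to -EPERM in rdtgroup_mkdir().
 */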
3036
3037 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
3038 umode_t mode)
3039 {
3040 /* Do not accept '\n' to avoid unparsable situation. */
3041 if (strchr(name, '\n'))
3042 return -EINVAL;
3043
3044 /*
3045  * If the parent directory is the root directory and RDT
3046  * allocation is supported, add a control and monitoring
3047  * subdirectory.
3048  */
3049 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
3050 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
3051
3052 /*
3053  * If RDT monitoring is supported and the parent directory is a valid
3054  * "mon_groups" directory, add a monitoring subdirectory.
3055  */
3056 if (rdt_mon_capable && is_mon_groups(parent_kn, name))
3057 return rdtgroup_mkdir_mon(parent_kn, name, mode);
3058
3059 return -EPERM;
3060 }
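
/*
 * The checks above limit the hierarchy to two levels of user-created
 * directories, e.g. (paths are illustrative):
 *
 *   /sys/fs/resctrl/grp0                 - ctrl_mon group
 *   /sys/fs/resctrl/grp0/mon_groups/m0   - mon group inside grp0
 *
 * Anything deeper, or a mkdir in any other location, fails with -EPERM.
 */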
3061
3062 static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3063 {
3064 struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
3065 int cpu;
3066
3067 /* Give any tasks back to the parent group */
3068 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
3069
3070 /* Update per cpu rmid of the moved CPUs first */
3071 for_each_cpu(cpu, &rdtgrp->cpu_mask)
3072 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
3073
3074 /*
3075  * Update the MSR on moved CPUs and CPUs which have a moved task running.
3076  */
3077 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3078 update_closid_rmid(tmpmask, NULL);
3079
3080 rdtgrp->flags = RDT_DELETED;
3081 free_rmid(rdtgrp->mon.rmid);
3082
3083 /*
3084  * Remove the rdtgrp from the parent ctrl_mon group's list.
3085  */
3086 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
3087 list_del(&rdtgrp->mon.crdtgrp_list);
3088
3089 kernfs_remove(rdtgrp->kn);
3090
3091 return 0;
3092 }
3093
3094 static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
3095 {
3096 rdtgrp->flags = RDT_DELETED;
3097 list_del(&rdtgrp->rdtgroup_list);
3098
3099 kernfs_remove(rdtgrp->kn);
3100 return 0;
3101 }
3102
3103 static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3104 {
3105 int cpu;
3106
3107 /* Give any tasks back to the default group */
3108 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
3109
3110 /* Give any CPUs back to the default group */
3111 cpumask_or(&rdtgroup_default.cpu_mask,
3112 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
3113
3114 /* Update per cpu closid and rmid of the moved CPUs first */
3115 for_each_cpu(cpu, &rdtgrp->cpu_mask) {
3116 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
3117 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
3118 }
3119
3120 /*
3121  * Update the MSR on moved CPUs and CPUs which have moved
3122  * task running on them.
3123  */
3124 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3125 update_closid_rmid(tmpmask, NULL);
3126
3127 closid_free(rdtgrp->closid);
3128 free_rmid(rdtgrp->mon.rmid);
3129
3130 rdtgroup_ctrl_remove(rdtgrp);
3131
3132 /*
3133  * Free all the child monitor group rmids.
3134  */
3135 free_all_child_rdtgrp(rdtgrp);
3136
3137 return 0;
3138 }
3139
3140 static int rdtgroup_rmdir(struct kernfs_node *kn)
3141 {
3142 struct kernfs_node *parent_kn = kn->parent;
3143 struct rdtgroup *rdtgrp;
3144 cpumask_var_t tmpmask;
3145 int ret = 0;
3146
3147 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
3148 return -ENOMEM;
3149
3150 rdtgrp = rdtgroup_kn_lock_live(kn);
3151 if (!rdtgrp) {
3152 ret = -EPERM;
3153 goto out;
3154 }
3155
3156 /*
3157  * If the rdtgroup is a ctrl_mon group and parent directory
3158  * is the root directory, remove the ctrl_mon group.
3159  *
3160  * If the rdtgroup is a mon group and parent directory
3161  * is a valid "mon_groups" directory, remove the mon group.
3162  */
3163 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
3164 rdtgrp != &rdtgroup_default) {
3165 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3166 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
3167 ret = rdtgroup_ctrl_remove(rdtgrp);
3168 } else {
3169 ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
3170 }
3171 } else if (rdtgrp->type == RDTMON_GROUP &&
3172 is_mon_groups(parent_kn, kn->name)) {
3173 ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
3174 } else {
3175 ret = -EPERM;
3176 }
3177
3178 out:
3179 rdtgroup_kn_unlock(kn);
3180 free_cpumask_var(tmpmask);
3181 return ret;
3182 }
3183
3184 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
3185 {
3186 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
3187 seq_puts(seq, ",cdp");
3188
3189 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
3190 seq_puts(seq, ",cdpl2");
3191
3192 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
3193 seq_puts(seq, ",mba_MBps");
3194
3195 return 0;
3196 }
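
/*
 * Example /proc/mounts line produced with these options (illustrative;
 * the other fields depend on how resctrl was mounted):
 *
 *   resctrl /sys/fs/resctrl resctrl rw,relatime,cdp,mba_MBps 0 0
 */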
3197
3198 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
3199 .mkdir = rdtgroup_mkdir,
3200 .rmdir = rdtgroup_rmdir,
3201 .show_options = rdtgroup_show_options,
3202 };
3203
3204 static int __init rdtgroup_setup_root(void)
3205 {
3206 int ret;
3207
3208 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
3209 KERNFS_ROOT_CREATE_DEACTIVATED |
3210 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
3211 &rdtgroup_default);
3212 if (IS_ERR(rdt_root))
3213 return PTR_ERR(rdt_root);
3214
3215 mutex_lock(&rdtgroup_mutex);
3216
3217 rdtgroup_default.closid = 0;
3218 rdtgroup_default.mon.rmid = 0;
3219 rdtgroup_default.type = RDTCTRL_GROUP;
3220 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
3221
3222 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
3223
3224 ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE);
3225 if (ret) {
3226 kernfs_destroy_root(rdt_root);
3227 goto out;
3228 }
3229
3230 rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
3231 kernfs_activate(rdtgroup_default.kn);
3232
3233 out:
3234 mutex_unlock(&rdtgroup_mutex);
3235
3236 return ret;
3237 }
3238
3239 /*
3240  * rdtgroup_init - rdtgroup initialization
3241  *
3242  * Setup resctrl file system including set up root, create mount point,
3243  * register rdtgroup filesystem, and initialize files under root directory.
3244  *
3245  * Return: 0 on success or -errno
3246  */
3247 int __init rdtgroup_init(void)
3248 {
3249 int ret = 0;
3250
3251 seq_buf_init(&last_cmd_status, last_cmd_status_buf,
3252 sizeof(last_cmd_status_buf));
3253
3254 ret = rdtgroup_setup_root();
3255 if (ret)
3256 return ret;
3257
3258 ret = sysfs_create_mount_point(fs_kobj, "resctrl");
3259 if (ret)
3260 goto cleanup_root;
3261
3262 ret = register_filesystem(&rdt_fs_type);
3263 if (ret)
3264 goto cleanup_mountpoint;
3265
3266
3267 /*
3268  * Adding the resctrl debugfs directory here may not be ideal since
3269  * it would let the resctrl debugfs directory appear on the default
3270  * filesystem even when resctrl itself is not mounted or not in use.
3271  *
3272  * The directory is currently only used by the cache pseudo-locking
3273  * code, which creates measurement files for pseudo-locked regions
3274  * underneath it. Creating the directory once, up front, keeps that
3275  * code simple: it never has to create, or race on creating, the
3276  * parent directory itself, and rdtgroup_exit() can unconditionally
3277  * tear the whole tree down with debugfs_remove_recursive().
3278  *
3279  * The return value is deliberately ignored. debugfs_create_dir()
3280  * returns an ERR_PTR on failure, and the debugfs API is built so that
3281  * later calls taking this directory as a parent cope with an ERR_PTR,
3282  * so debugfs problems are never fatal to the rest of resctrl.
3283  */
3284
3285
3286
3287 debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
3288
3289 return 0;
3290
3291 cleanup_mountpoint:
3292 sysfs_remove_mount_point(fs_kobj, "resctrl");
3293 cleanup_root:
3294 kernfs_destroy_root(rdt_root);
3295
3296 return ret;
3297 }
3298
3299 void __exit rdtgroup_exit(void)
3300 {
3301 debugfs_remove_recursive(debugfs_resctrl);
3302 unregister_filesystem(&rdt_fs_type);
3303 sysfs_remove_mount_point(fs_kobj, "resctrl");
3304 kernfs_destroy_root(rdt_root);
3305 }