/*
 * Resource Director Technology (RDT) - resctrl control and monitor data.
 *
 * Parsing of the schemata file (cache bit masks and memory bandwidth
 * values), staging and applying of control values, and reading of
 * monitoring event data.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Check whether an MBA bandwidth percentage value is valid. The value is
 * checked against the minimum and maximum bandwidth values specified by
 * the hardware. The requested bandwidth percentage is rounded up to the
 * next control step (bw_gran) supported by the hardware.
 */
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/* Only linear delay values are supported by current hardware. */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}

int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
	     struct rdt_domain *d)
{
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	unsigned long bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;
	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether a cache bit mask is valid:
 *  - the mask must not be larger than the resource's default (full) mask,
 *  - the mask may only be empty if the architecture allows it,
 *  - the set bits must be contiguous unless the architecture allows
 *    sparse bitmasks,
 *  - at least "min_cbm_bits" consecutive bits must be set.
 */
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((!r->cache.arch_has_empty_bitmaps && val == 0) ||
	    val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmaps &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    r->rid == RDT_RESOURCE_MBA) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * In pseudo-locking setup mode a valid CBM
				 * that should be pseudo-locked was just
				 * parsed. Only one locked region is allowed
				 * per resource group and domain, so do the
				 * initialization for a single CBM region
				 * here.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}

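/*
 * With CDP enabled each CLOSID uses two hardware configuration slots:
 * the data mask at the even index and the code mask at the odd index.
 * Without CDP the CLOSID maps directly to the configuration index.
 */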
static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
{
	switch (type) {
	default:
	case CDP_NONE:
		return closid;
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	}
}

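/*
 * Write a staged control value into the domain's control array (ctrl_val,
 * or mbps_val when the MBA software controller is in use). If the value
 * changed, mark one CPU of the domain in @cpu_mask so the control MSR can
 * be updated there, and return true.
 */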
static bool apply_config(struct rdt_hw_domain *hw_dom,
			 struct resctrl_staged_config *cfg, u32 idx,
			 cpumask_var_t cpu_mask, bool mba_sc)
{
	struct rdt_domain *dom = &hw_dom->d_resctrl;
	u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;

	if (cfg->new_ctrl != dc[idx]) {
		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
		dc[idx] = cfg->new_ctrl;

		return true;
	}

	return false;
}

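/*
 * Apply all staged configuration values of @closid to the domains of @r
 * and update the control MSRs on one CPU per changed domain. When the MBA
 * software controller is enabled, only the software MB/s values are
 * updated and no MSR is written.
 */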
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	enum resctrl_conf_type t;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	bool mba_sc;
	int cpu;
	u32 idx;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	mba_sc = is_mba_sc(r);
	msr_param.res = NULL;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
				continue;

			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
	}

	/*
	 * Avoid writing the control MSRs with control values when the
	 * MBA software controller is enabled.
	 */
	if (cpumask_empty(cpu_mask) || mba_sc)
		goto done;
	cpu = get_cpu();
	/* Update resource control MSR on this CPU if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update resource control MSR on other CPUs. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

done:
	free_cpumask_var(cpu_mask);

	return 0;
}

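/*
 * Match @resname against the known schemata and parse @tok for the
 * matching resource. The group's CLOSID must be valid for that resource.
 */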
static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}

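/*
 * Write handler for the "schemata" file: parse each "resource:domain=value"
 * line, stage the new control values and then apply them to the hardware.
 * For a group in pseudo-locking setup mode the staged CBM is used to create
 * the pseudo-locked region instead.
 */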
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_domain *dom;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline. */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	cpus_read_lock();
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		cpus_read_unlock();
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to a pseudo-locked region are allowed. It has to be
	 * removed and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		list_for_each_entry(dom, &s->res->domains, list)
			memset(dom->staged_config, 0, sizeof(dom->staged_config));
	}

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails, the resource group stays in
		 * RDT_MODE_PSEUDO_LOCKSETUP mode with its class of service
		 * still allocated to it.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdtgroup_kn_unlock(of->kn);
	cpus_read_unlock();
	return ret ?: nbytes;
}

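/*
 * Return the control value stored for @closid in domain @d, taking the
 * CDP configuration type into account. When the MBA software controller
 * is enabled, the MB/s value is returned instead of the raw control value.
 */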
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, type);

	if (!is_mba_sc(r))
		return hw_dom->ctrl_val[idx];
	return hw_dom->mbps_val[idx];
}

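/* Print one schemata line: the control value of every domain for @closid. */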
static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		ctrl_val = resctrl_arch_get_config(r, dom, closid,
						   schema->conf_type);
		seq_printf(s, r->format_str, dom->id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}

void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first)
{
	/*
	 * Set up the parameters passed via IPI to the CPU that reads the
	 * event counter.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->val = 0;
	rr->first = first;

	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}

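/*
 * Show handler for a monitoring data file: decode the resource, domain and
 * event id stored in the kernfs private data, read the counter on a CPU of
 * that domain and print the scaled result.
 */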
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rdt_hw_resource *hw_res;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	hw_res = &rdt_resources_all[resid];
	r = &hw_res->r_resctrl;
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

	if (rr.val & RMID_VAL_ERROR)
		seq_puts(m, "Error\n");
	else if (rr.val & RMID_VAL_UNAVAIL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}