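/*
 * Resource Director Technology (RDT) - cache allocation and memory
 * bandwidth allocation core code.
 *
 * Enumerates the RDT/QoS resources from CPUID, maintains the per
 * cache-domain control state and hooks CPU hotplug to keep the domain
 * lists up to date.
 */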
#define pr_fmt(fmt)     "resctrl: " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/resctrl.h>
#include "internal.h"
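
/* Mutex to protect rdtgroup access. */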
DEFINE_MUTEX(rdtgroup_mutex);
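
/*
 * The cached resctrl_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state are called
 * with interrupts disabled and no preemption, which is sufficient for
 * the protection.
 */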
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
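
/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */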
int max_name_width, max_data_width;
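
/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */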
bool rdt_alloc_capable;

static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
                struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
              struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)

struct rdt_hw_resource rdt_resources_all[] = {
        [RDT_RESOURCE_L3] =
        {
                .r_resctrl = {
                        .rid            = RDT_RESOURCE_L3,
                        .name           = "L3",
                        .cache_level    = 3,
                        .cache = {
                                .min_cbm_bits   = 1,
                        },
                        .domains        = domain_init(RDT_RESOURCE_L3),
                        .parse_ctrlval  = parse_cbm,
                        .format_str     = "%d=%0*x",
                        .fflags         = RFTYPE_RES_CACHE,
                },
                .msr_base       = MSR_IA32_L3_CBM_BASE,
                .msr_update     = cat_wrmsr,
        },
        [RDT_RESOURCE_L2] =
        {
                .r_resctrl = {
                        .rid            = RDT_RESOURCE_L2,
                        .name           = "L2",
                        .cache_level    = 2,
                        .cache = {
                                .min_cbm_bits   = 1,
                        },
                        .domains        = domain_init(RDT_RESOURCE_L2),
                        .parse_ctrlval  = parse_cbm,
                        .format_str     = "%d=%0*x",
                        .fflags         = RFTYPE_RES_CACHE,
                },
                .msr_base       = MSR_IA32_L2_CBM_BASE,
                .msr_update     = cat_wrmsr,
        },
        [RDT_RESOURCE_MBA] =
        {
                .r_resctrl = {
                        .rid            = RDT_RESOURCE_MBA,
                        .name           = "MB",
                        .cache_level    = 3,
                        .domains        = domain_init(RDT_RESOURCE_MBA),
                        .parse_ctrlval  = parse_bw,
                        .format_str     = "%d=%*u",
                        .fflags         = RFTYPE_RES_MB,
                },
        },
};
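
/*
 * cache_alloc_hsw_probe() - Probe for cache allocation on Intel Haswell
 * server CPUs, which do not enumerate Cache Allocation support via CPUID.
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stay set when read back.
 */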
static inline void cache_alloc_hsw_probe(void)
{
        struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
        struct rdt_resource *r = &hw_res->r_resctrl;
        u32 l, h, max_cbm = BIT_MASK(20) - 1;

        if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
                return;

        rdmsr(MSR_IA32_L3_CBM_BASE, l, h);

        /* If all the bits were set in the MSR, cache allocation is present. */
        if (l != max_cbm)
                return;

        hw_res->num_closid = 4;
        r->default_ctrl = max_cbm;
        r->cache.cbm_len = 20;
        r->cache.shareable_bits = 0xc0000;
        r->cache.min_cbm_bits = 2;
        r->alloc_capable = true;
        r->alloc_enabled = true;

        rdt_alloc_capable = true;
}

/* Return true if the MBA software controller (mba_MBps mount option) is in use. */
bool is_mba_sc(struct rdt_resource *r)
{
        if (!r)
                return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;

        return r->membw.mba_sc;
}
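
/*
 * rdt_get_mb_table() - would provide the mapping between the memory
 * bandwidth percentage values exposed to user space and the non-linear
 * hardware delay values. No SKU needing such a table is supported yet,
 * so this only logs that the mapping is not implemented.
 */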
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
        /*
         * There are no Intel SKUs as of now to support non-linear delay.
         */
        pr_info("MBA b/w map not implemented for cpu:%d, model:%d\n",
                boot_cpu_data.x86, boot_cpu_data.x86_model);

        return false;
}

static bool __get_mem_config_intel(struct rdt_resource *r)
{
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx, max_delay;

        cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
        hw_res->num_closid = edx.split.cos_max + 1;
        max_delay = eax.split.max_delay + 1;
        r->default_ctrl = MAX_MBA_BW;
        r->membw.arch_needs_linear = true;
        if (ecx & MBA_IS_LINEAR) {
                r->membw.delay_linear = true;
                r->membw.min_bw = MAX_MBA_BW - max_delay;
                r->membw.bw_gran = MAX_MBA_BW - max_delay;
        } else {
                if (!rdt_get_mb_table(r))
                        return false;
                r->membw.arch_needs_linear = false;
        }
        r->data_width = 3;

        if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
                r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
        else
                r->membw.throttle_mode = THREAD_THROTTLE_MAX;
        thread_throttle_mode_init();

        r->alloc_capable = true;
        r->alloc_enabled = true;

        return true;
}

static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_3_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;

        cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
        hw_res->num_closid = edx.split.cos_max + 1;
        r->default_ctrl = MAX_MBA_BW_AMD;

        /* AMD does not use a delay based bandwidth control model. */
        r->membw.delay_linear = false;
        r->membw.arch_needs_linear = false;

        /*
         * AMD does not use the memory delay throttle model that Intel
         * uses, so the per-thread throttle mode does not apply.
         */
        r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
        r->membw.min_bw = 0;
        r->membw.bw_gran = 1;
        /* Max value is 2048, so the data width is 4 decimal digits. */
        r->data_width = 4;

        r->alloc_capable = true;
        r->alloc_enabled = true;

        return true;
}

static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        union cpuid_0x10_1_eax eax;
        union cpuid_0x10_x_edx edx;
        u32 ebx, ecx;

        cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
        hw_res->num_closid = edx.split.cos_max + 1;
        r->cache.cbm_len = eax.split.cbm_len + 1;
        r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
        r->cache.shareable_bits = ebx & r->default_ctrl;
        r->data_width = (r->cache.cbm_len + 3) / 4;
        r->alloc_capable = true;
        r->alloc_enabled = true;
}

static void rdt_get_cdp_config(int level)
{
        /*
         * By default, CDP is disabled. CDP can be enabled by mount parameter
         * "cdp" during resctrl file system mount time.
         */
        rdt_resources_all[level].cdp_enabled = false;
        rdt_resources_all[level].r_resctrl.cdp_capable = true;
}

static void rdt_get_cdp_l3_config(void)
{
        rdt_get_cdp_config(RDT_RESOURCE_L3);
}

static void rdt_get_cdp_l2_config(void)
{
        rdt_get_cdp_config(RDT_RESOURCE_L2);
}

static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
        unsigned int i;
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

        for (i = m->low; i < m->high; i++)
                wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}
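
/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non linear delay values.
 */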
u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
        if (r->membw.delay_linear)
                return MAX_MBA_BW - bw;

        pr_warn_once("Non Linear delay-bw map not supported but queried\n");
        return r->default_ctrl;
}

static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
                struct rdt_resource *r)
{
        unsigned int i;
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

        /* Write the delay values for MBA. */
        for (i = m->low; i < m->high; i++)
                wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
        unsigned int i;
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

        for (i = m->low; i < m->high; i++)
                wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
        struct rdt_domain *d;

        list_for_each_entry(d, &r->domains, list) {
                /* Find the domain that contains this CPU */
                if (cpumask_test_cpu(cpu, &d->cpu_mask))
                        return d;
        }

        return NULL;
}

u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
{
        return resctrl_to_arch_res(r)->num_closid;
}

void rdt_ctrl_update(void *arg)
{
        struct msr_param *m = arg;
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
        struct rdt_resource *r = m->res;
        int cpu = smp_processor_id();
        struct rdt_domain *d;

        d = get_domain_from_cpu(cpu, r);
        if (d) {
                hw_res->msr_update(d, m, r);
                return;
        }
        pr_warn_once("cpu %d not found in any domain for resource %s\n",
                     cpu, r->name);
}
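
/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */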
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
                                   struct list_head **pos)
{
        struct rdt_domain *d;
        struct list_head *l;

        if (id < 0)
                return ERR_PTR(-ENODEV);

        list_for_each(l, &r->domains) {
                d = list_entry(l, struct rdt_domain, list);
                /* When id is found, return its domain. */
                if (id == d->id)
                        return d;
                /* Stop searching when finding id's position in sorted list. */
                if (id < d->id)
                        break;
        }

        if (pos)
                *pos = l;

        return NULL;
}

void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
{
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        int i;

        /*
         * Initialize the control values to "no restriction":
         * for cache allocation set all bits in the CBM, for memory
         * allocation set the requested bandwidth to 100% and the
         * bandwidth in MBps to its maximum.
         */
        for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
                *dc = r->default_ctrl;
                *dm = MBA_MAX_MBPS;
        }
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
        struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct msr_param m;
        u32 *dc, *dm;

        dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
                           GFP_KERNEL);
        if (!dc)
                return -ENOMEM;

        dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val),
                           GFP_KERNEL);
        if (!dm) {
                kfree(dc);
                return -ENOMEM;
        }

        hw_dom->ctrl_val = dc;
        hw_dom->mbps_val = dm;
        setup_default_ctrlval(r, dc, dm);

        m.low = 0;
        m.high = hw_res->num_closid;
        hw_res->msr_update(d, &m, r);
        return 0;
}

static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
        size_t tsize;

        if (is_llc_occupancy_enabled()) {
                d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
                if (!d->rmid_busy_llc)
                        return -ENOMEM;
                INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
        }
        if (is_mbm_total_enabled()) {
                tsize = sizeof(*d->mbm_total);
                d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
                if (!d->mbm_total) {
                        bitmap_free(d->rmid_busy_llc);
                        return -ENOMEM;
                }
        }
        if (is_mbm_local_enabled()) {
                tsize = sizeof(*d->mbm_local);
                d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
                if (!d->mbm_local) {
                        bitmap_free(d->rmid_busy_llc);
                        kfree(d->mbm_total);
                        return -ENOMEM;
                }
        }

        if (is_mbm_enabled()) {
                INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
                mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
        }

        return 0;
}
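
/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * cache id, then the cpu is added to that domain. Otherwise a new domain is
 * allocated and inserted into the appropriate sorted position in the list,
 * which is kept in ascending cache id order.
 *
 * Called with rdtgroup_mutex held from the CPU online hotplug callback.
 */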
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
        int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
        struct list_head *add_pos = NULL;
        struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;

        d = rdt_find_domain(r, id, &add_pos);
        if (IS_ERR(d)) {
                pr_warn("Couldn't find cache id for CPU %d\n", cpu);
                return;
        }

        if (d) {
                cpumask_set_cpu(cpu, &d->cpu_mask);
                if (r->cache.arch_has_per_cpu_cfg)
                        rdt_domain_reconfigure_cdp(r);
                return;
        }

        hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
        if (!hw_dom)
                return;

        d = &hw_dom->d_resctrl;
        d->id = id;
        cpumask_set_cpu(cpu, &d->cpu_mask);

        rdt_domain_reconfigure_cdp(r);

        if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
                kfree(hw_dom);
                return;
        }

        if (r->mon_capable && domain_setup_mon_state(r, d)) {
                kfree(hw_dom->ctrl_val);
                kfree(hw_dom->mbps_val);
                kfree(hw_dom);
                return;
        }

        list_add_tail(&d->list, add_pos);

        /*
         * If resctrl is mounted, add
         * per domain monitor data directories.
         */
        if (static_branch_unlikely(&rdt_mon_enable_key))
                mkdir_mondata_subdir_allrdtgrp(r, d);
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
        int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
        struct rdt_hw_domain *hw_dom;
        struct rdt_domain *d;

        d = rdt_find_domain(r, id, NULL);
        if (IS_ERR_OR_NULL(d)) {
                pr_warn("Couldn't find cache id for CPU %d\n", cpu);
                return;
        }
        hw_dom = resctrl_to_arch_dom(d);

        cpumask_clear_cpu(cpu, &d->cpu_mask);
        if (cpumask_empty(&d->cpu_mask)) {
                /*
                 * If resctrl is mounted, remove all the
                 * per domain monitor data directories.
                 */
                if (static_branch_unlikely(&rdt_mon_enable_key))
                        rmdir_mondata_subdir_allrdtgrp(r, d->id);
                list_del(&d->list);
                if (r->mon_capable && is_mbm_enabled())
                        cancel_delayed_work(&d->mbm_over);
                if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
                        /*
                         * The last CPU of the domain is going away:
                         * force-clear any RMIDs still in limbo. There is no
                         * way to know that the L3 was flushed, which may
                         * lead to incorrect counts in rare scenarios, but
                         * leaving the RMIDs as busy would leak them if the
                         * domain never comes back.
                         */
                        __check_limbo(d, true);
                        cancel_delayed_work(&d->cqm_limbo);
                }

                /*
                 * rdt_domain "d" is going to be freed below, so clear
                 * its pointer from pseudo_lock_region struct.
                 */
                if (d->plr)
                        d->plr->d = NULL;

                kfree(hw_dom->ctrl_val);
                kfree(hw_dom->mbps_val);
                bitmap_free(d->rmid_busy_llc);
                kfree(d->mbm_total);
                kfree(d->mbm_local);
                kfree(hw_dom);
                return;
        }

        if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
                if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
                        cancel_delayed_work(&d->mbm_over);
                        mbm_setup_overflow_handler(d, 0);
                }
                if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
                    has_busy_rmid(r, d)) {
                        cancel_delayed_work(&d->cqm_limbo);
                        cqm_setup_limbo_handler(d, 0);
                }
        }
}

static void clear_closid_rmid(int cpu)
{
        struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);

        state->default_closid = 0;
        state->default_rmid = 0;
        state->cur_closid = 0;
        state->cur_rmid = 0;
        wrmsr(IA32_PQR_ASSOC, 0, 0);
}

static int resctrl_online_cpu(unsigned int cpu)
{
        struct rdt_resource *r;

        mutex_lock(&rdtgroup_mutex);
        for_each_capable_rdt_resource(r)
                domain_add_cpu(cpu, r);
        /* The cpu is set in default rdtgroup after online. */
        cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
        clear_closid_rmid(cpu);
        mutex_unlock(&rdtgroup_mutex);

        return 0;
}

static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
        struct rdtgroup *cr;

        list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
                if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
                        break;
                }
        }
}

static int resctrl_offline_cpu(unsigned int cpu)
{
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;

        mutex_lock(&rdtgroup_mutex);
        for_each_capable_rdt_resource(r)
                domain_remove_cpu(cpu, r);
        list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
                if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
                        clear_childcpus(rdtgrp, cpu);
                        break;
                }
        }
        clear_closid_rmid(cpu);
        mutex_unlock(&rdtgroup_mutex);

        return 0;
}
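
/*
 * Choose a width for the resource data based on the resource that has the
 * widest data width, so that the schemata file lines up in columns.
 */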
static __init void rdt_init_padding(void)
{
        struct rdt_resource *r;

        for_each_alloc_capable_rdt_resource(r) {
                if (r->data_width > max_data_width)
                        max_data_width = r->data_width;
        }
}

enum {
        RDT_FLAG_CMT,
        RDT_FLAG_MBM_TOTAL,
        RDT_FLAG_MBM_LOCAL,
        RDT_FLAG_L3_CAT,
        RDT_FLAG_L3_CDP,
        RDT_FLAG_L2_CAT,
        RDT_FLAG_L2_CDP,
        RDT_FLAG_MBA,
};

#define RDT_OPT(idx, n, f)      \
[idx] = {                       \
        .name = n,              \
        .flag = f               \
}

struct rdt_options {
        char *name;
        int flag;
        bool force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
        RDT_OPT(RDT_FLAG_CMT,       "cmt",      X86_FEATURE_CQM_OCCUP_LLC),
        RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
        RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
        RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",    X86_FEATURE_CAT_L3),
        RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",    X86_FEATURE_CDP_L3),
        RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",    X86_FEATURE_CAT_L2),
        RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",    X86_FEATURE_CDP_L2),
        RDT_OPT(RDT_FLAG_MBA,       "mba",      X86_FEATURE_MBA),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
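
/*
 * Parse the "rdt=" boot option: a comma separated list of the feature names
 * from rdt_options[]. A name on its own force-enables the feature; a name
 * prefixed with '!' force-disables it, e.g. "rdt=cmt,!mba".
 */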
static int __init set_rdt_options(char *str)
{
        struct rdt_options *o;
        bool force_off;
        char *tok;

        if (*str == '=')
                str++;
        while ((tok = strsep(&str, ",")) != NULL) {
                force_off = *tok == '!';
                if (force_off)
                        tok++;
                for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
                        if (strcmp(tok, o->name) == 0) {
                                if (force_off)
                                        o->force_off = true;
                                else
                                        o->force_on = true;
                                break;
                        }
                }
        }
        return 1;
}
__setup("rdt", set_rdt_options);

static bool __init rdt_cpu_has(int flag)
{
        bool ret = boot_cpu_has(flag);
        struct rdt_options *o;

        if (!ret)
                return ret;

        for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
                if (flag == o->flag) {
                        if (o->force_off)
                                ret = false;
                        if (o->force_on)
                                ret = true;
                        break;
                }
        }
        return ret;
}

static __init bool get_mem_config(void)
{
        struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];

        if (!rdt_cpu_has(X86_FEATURE_MBA))
                return false;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                return __get_mem_config_intel(&hw_res->r_resctrl);
        else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

        return false;
}

static __init bool get_rdt_alloc_resources(void)
{
        struct rdt_resource *r;
        bool ret = false;

        if (rdt_alloc_capable)
                return true;

        if (!boot_cpu_has(X86_FEATURE_RDT_A))
                return false;

        if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
                r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
                rdt_get_cache_alloc_cfg(1, r);
                if (rdt_cpu_has(X86_FEATURE_CDP_L3))
                        rdt_get_cdp_l3_config();
                ret = true;
        }
        if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
                /* CPUID 0x10.2 fields are the same format as 0x10.1 */
                r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
                rdt_get_cache_alloc_cfg(2, r);
                if (rdt_cpu_has(X86_FEATURE_CDP_L2))
                        rdt_get_cdp_l2_config();
                ret = true;
        }

        if (get_mem_config())
                ret = true;

        return ret;
}

static __init bool get_rdt_mon_resources(void)
{
        struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

        if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
                rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
        if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
                rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
        if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
                rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

        if (!rdt_mon_features)
                return false;

        return !rdt_get_mon_l3_config(r);
}

static __init void __check_quirks_intel(void)
{
        switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_HASWELL_X:
                if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
                        cache_alloc_hsw_probe();
                break;
        case INTEL_FAM6_SKYLAKE_X:
                if (boot_cpu_data.x86_stepping <= 4)
                        set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
                else
                        set_rdt_options("!l3cat");
                fallthrough;
        case INTEL_FAM6_BROADWELL_X:
                intel_rdt_mbm_apply_quirk();
                break;
        }
}

static __init void check_quirks(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                __check_quirks_intel();
}

static __init bool get_rdt_resources(void)
{
        rdt_alloc_capable = get_rdt_alloc_resources();
        rdt_mon_capable = get_rdt_mon_resources();

        return (rdt_mon_capable || rdt_alloc_capable);
}

static __init void rdt_init_res_defs_intel(void)
{
        struct rdt_hw_resource *hw_res;
        struct rdt_resource *r;

        for_each_rdt_resource(r) {
                hw_res = resctrl_to_arch_res(r);

                if (r->rid == RDT_RESOURCE_L3 ||
                    r->rid == RDT_RESOURCE_L2) {
                        r->cache.arch_has_sparse_bitmaps = false;
                        r->cache.arch_has_empty_bitmaps = false;
                        r->cache.arch_has_per_cpu_cfg = false;
                } else if (r->rid == RDT_RESOURCE_MBA) {
                        hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
                        hw_res->msr_update = mba_wrmsr_intel;
                }
        }
}

static __init void rdt_init_res_defs_amd(void)
{
        struct rdt_hw_resource *hw_res;
        struct rdt_resource *r;

        for_each_rdt_resource(r) {
                hw_res = resctrl_to_arch_res(r);

                if (r->rid == RDT_RESOURCE_L3 ||
                    r->rid == RDT_RESOURCE_L2) {
                        r->cache.arch_has_sparse_bitmaps = true;
                        r->cache.arch_has_empty_bitmaps = true;
                        r->cache.arch_has_per_cpu_cfg = true;
                } else if (r->rid == RDT_RESOURCE_MBA) {
                        hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
                        hw_res->msr_update = mba_wrmsr_amd;
                }
        }
}

static __init void rdt_init_res_defs(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                rdt_init_res_defs_intel();
        else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                rdt_init_res_defs_amd();
}

static enum cpuhp_state rdt_online;
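
/*
 * Cache the CQM/MBM enumeration (maximum RMID, occupancy scale factor and
 * MBM counter width offset) for @c from CPUID leaf 0xF.
 */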
void resctrl_cpu_detect(struct cpuinfo_x86 *c)
{
        if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
                c->x86_cache_max_rmid = -1;
                c->x86_cache_occ_scale = -1;
                c->x86_cache_mbm_width_offset = -1;
                return;
        }

        /* will be overridden if occupancy monitoring exists */
        c->x86_cache_max_rmid = cpuid_ebx(0xf);

        if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
            cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
            cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
                u32 eax, ebx, ecx, edx;

                /* QoS sub-leaf, EAX=0Fh, ECX=1 */
                cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);

                c->x86_cache_max_rmid = ecx;
                c->x86_cache_occ_scale = ebx;
                c->x86_cache_mbm_width_offset = eax & 0xff;

                if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
                        c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
        }
}

static int __init resctrl_late_init(void)
{
        struct rdt_resource *r;
        int state, ret;

        /*
         * Initialize functions (or definitions) that differ
         * between vendors here.
         */
        rdt_init_res_defs();

        check_quirks();

        if (!get_rdt_resources())
                return -ENODEV;

        rdt_init_padding();

        state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                  "x86/resctrl/cat:online:",
                                  resctrl_online_cpu, resctrl_offline_cpu);
        if (state < 0)
                return state;

        ret = rdtgroup_init();
        if (ret) {
                cpuhp_remove_state(state);
                return ret;
        }
        rdt_online = state;

        for_each_alloc_capable_rdt_resource(r)
                pr_info("%s allocation detected\n", r->name);

        for_each_mon_capable_rdt_resource(r)
                pr_info("%s monitoring detected\n", r->name);

        return 0;
}

late_initcall(resctrl_late_init);

static void __exit resctrl_exit(void)
{
        cpuhp_remove_state(rdt_online);
        rdtgroup_exit();
}

__exitcall(resctrl_exit);