// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
                                 const struct sys_reg_desc *r)
{
        WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
                               struct sys_reg_params *params,
                               const struct sys_reg_desc *r)
{
        WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

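/*
 * vcpu_read_sys_reg()/vcpu_write_sys_reg(): when the vcpu's sysregs are
 * currently loaded on the physical CPU (SYSREGS_ON_CPU), access the
 * hardware copy directly; otherwise fall back to the in-memory shadow
 * state held in the vcpu.
 */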
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
        u64 val = 0x8badf00d8badf00d;

        if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
            __vcpu_read_sys_reg_from_cpu(reg, &val))
                return val;

        return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
        if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
            __vcpu_write_sys_reg_to_cpu(val, reg))
                return;

        __vcpu_sys_reg(vcpu, reg) = val;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* Maximum CSSELR value for which we emulate CCSIDR */
#define CSSELR_MAX 14

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure noone else changes CSSELR during this! */
        local_irq_disable();
        write_sysreg(csselr, csselr_el1);
        isb();
        ccsidr = read_sysreg(ccsidr_el1);
        local_irq_enable();

        return ccsidr;
}

/*
 * Trap handler for set/way cache maintenance (DC ISW/CSW/CISW).
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        /*
         * With FWB, stage-2 guarantees a coherent view of memory, so
         * set/way operations can safely be treated as NOPs. Without it,
         * track the operation so that the whole guest address space can
         * be flushed at the appropriate time.
         */
        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
                kvm_set_way_flush(vcpu);

        return true;
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
        switch (r->aarch32_map) {
        case AA32_LO:
                *mask = GENMASK_ULL(31, 0);
                *shift = 0;
                break;
        case AA32_HI:
                *mask = GENMASK_ULL(63, 32);
                *shift = 32;
                break;
        default:
                *mask = GENMASK_ULL(63, 0);
                *shift = 0;
                break;
        }
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        bool was_enabled = vcpu_has_cache_enabled(vcpu);
        u64 val, mask, shift;

        BUG_ON(!p->is_write);

        get_access_mask(r, &mask, &shift);

        if (~mask) {
                val = vcpu_read_sys_reg(vcpu, r->reg);
                val &= ~mask;
        } else {
                val = 0;
        }

        val |= (p->regval & (mask >> shift)) << shift;
        vcpu_write_sys_reg(vcpu, val, r->reg);

        kvm_toggle_cache(vcpu, was_enabled);
        return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
                         struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        u64 mask, shift;

        if (p->is_write)
                return ignore_write(vcpu, p);

        get_access_mask(r, &mask, &shift);
        p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

        return true;
}

/*
 * Trap handler for the GICv3 SGI generation system registers.
 * Forward the request to the VGIC emulation; the same handler works
 * for both the AArch64 and the AArch32 (cp15) encodings.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        bool g1;

        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        /*
         * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
         * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
         * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
         * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
         * group.
         */
        if (p->Op0 == 0) {              /* AArch32 (cp15) encoding */
                switch (p->Op1) {
                default:                /* Keep GCC quiet */
                case 0:                 /* ICC_SGI1R */
                        g1 = true;
                        break;
                case 1:                 /* ICC_ASGI1R */
                case 2:                 /* ICC_SGI0R */
                        g1 = false;
                        break;
                }
        } else {                        /* AArch64 encoding */
                switch (p->Op2) {
                default:                /* Keep GCC quiet */
                case 5:                 /* ICC_SGI1R_EL1 */
                        g1 = true;
                        break;
                case 6:                 /* ICC_ASGI1R_EL1 */
                case 7:                 /* ICC_SGI0R_EL1 */
                        g1 = false;
                        break;
                }
        }

        vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

        return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
        return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where
 * all the RW registers are RES0 (which we can implement as RAZ/WI).
 *
 * LORID_EL1 is read-only, so a guest write to it becomes an UNDEF; if
 * the CPU doesn't advertise LORegions at all, any access UNDEFs.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
        u32 sr = reg_to_encoding(r);

        if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
                kvm_inject_undefined(vcpu);
                return false;
        }

        if (p->is_write && sr == SYS_LORID_EL1)
                return write_to_read_only(vcpu, p, r);

        return trap_raz_wi(vcpu, p, r);
}

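/*
 * OSLAR_EL1 is write-only. A write of OSLAR.OSLK toggles the OS Lock;
 * mirror the requested state into the emulated OSLSR_EL1.OSLK bit so
 * the guest can read the lock state back.
 */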
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 oslsr;

        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        /* Forward the OSLK bit to OSLSR */
        oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
        if (p->regval & SYS_OSLAR_OSLK)
                oslsr |= SYS_OSLSR_OSLK;

        __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
        return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        p->regval = __vcpu_sys_reg(vcpu, r->reg);
        return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                         u64 val)
{
        /*
         * The only modifiable bit is the OSLK bit. Refuse the write if
         * userspace attempts to change any other bit in the register.
         */
        if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
                return -EINVAL;

        __vcpu_sys_reg(vcpu, rd->reg) = val;
        return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
                                   struct sys_reg_params *p,
                                   const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                p->regval = read_sysreg(dbgauthstatus_el1);
                return true;
        }
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance.
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps.
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_write_sys_reg(vcpu, p->regval, r->reg);
                vcpu_set_flag(vcpu, DEBUG_DIRTY);
        } else {
                p->regval = vcpu_read_sys_reg(vcpu, r->reg);
        }

        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

        return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone.
 * A 32 bit read from a debug register only returns the bottom bits.
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
                       struct sys_reg_params *p,
                       const struct sys_reg_desc *rd,
                       u64 *dbg_reg)
{
        u64 mask, shift, val;

        get_access_mask(rd, &mask, &shift);

        val = *dbg_reg;
        val &= ~mask;
        val |= (p->regval & (mask >> shift)) << shift;
        *dbg_reg = val;

        vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
                       struct sys_reg_params *p,
                       const struct sys_reg_desc *rd,
                       u64 *dbg_reg)
{
        u64 mask, shift;

        get_access_mask(rd, &mask, &shift);
        p->regval = (*dbg_reg & mask) >> shift;
}

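/*
 * trap_bvr/trap_bcr/trap_wvr/trap_wcr and their get/set/reset helpers
 * all follow the same pattern: rd->CRm indexes the break-/watchpoint,
 * and the value lives in vcpu->arch.vcpu_debug_state so that the hyp
 * switch code can install it on guest entry.
 */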
static bool trap_bvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);

        trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

        return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 val)
{
        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
        return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 *val)
{
        *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
        return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);

        trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

        return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 val)
{
        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
        return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 *val)
{
        *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
        return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);

        trace_trap_reg(__func__, rd->CRm, p->is_write,
                       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

        return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 val)
{
        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
        return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 *val)
{
        *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
        return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);

        trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

        return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 val)
{
        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
        return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 *val)
{
        *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
        return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 amair = read_sysreg(amair_el1);
        vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 actlr = read_sysreg(actlr_el1);
        vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

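/*
 * Map the vcpu_id into the first three affinity level fields of the
 * MPIDR. We limit the number of VCPUs in level 0 due to a limitation
 * of 16 CPUs at that level in the ICC_SGIxR registers of the GICv3,
 * so that each CPU can be addressed directly when sending IPIs.
 * For instance (illustrative only): vcpu_id 0x123 yields Aff0 = 0x3,
 * Aff1 = 0x12, Aff2 = 0x0, with bit 31 (RES1) always set.
 */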
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 mpidr;

        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}

static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
                                   const struct sys_reg_desc *r)
{
        if (kvm_vcpu_has_pmu(vcpu))
                return 0;

        return REG_HIDDEN;
}

static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

        /* No PMU available, any PMU reg may UNDEF... */
        if (!kvm_arm_support_pmu_v3())
                return;

        n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
        n &= ARMV8_PMU_PMCR_N_MASK;
        if (n)
                mask |= GENMASK(n - 1, 0);

        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= mask;
}

static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 pmcr, val;

        /* No PMU available, PMCR_EL0 may UNDEF... */
        if (!kvm_arm_support_pmu_v3())
                return;

        pmcr = read_sysreg(pmcr_el0);

        /*
         * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
         * except PMCR.E resetting to zero.
         */
        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
        if (!kvm_supports_32bit_el0())
                val |= ARMV8_PMU_PMCR_LC;
        __vcpu_sys_reg(vcpu, r->reg) = val;
}

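/*
 * EL0 access to the PMU is gated by PMUSERENR_EL0: the access is
 * allowed if any of the requested enable bits is set, or if the vcpu
 * is running at a privileged exception level. Otherwise an UNDEF is
 * injected and the helper reports the access as disabled.
 */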
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
        u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
        bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

        if (!enabled)
                kvm_inject_undefined(vcpu);

        return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        u64 val;

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (p->is_write) {
                /* Only update writeable bits of PMCR */
                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
                if (!kvm_supports_32bit_el0())
                        val |= ARMV8_PMU_PMCR_LC;
                __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
                kvm_pmu_handle_pmcr(vcpu, val);
                kvm_vcpu_pmu_restore_guest(vcpu);
        } else {
                /* PMCR.P & PMCR.C are RAZ */
                val = __vcpu_sys_reg(vcpu, PMCR_EL0)
                      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
                p->regval = val;
        }

        return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        if (pmu_access_event_counter_el0_disabled(vcpu))
                return false;

        if (p->is_write)
                __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
        else
                /* return PMSELR.SEL field */
                p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
                            & ARMV8_PMU_COUNTER_MASK;

        return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        u64 pmceid, mask, shift;

        BUG_ON(p->is_write);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        get_access_mask(r, &mask, &shift);

        pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
        pmceid &= mask;
        pmceid >>= shift;

        p->regval = pmceid;

        return true;
}

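/*
 * A counter index is valid if it is strictly below PMCR_EL0.N, or if
 * it designates the cycle counter. Anything else UNDEFs.
 */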
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
        u64 pmcr, val;

        pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
                kvm_inject_undefined(vcpu);
                return false;
        }

        return true;
}

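/*
 * Common handler for the PMU event counters. The counter index is
 * derived from the encoding: PMXEVCNTR_EL0 indirects through
 * PMSELR_EL0.SEL, PMCCNTR maps to the cycle counter, and
 * PMEVCNTRn_EL0 encodes n in CRm/Op2.
 */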
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
                              struct sys_reg_params *p,
                              const struct sys_reg_desc *r)
{
        u64 idx = ~0UL;

        if (r->CRn == 9 && r->CRm == 13) {
                if (r->Op2 == 2) {
                        /* PMXEVCNTR_EL0 */
                        if (pmu_access_event_counter_el0_disabled(vcpu))
                                return false;

                        idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
                              & ARMV8_PMU_COUNTER_MASK;
                } else if (r->Op2 == 0) {
                        /* PMCCNTR_EL0 */
                        if (pmu_access_cycle_counter_el0_disabled(vcpu))
                                return false;

                        idx = ARMV8_PMU_CYCLE_IDX;
                }
        } else if (r->CRn == 0 && r->CRm == 9) {
                /* PMCCNTR */
                if (pmu_access_event_counter_el0_disabled(vcpu))
                        return false;

                idx = ARMV8_PMU_CYCLE_IDX;
        } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
                /* PMEVCNTRn_EL0 */
                if (pmu_access_event_counter_el0_disabled(vcpu))
                        return false;

                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
        }

        /* Catch any decoding failure */
        WARN_ON(idx == ~0UL);

        if (!pmu_counter_idx_valid(vcpu, idx))
                return false;

        if (p->is_write) {
                if (pmu_access_el0_disabled(vcpu))
                        return false;

                kvm_pmu_set_counter_value(vcpu, idx, p->regval);
        } else {
                p->regval = kvm_pmu_get_counter_value(vcpu, idx);
        }

        return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                               const struct sys_reg_desc *r)
{
        u64 idx, reg;

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
                /* PMXEVTYPER_EL0 */
                idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
                reg = PMEVTYPER0_EL0 + idx;
        } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
                if (idx == ARMV8_PMU_CYCLE_IDX)
                        reg = PMCCFILTR_EL0;
                else
                        /* PMEVTYPERn_EL0 */
                        reg = PMEVTYPER0_EL0 + idx;
        } else {
                BUG();
        }

        if (!pmu_counter_idx_valid(vcpu, idx))
                return false;

        if (p->is_write) {
                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
                __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
                kvm_vcpu_pmu_restore_guest(vcpu);
        } else {
                p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
        }

        return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 val, mask;

        if (pmu_access_el0_disabled(vcpu))
                return false;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        if (p->is_write) {
                val = p->regval & mask;
                if (r->Op2 & 0x1) {
                        /* accessing PMCNTENSET_EL0 */
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
                        kvm_pmu_enable_counter_mask(vcpu, val);
                        kvm_vcpu_pmu_restore_guest(vcpu);
                } else {
                        /* accessing PMCNTENCLR_EL0 */
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
                        kvm_pmu_disable_counter_mask(vcpu, val);
                }
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
        }

        return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 mask = kvm_pmu_valid_counter_mask(vcpu);

        if (check_pmu_access_disabled(vcpu, 0))
                return false;

        if (p->is_write) {
                u64 val = p->regval & mask;

                if (r->Op2 & 0x1)
                        /* accessing PMINTENSET_EL1 */
                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
                else
                        /* accessing PMINTENCLR_EL1 */
                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
        }

        return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        u64 mask = kvm_pmu_valid_counter_mask(vcpu);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (p->is_write) {
                if (r->CRm & 0x2)
                        /* accessing PMOVSSET_EL0 */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
                else
                        /* accessing PMOVSCLR_EL0 */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
        }

        return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 mask;

        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        if (pmu_write_swinc_el0_disabled(vcpu))
                return false;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        kvm_pmu_software_increment(vcpu, p->regval & mask);
        return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                             const struct sys_reg_desc *r)
{
        if (p->is_write) {
                if (!vcpu_mode_priv(vcpu)) {
                        kvm_inject_undefined(vcpu);
                        return false;
                }

                __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
                        p->regval & ARMV8_PMU_USERENR_MASK;
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
                            & ARMV8_PMU_USERENR_MASK;
        }

        return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WCR,WVR}n_EL1 registers. */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
          trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },                \
        { SYS_DESC(SYS_DBGBCRn_EL1(n)),                                 \
          trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },                \
        { SYS_DESC(SYS_DBGWVRn_EL1(n)),                                 \
          trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },                \
        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
          trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(r)                                                  \
        SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)                                             \
        { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
          .reset = reset_pmevcntr,                                      \
          .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)                                            \
        { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),                           \
          .reset = reset_pmevtyper,                                     \
          .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        kvm_inject_undefined(vcpu);

        return false;
}

/* Macros to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
                                       const struct sys_reg_desc *rd)
{
        return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * Pointer authentication key registers. The guest only traps here when
 * ptrauth is not enabled for it, in which case the access UNDEFs; the
 * registers are likewise hidden from userspace on non-ptrauth vcpus.
 */
#define __PTRAUTH_KEY(k)                                                \
        { SYS_DESC(SYS_## k), undef_access, reset_unknown, k,           \
          .visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)                                                  \
        __PTRAUTH_KEY(k ## KEYLO_EL1),                                  \
        __PTRAUTH_KEY(k ## KEYHI_EL1)

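/*
 * Route an EL1 physical timer access (TVAL/CTL/CVAL, in either the
 * AArch64 or the AArch32 encoding) to the generic timer emulation.
 */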
static bool access_arch_timer(struct kvm_vcpu *vcpu,
                              struct sys_reg_params *p,
                              const struct sys_reg_desc *r)
{
        enum kvm_arch_timers tmr;
        enum kvm_arch_timer_regs treg;
        u64 reg = reg_to_encoding(r);

        switch (reg) {
        case SYS_CNTP_TVAL_EL0:
        case SYS_AARCH32_CNTP_TVAL:
                tmr = TIMER_PTIMER;
                treg = TIMER_REG_TVAL;
                break;
        case SYS_CNTP_CTL_EL0:
        case SYS_AARCH32_CNTP_CTL:
                tmr = TIMER_PTIMER;
                treg = TIMER_REG_CTL;
                break;
        case SYS_CNTP_CVAL_EL0:
        case SYS_AARCH32_CNTP_CVAL:
                tmr = TIMER_PTIMER;
                treg = TIMER_REG_CVAL;
                break;
        default:
                BUG();
        }

        if (p->is_write)
                kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
        else
                p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

        return true;
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                       struct sys_reg_desc const *r, bool raz)
{
        u32 id = reg_to_encoding(r);
        u64 val;

        if (raz)
                return 0;

        val = read_sanitised_ftr_reg(id);

        switch (id) {
        case SYS_ID_AA64PFR0_EL1:
                if (!vcpu_has_sve(vcpu))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
                if (kvm_vgic_global_state.type == VGIC_V3) {
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
                        val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
                }
                break;
        case SYS_ID_AA64PFR1_EL1:
                if (!kvm_has_mte(vcpu->kvm))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);

                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
                break;
        case SYS_ID_AA64ISAR1_EL1:
                if (!vcpu_has_ptrauth(vcpu))
                        val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
                break;
        case SYS_ID_AA64ISAR2_EL1:
                if (!vcpu_has_ptrauth(vcpu))
                        val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
                if (!cpus_have_final_cap(ARM64_HAS_WFXT))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
                break;
        case SYS_ID_AA64DFR0_EL1:
                /* Limit debug to ARMv8.0 */
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
                /* Limit guests to PMUv3 for ARMv8.4 */
                val = cpuid_feature_cap_perfmon_field(val,
                                                      ID_AA64DFR0_PMUVER_SHIFT,
                                                      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
                /* Hide SPE from guests */
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
                break;
        case SYS_ID_DFR0_EL1:
                /* Limit guests to PMUv3 for ARMv8.4 */
                val = cpuid_feature_cap_perfmon_field(val,
                                                      ID_DFR0_PERFMON_SHIFT,
                                                      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
                break;
        }

        return val;
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
                                  const struct sys_reg_desc *r)
{
        u32 id = reg_to_encoding(r);

        switch (id) {
        case SYS_ID_AA64ZFR0_EL1:
                if (!vcpu_has_sve(vcpu))
                        return REG_RAZ;
                break;
        }

        return 0;
}

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *p,
                            const struct sys_reg_desc *r,
                            bool raz)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        p->regval = read_id_reg(vcpu, r, raz);
        return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        bool raz = sysreg_visible_as_raz(vcpu, r);

        return __access_id_reg(vcpu, p, r, raz);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
                              struct sys_reg_params *p,
                              const struct sys_reg_desc *r)
{
        return __access_id_reg(vcpu, p, r, true);
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
                                   const struct sys_reg_desc *rd)
{
        if (vcpu_has_sve(vcpu))
                return 0;

        return REG_HIDDEN;
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
                               const struct sys_reg_desc *rd,
                               u64 val)
{
        u8 csv2, csv3;

        /*
         * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
         * it doesn't promise more than what is actually provided (the
         * guest could otherwise be covered in ectoplasmic residue).
         */
        csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
        if (csv2 > 1 ||
            (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
                return -EINVAL;

        /* Same thing for CSV3 */
        csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
        if (csv3 > 1 ||
            (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
                return -EINVAL;

        /* We can only differ with CSV[23], and anything else is an error */
        val ^= read_id_reg(vcpu, rd, false);
        val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
                 (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
        if (val)
                return -EINVAL;

        vcpu->kvm->arch.pfr0_csv2 = csv2;
        vcpu->kvm->arch.pfr0_csv3 = csv3;

        return 0;
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
                        const struct sys_reg_desc *rd, u64 *val,
                        bool raz)
{
        *val = read_id_reg(vcpu, rd, raz);
        return 0;
}

static int __set_id_reg(const struct kvm_vcpu *vcpu,
                        const struct sys_reg_desc *rd, u64 val,
                        bool raz)
{
        /* This is what we mean by invariant: you can't change it. */
        if (val != read_id_reg(vcpu, rd, raz))
                return -EINVAL;

        return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 *val)
{
        bool raz = sysreg_visible_as_raz(vcpu, rd);

        return __get_id_reg(vcpu, rd, val, raz);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 val)
{
        bool raz = sysreg_visible_as_raz(vcpu, rd);

        return __set_id_reg(vcpu, rd, val, raz);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                          u64 val)
{
        return __set_id_reg(vcpu, rd, val, true);
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                       u64 *val)
{
        *val = 0;
        return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 val)
{
        return 0;
}

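/*
 * CTR_EL0 is read-only and exposed to the guest as the system-wide
 * sanitised value.
 */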
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                       const struct sys_reg_desc *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
        return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        p->regval = read_sysreg(clidr_el1);
        return true;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        int reg = r->reg;

        if (p->is_write)
                vcpu_write_sys_reg(vcpu, p->regval, reg);
        else
                p->regval = vcpu_read_sys_reg(vcpu, reg);
        return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        u32 csselr;

        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
        p->regval = get_ccsidr(csselr);

        /*
         * Guests should not be doing cache operations by set/way at all, and
         * for this reason, we trap them and attempt to infer the intent, so
         * that we can flush the entire guest's address space at the
         * appropriate time. To prevent this trapping from causing performance
         * problems, let's expose the geometry of all data and unified caches
         * (which are guaranteed to be PIPT and thus non-aliasing) as 1 set
         * and 1 way.
         */
        if (!(csselr & 1)) /* data or unified cache */
                p->regval &= ~GENMASK(27, 3);
        return true;
}

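/*
 * The MTE system registers are only visible (and thus only context
 * switched) when the VM has been created with the MTE capability.
 */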
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
                                   const struct sys_reg_desc *rd)
{
        if (kvm_has_mte(vcpu->kvm))
                return 0;

        return REG_HIDDEN;
}

#define MTE_REG(name) {                         \
        SYS_DESC(SYS_##name),                   \
        .access = undef_access,                 \
        .reset = reset_unknown,                 \
        .reg = name,                            \
        .visibility = mte_visibility,           \
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {                    \
        SYS_DESC(SYS_##name),                   \
        .access = access_id_reg,                \
        .get_user = get_id_reg,                 \
        .set_user = set_id_reg,                 \
        .visibility = id_visibility,            \
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {                      \
        Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),     \
        .access = access_raz_id_reg,                    \
        .get_user = get_raz_reg,                        \
        .set_user = set_raz_id_reg,                     \
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed to userspace as RAZ/WI.
 */
#define ID_HIDDEN(name) {                       \
        SYS_DESC(SYS_##name),                   \
        .access = access_raz_id_reg,            \
        .get_user = get_raz_reg,                \
        .set_user = set_raz_id_reg,             \
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: most debug-related registers are trapped. State the
 * guest genuinely depends on (MDSCR_EL1, the break-/watchpoint
 * registers) is kept in the vcpu; the rest is treated as RAZ/WI.
 */
static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_DC_ISW), access_dcsw },
        { SYS_DESC(SYS_DC_CSW), access_dcsw },
        { SYS_DESC(SYS_DC_CISW), access_dcsw },

        DBG_BCR_BVR_WCR_WVR_EL1(0),
        DBG_BCR_BVR_WCR_WVR_EL1(1),
        { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
        { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
        DBG_BCR_BVR_WCR_WVR_EL1(2),
        DBG_BCR_BVR_WCR_WVR_EL1(3),
        DBG_BCR_BVR_WCR_WVR_EL1(4),
        DBG_BCR_BVR_WCR_WVR_EL1(5),
        DBG_BCR_BVR_WCR_WVR_EL1(6),
        DBG_BCR_BVR_WCR_WVR_EL1(7),
        DBG_BCR_BVR_WCR_WVR_EL1(8),
        DBG_BCR_BVR_WCR_WVR_EL1(9),
        DBG_BCR_BVR_WCR_WVR_EL1(10),
        DBG_BCR_BVR_WCR_WVR_EL1(11),
        DBG_BCR_BVR_WCR_WVR_EL1(12),
        DBG_BCR_BVR_WCR_WVR_EL1(13),
        DBG_BCR_BVR_WCR_WVR_EL1(14),
        DBG_BCR_BVR_WCR_WVR_EL1(15),

        { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
        { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
          SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
        { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
        { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

        { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
        { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
        /* DBGDTR[TR]X_EL0 share the same encoding */
        { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

        { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

        { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

        /*
         * ID regs: all ID_SANITISED() entries here must have corresponding
         * entries in arm64_ftr_regs[].
         */

        /* AArch64 mappings of the AArch32 ID registers */
        /* CRm=1 */
        ID_SANITISED(ID_PFR0_EL1),
        ID_SANITISED(ID_PFR1_EL1),
        ID_SANITISED(ID_DFR0_EL1),
        ID_HIDDEN(ID_AFR0_EL1),
        ID_SANITISED(ID_MMFR0_EL1),
        ID_SANITISED(ID_MMFR1_EL1),
        ID_SANITISED(ID_MMFR2_EL1),
        ID_SANITISED(ID_MMFR3_EL1),

        /* CRm=2 */
        ID_SANITISED(ID_ISAR0_EL1),
        ID_SANITISED(ID_ISAR1_EL1),
        ID_SANITISED(ID_ISAR2_EL1),
        ID_SANITISED(ID_ISAR3_EL1),
        ID_SANITISED(ID_ISAR4_EL1),
        ID_SANITISED(ID_ISAR5_EL1),
        ID_SANITISED(ID_MMFR4_EL1),
        ID_SANITISED(ID_ISAR6_EL1),

        /* CRm=3 */
        ID_SANITISED(MVFR0_EL1),
        ID_SANITISED(MVFR1_EL1),
        ID_SANITISED(MVFR2_EL1),
        ID_UNALLOCATED(3,3),
        ID_SANITISED(ID_PFR2_EL1),
        ID_HIDDEN(ID_DFR1_EL1),
        ID_SANITISED(ID_MMFR5_EL1),
        ID_UNALLOCATED(3,7),

        /* AArch64 ID registers */
        /* CRm=4 */
        { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
          .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
        ID_SANITISED(ID_AA64PFR1_EL1),
        ID_UNALLOCATED(4,2),
        ID_UNALLOCATED(4,3),
        ID_SANITISED(ID_AA64ZFR0_EL1),
        ID_HIDDEN(ID_AA64SMFR0_EL1),
        ID_UNALLOCATED(4,6),
        ID_UNALLOCATED(4,7),

        /* CRm=5 */
        ID_SANITISED(ID_AA64DFR0_EL1),
        ID_SANITISED(ID_AA64DFR1_EL1),
        ID_UNALLOCATED(5,2),
        ID_UNALLOCATED(5,3),
        ID_HIDDEN(ID_AA64AFR0_EL1),
        ID_HIDDEN(ID_AA64AFR1_EL1),
        ID_UNALLOCATED(5,6),
        ID_UNALLOCATED(5,7),

        /* CRm=6 */
        ID_SANITISED(ID_AA64ISAR0_EL1),
        ID_SANITISED(ID_AA64ISAR1_EL1),
        ID_SANITISED(ID_AA64ISAR2_EL1),
        ID_UNALLOCATED(6,3),
        ID_UNALLOCATED(6,4),
        ID_UNALLOCATED(6,5),
        ID_UNALLOCATED(6,6),
        ID_UNALLOCATED(6,7),

        /* CRm=7 */
        ID_SANITISED(ID_AA64MMFR0_EL1),
        ID_SANITISED(ID_AA64MMFR1_EL1),
        ID_SANITISED(ID_AA64MMFR2_EL1),
        ID_UNALLOCATED(7,3),
        ID_UNALLOCATED(7,4),
        ID_UNALLOCATED(7,5),
        ID_UNALLOCATED(7,6),
        ID_UNALLOCATED(7,7),

        { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
        { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
        { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

        MTE_REG(RGSR_EL1),
        MTE_REG(GCR_EL1),

        { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
        { SYS_DESC(SYS_TRFCR_EL1), undef_access },
        { SYS_DESC(SYS_SMPRI_EL1), undef_access },
        { SYS_DESC(SYS_SMCR_EL1), undef_access },
        { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
        { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
        { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

        PTRAUTH_KEY(APIA),
        PTRAUTH_KEY(APIB),
        PTRAUTH_KEY(APDA),
        PTRAUTH_KEY(APDB),
        PTRAUTH_KEY(APGA),

        { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
        { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
        { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

        { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

        MTE_REG(TFSR_EL1),
        MTE_REG(TFSRE0_EL1),

        { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

        /* Statistical Profiling Extension: not exposed to guests */
        { SYS_DESC(SYS_PMSCR_EL1), undef_access },
        { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
        { SYS_DESC(SYS_PMSICR_EL1), undef_access },
        { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
        { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
        { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
        { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
        { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
        { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
        { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
        { SYS_DESC(SYS_PMBSR_EL1), undef_access },
        /* PMBIDR_EL1 is not trapped */

        { PMU_SYS_REG(SYS_PMINTENSET_EL1),
          .access = access_pminten, .reg = PMINTENSET_EL1 },
        { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
          .access = access_pminten, .reg = PMINTENSET_EL1 },
        { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

        { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
        { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
        { SYS_DESC(SYS_LORN_EL1), trap_loregion },
        { SYS_DESC(SYS_LORC_EL1), trap_loregion },
        { SYS_DESC(SYS_LORID_EL1), trap_loregion },

        { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
        { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

        { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
        { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
        { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
        { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
        { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
        { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
        { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

        { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
        { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

        { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

        { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

        { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
        { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
        { SYS_DESC(SYS_SMIDR_EL1), undef_access },
        { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
        { SYS_DESC(SYS_CTR_EL0), access_ctr },
        { SYS_DESC(SYS_SVCR), undef_access },

        { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
          .reset = reset_pmcr, .reg = PMCR_EL0 },
        { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
          .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
        { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
          .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
        { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
          .access = access_pmovs, .reg = PMOVSSET_EL0 },
        /*
         * PMSWINC_EL0 is write-only for the guest; for userspace it is
         * exposed as RAZ/WI, hence the dedicated user accessors and the
         * absence of a reset handler.
         */
        { PMU_SYS_REG(SYS_PMSWINC_EL0),
          .get_user = get_raz_reg, .set_user = set_wi_reg,
          .access = access_pmswinc, .reset = NULL },
        { PMU_SYS_REG(SYS_PMSELR_EL0),
          .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
        { PMU_SYS_REG(SYS_PMCEID0_EL0),
          .access = access_pmceid, .reset = NULL },
        { PMU_SYS_REG(SYS_PMCEID1_EL0),
          .access = access_pmceid, .reset = NULL },
        { PMU_SYS_REG(SYS_PMCCNTR_EL0),
          .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
        { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
          .access = access_pmu_evtyper, .reset = NULL },
        { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
          .access = access_pmu_evcntr, .reset = NULL },

        /*
         * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as
         * zero in 32bit mode. Here we choose to reset it as zero for
         * consistency.
         */
        { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
          .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
        { PMU_SYS_REG(SYS_PMOVSSET_EL0),
          .access = access_pmovs, .reg = PMOVSSET_EL0 },

        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
        { SYS_DESC(SYS_TPIDR2_EL0), undef_access },

        { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

        /* The AMU is not exposed to guests: all accesses UNDEF */
        { SYS_DESC(SYS_AMCR_EL0), undef_access },
        { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
        { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
        { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
        { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
        { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
        { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
        { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
        AMU_AMEVCNTR0_EL0(0),
        AMU_AMEVCNTR0_EL0(1),
        AMU_AMEVCNTR0_EL0(2),
        AMU_AMEVCNTR0_EL0(3),
        AMU_AMEVCNTR0_EL0(4),
        AMU_AMEVCNTR0_EL0(5),
        AMU_AMEVCNTR0_EL0(6),
        AMU_AMEVCNTR0_EL0(7),
        AMU_AMEVCNTR0_EL0(8),
        AMU_AMEVCNTR0_EL0(9),
        AMU_AMEVCNTR0_EL0(10),
        AMU_AMEVCNTR0_EL0(11),
        AMU_AMEVCNTR0_EL0(12),
        AMU_AMEVCNTR0_EL0(13),
        AMU_AMEVCNTR0_EL0(14),
        AMU_AMEVCNTR0_EL0(15),
        AMU_AMEVTYPER0_EL0(0),
        AMU_AMEVTYPER0_EL0(1),
        AMU_AMEVTYPER0_EL0(2),
        AMU_AMEVTYPER0_EL0(3),
        AMU_AMEVTYPER0_EL0(4),
        AMU_AMEVTYPER0_EL0(5),
        AMU_AMEVTYPER0_EL0(6),
        AMU_AMEVTYPER0_EL0(7),
        AMU_AMEVTYPER0_EL0(8),
        AMU_AMEVTYPER0_EL0(9),
        AMU_AMEVTYPER0_EL0(10),
        AMU_AMEVTYPER0_EL0(11),
        AMU_AMEVTYPER0_EL0(12),
        AMU_AMEVTYPER0_EL0(13),
        AMU_AMEVTYPER0_EL0(14),
        AMU_AMEVTYPER0_EL0(15),
        AMU_AMEVCNTR1_EL0(0),
        AMU_AMEVCNTR1_EL0(1),
        AMU_AMEVCNTR1_EL0(2),
        AMU_AMEVCNTR1_EL0(3),
        AMU_AMEVCNTR1_EL0(4),
        AMU_AMEVCNTR1_EL0(5),
        AMU_AMEVCNTR1_EL0(6),
        AMU_AMEVCNTR1_EL0(7),
        AMU_AMEVCNTR1_EL0(8),
        AMU_AMEVCNTR1_EL0(9),
        AMU_AMEVCNTR1_EL0(10),
        AMU_AMEVCNTR1_EL0(11),
        AMU_AMEVCNTR1_EL0(12),
        AMU_AMEVCNTR1_EL0(13),
        AMU_AMEVCNTR1_EL0(14),
        AMU_AMEVCNTR1_EL0(15),
        AMU_AMEVTYPER1_EL0(0),
        AMU_AMEVTYPER1_EL0(1),
        AMU_AMEVTYPER1_EL0(2),
        AMU_AMEVTYPER1_EL0(3),
        AMU_AMEVTYPER1_EL0(4),
        AMU_AMEVTYPER1_EL0(5),
        AMU_AMEVTYPER1_EL0(6),
        AMU_AMEVTYPER1_EL0(7),
        AMU_AMEVTYPER1_EL0(8),
        AMU_AMEVTYPER1_EL0(9),
        AMU_AMEVTYPER1_EL0(10),
        AMU_AMEVTYPER1_EL0(11),
        AMU_AMEVTYPER1_EL0(12),
        AMU_AMEVTYPER1_EL0(13),
        AMU_AMEVTYPER1_EL0(14),
        AMU_AMEVTYPER1_EL0(15),

        { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

        /* PMEVCNTRn_EL0 */
        PMU_PMEVCNTR_EL0(0),
        PMU_PMEVCNTR_EL0(1),
        PMU_PMEVCNTR_EL0(2),
        PMU_PMEVCNTR_EL0(3),
        PMU_PMEVCNTR_EL0(4),
        PMU_PMEVCNTR_EL0(5),
        PMU_PMEVCNTR_EL0(6),
        PMU_PMEVCNTR_EL0(7),
        PMU_PMEVCNTR_EL0(8),
        PMU_PMEVCNTR_EL0(9),
        PMU_PMEVCNTR_EL0(10),
        PMU_PMEVCNTR_EL0(11),
        PMU_PMEVCNTR_EL0(12),
        PMU_PMEVCNTR_EL0(13),
        PMU_PMEVCNTR_EL0(14),
        PMU_PMEVCNTR_EL0(15),
        PMU_PMEVCNTR_EL0(16),
        PMU_PMEVCNTR_EL0(17),
        PMU_PMEVCNTR_EL0(18),
        PMU_PMEVCNTR_EL0(19),
        PMU_PMEVCNTR_EL0(20),
        PMU_PMEVCNTR_EL0(21),
        PMU_PMEVCNTR_EL0(22),
        PMU_PMEVCNTR_EL0(23),
        PMU_PMEVCNTR_EL0(24),
        PMU_PMEVCNTR_EL0(25),
        PMU_PMEVCNTR_EL0(26),
        PMU_PMEVCNTR_EL0(27),
        PMU_PMEVCNTR_EL0(28),
        PMU_PMEVCNTR_EL0(29),
        PMU_PMEVCNTR_EL0(30),

        /* PMEVTYPERn_EL0 */
        PMU_PMEVTYPER_EL0(0),
        PMU_PMEVTYPER_EL0(1),
        PMU_PMEVTYPER_EL0(2),
        PMU_PMEVTYPER_EL0(3),
        PMU_PMEVTYPER_EL0(4),
        PMU_PMEVTYPER_EL0(5),
        PMU_PMEVTYPER_EL0(6),
        PMU_PMEVTYPER_EL0(7),
        PMU_PMEVTYPER_EL0(8),
        PMU_PMEVTYPER_EL0(9),
        PMU_PMEVTYPER_EL0(10),
        PMU_PMEVTYPER_EL0(11),
        PMU_PMEVTYPER_EL0(12),
        PMU_PMEVTYPER_EL0(13),
        PMU_PMEVTYPER_EL0(14),
        PMU_PMEVTYPER_EL0(15),
        PMU_PMEVTYPER_EL0(16),
        PMU_PMEVTYPER_EL0(17),
        PMU_PMEVTYPER_EL0(18),
        PMU_PMEVTYPER_EL0(19),
        PMU_PMEVTYPER_EL0(20),
        PMU_PMEVTYPER_EL0(21),
        PMU_PMEVTYPER_EL0(22),
        PMU_PMEVTYPER_EL0(23),
        PMU_PMEVTYPER_EL0(24),
        PMU_PMEVTYPER_EL0(25),
        PMU_PMEVTYPER_EL0(26),
        PMU_PMEVTYPER_EL0(27),
        PMU_PMEVTYPER_EL0(28),
        PMU_PMEVTYPER_EL0(29),
        PMU_PMEVTYPER_EL0(30),

        /*
         * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as
         * zero in 32bit mode. Here we choose to reset it as zero for
         * consistency.
         */
        { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
          .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
        { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};

/* Synthesise DBGDIDR from the 64bit view of the debug feature registers */
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
                         struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
                u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
                u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

                p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
                             (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
                             (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
                             | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
                return true;
        }
}

/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)                                          \
        /* DBGBVRn */                                                   \
        { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
        /* DBGBCRn */                                                   \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },     \
        /* DBGWVRn */                                                   \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },     \
        /* DBGWCRn */                                                   \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)                                                      \
        { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, would this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
        /* DBGDIDR */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
        /* DBGDTRRXext */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

        DBG_BCR_BVR_WCR_WVR(0),
        /* DBGDSCRint */
        { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(1),
        /* DBGDCCINT */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
        /* DBGDSCRext */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
        DBG_BCR_BVR_WCR_WVR(2),
        /* DBGDTR[RT]Xint */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
        /* DBGDTR[RT]Xext */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(3),
        DBG_BCR_BVR_WCR_WVR(4),
        DBG_BCR_BVR_WCR_WVR(5),
        /* DBGWFAR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
        /* DBGOSECCR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(6),
        /* DBGVCR */
        { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
        DBG_BCR_BVR_WCR_WVR(7),
        DBG_BCR_BVR_WCR_WVR(8),
        DBG_BCR_BVR_WCR_WVR(9),
        DBG_BCR_BVR_WCR_WVR(10),
        DBG_BCR_BVR_WCR_WVR(11),
        DBG_BCR_BVR_WCR_WVR(12),
        DBG_BCR_BVR_WCR_WVR(13),
        DBG_BCR_BVR_WCR_WVR(14),
        DBG_BCR_BVR_WCR_WVR(15),

        /* DBGDRAR (32bit) */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

        DBGBXVR(0),
        /* DBGOSLAR */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
        DBGBXVR(1),
        /* DBGOSLSR */
        { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
        DBGBXVR(2),
        DBGBXVR(3),
        /* DBGOSDLR */
        { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
        DBGBXVR(4),
        /* DBGPRCR */
        { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
        DBGBXVR(5),
        DBGBXVR(6),
        DBGBXVR(7),
        DBGBXVR(8),
        DBGBXVR(9),
        DBGBXVR(10),
        DBGBXVR(11),
        DBGBXVR(12),
        DBGBXVR(13),
        DBGBXVR(14),
        DBGBXVR(15),

        /* DBGDSAR (32bit) */
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

        /* DBGDEVID2 */
        { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
        /* DBGDEVID1 */
        { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
        /* DBGDEVID */
        { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
        /* DBGCLAIMSET */
        { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
        /* DBGCLAIMCLR */
        { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
        /* DBGAUTHSTATUS */
        { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
        /* DBGDRAR (64bit) */
        { Op1( 0), CRm( 1), .access = trap_raz_wi },

        /* DBGDSAR (64bit) */
        { Op1( 0), CRm( 2), .access = trap_raz_wi },
};

#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)                  \
        AA32(_map),                                                     \
        Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),                     \
        .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)                                                 \
        { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,                           \
          (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),                  \
          .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)                                                \
        { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,                           \
          (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),                  \
          .access = access_pmu_evtyper }

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
        /* ACTLR */
        { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
        /* ACTLR2 */
        { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
        /* TTBCR */
        { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
        /* TTBCR2 */
        { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
        /* DFSR */
        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
        /* ADFSR */
        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
        /* AIFSR */
        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
        /* DFAR */
        { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
        /* IFAR */
        { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

        /*
         * DC{C,I,CI}SW operations:
         */
        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

        /* PMU */
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
        { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
        { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
        { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
        { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
        /* PMMIR */
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

        /* PRRR/MAIR0 */
        { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
        /* NMRR/MAIR1 */
        { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
        /* AMAIR0 */
        { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
        /* AMAIR1 */
        { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

        /* ICC_SRE */
        { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

        { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

        /* Arch timers */
        { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
        { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

        /* PMEVCNTRn */
        PMU_PMEVCNTR(0),
        PMU_PMEVCNTR(1),
        PMU_PMEVCNTR(2),
        PMU_PMEVCNTR(3),
        PMU_PMEVCNTR(4),
        PMU_PMEVCNTR(5),
        PMU_PMEVCNTR(6),
        PMU_PMEVCNTR(7),
        PMU_PMEVCNTR(8),
        PMU_PMEVCNTR(9),
        PMU_PMEVCNTR(10),
        PMU_PMEVCNTR(11),
        PMU_PMEVCNTR(12),
        PMU_PMEVCNTR(13),
        PMU_PMEVCNTR(14),
        PMU_PMEVCNTR(15),
        PMU_PMEVCNTR(16),
        PMU_PMEVCNTR(17),
        PMU_PMEVCNTR(18),
        PMU_PMEVCNTR(19),
        PMU_PMEVCNTR(20),
        PMU_PMEVCNTR(21),
        PMU_PMEVCNTR(22),
        PMU_PMEVCNTR(23),
        PMU_PMEVCNTR(24),
        PMU_PMEVCNTR(25),
        PMU_PMEVCNTR(26),
        PMU_PMEVCNTR(27),
        PMU_PMEVCNTR(28),
        PMU_PMEVCNTR(29),
        PMU_PMEVCNTR(30),

        /* PMEVTYPERn */
        PMU_PMEVTYPER(0),
        PMU_PMEVTYPER(1),
        PMU_PMEVTYPER(2),
        PMU_PMEVTYPER(3),
        PMU_PMEVTYPER(4),
        PMU_PMEVTYPER(5),
        PMU_PMEVTYPER(6),
        PMU_PMEVTYPER(7),
        PMU_PMEVTYPER(8),
        PMU_PMEVTYPER(9),
        PMU_PMEVTYPER(10),
        PMU_PMEVTYPER(11),
        PMU_PMEVTYPER(12),
        PMU_PMEVTYPER(13),
        PMU_PMEVTYPER(14),
        PMU_PMEVTYPER(15),
        PMU_PMEVTYPER(16),
        PMU_PMEVTYPER(17),
        PMU_PMEVTYPER(18),
        PMU_PMEVTYPER(19),
        PMU_PMEVTYPER(20),
        PMU_PMEVTYPER(21),
        PMU_PMEVTYPER(22),
        PMU_PMEVTYPER(23),
        PMU_PMEVTYPER(24),
        PMU_PMEVTYPER(25),
        PMU_PMEVTYPER(26),
        PMU_PMEVTYPER(27),
        PMU_PMEVTYPER(28),
        PMU_PMEVTYPER(29),
        PMU_PMEVTYPER(30),

        /* PMCCFILTR */
        { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

        { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
        { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
        { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};

static const struct sys_reg_desc cp15_64_regs[] = {
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
        { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
        { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
        { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
        { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};

2133 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
2134 bool is_32)
2135 {
2136 unsigned int i;
2137
2138 for (i = 0; i < n; i++) {
2139 if (!is_32 && table[i].reg && !table[i].reset) {
2140 kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
2141 return false;
2142 }
2143
2144 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2145 kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
2146 return false;
2147 }
2148 }
2149
2150 return true;
2151 }
2152
2153 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
2154 {
2155 kvm_inject_undefined(vcpu);
2156 return 1;
2157 }
2158
2159 static void perform_access(struct kvm_vcpu *vcpu,
2160 struct sys_reg_params *params,
2161 const struct sys_reg_desc *r)
2162 {
2163 trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2164
2165
2166 if (sysreg_hidden(vcpu, r)) {
2167 kvm_inject_undefined(vcpu);
2168 return;
2169 }
2170
2171
2172
2173
2174
2175
2176 BUG_ON(!r->access);
2177
2178
2179 if (likely(r->access(vcpu, params, r)))
2180 kvm_incr_pc(vcpu);
2181 }
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193 static bool emulate_cp(struct kvm_vcpu *vcpu,
2194 struct sys_reg_params *params,
2195 const struct sys_reg_desc *table,
2196 size_t num)
2197 {
2198 const struct sys_reg_desc *r;
2199
2200 if (!table)
2201 return false;
2202
2203 r = find_reg(params, table, num);
2204
2205 if (r) {
2206 perform_access(vcpu, params, r);
2207 return true;
2208 }
2209
2210
2211 return false;
2212 }
2213
2214 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
2215 struct sys_reg_params *params)
2216 {
2217 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
2218 int cp = -1;
2219
2220 switch (esr_ec) {
2221 case ESR_ELx_EC_CP15_32:
2222 case ESR_ELx_EC_CP15_64:
2223 cp = 15;
2224 break;
2225 case ESR_ELx_EC_CP14_MR:
2226 case ESR_ELx_EC_CP14_64:
2227 cp = 14;
2228 break;
2229 default:
2230 WARN_ON(1);
2231 }
2232
2233 print_sys_reg_msg(params,
2234 "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2235 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2236 kvm_inject_undefined(vcpu);
2237 }
2238
2239
2240
2241
2242
2243
2244 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
2245 const struct sys_reg_desc *global,
2246 size_t nr_global)
2247 {
2248 struct sys_reg_params params;
2249 u64 esr = kvm_vcpu_get_esr(vcpu);
2250 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2251 int Rt2 = (esr >> 10) & 0x1f;
2252
2253 params.CRm = (esr >> 1) & 0xf;
2254 params.is_write = ((esr & 1) == 0);
2255
2256 params.Op0 = 0;
2257 params.Op1 = (esr >> 16) & 0xf;
2258 params.Op2 = 0;
2259 params.CRn = 0;
2260
2261
2262
2263
2264
2265 if (params.is_write) {
2266 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
2267 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
2268 }
2269
2270
2271
2272
2273
2274
2275 if (emulate_cp(vcpu, ¶ms, global, nr_global)) {
2276
2277 if (!params.is_write) {
2278 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
2279 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
2280 }
2281
2282 return 1;
2283 }
2284
2285 unhandled_cp_access(vcpu, ¶ms);
2286 return 1;
2287 }
2288
2289 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
2290
2291
2292
2293
2294
2295
2296 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
2297 {
2298 u8 reg_id = (esr >> 10) & 0xf;
2299 bool valid;
2300
2301 params->is_write = ((esr & 1) == 0);
2302 params->Op0 = 3;
2303 params->Op1 = 0;
2304 params->CRn = 0;
2305 params->CRm = 3;
2306
2307
2308 valid = !params->is_write;
2309
2310 switch (reg_id) {
2311
2312 case 0b0111:
2313 params->Op2 = 0;
2314 break;
2315
2316 case 0b0110:
2317 params->Op2 = 1;
2318 break;
2319
2320 case 0b0101:
2321 params->Op2 = 2;
2322 break;
2323 default:
2324 valid = false;
2325 }
2326
2327 if (valid)
2328 return true;
2329
2330 kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
2331 params->is_write ? "write" : "read", reg_id);
2332 return false;
2333 }
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
2345 {
2346 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2347 u64 esr = kvm_vcpu_get_esr(vcpu);
2348 struct sys_reg_params params;
2349
2350
2351 if (!kvm_esr_cp10_id_to_sys64(esr, ¶ms)) {
2352 kvm_inject_undefined(vcpu);
2353 return 1;
2354 }
2355
2356 if (emulate_sys_reg(vcpu, ¶ms))
2357 vcpu_set_reg(vcpu, Rt, params.regval);
2358
2359 return 1;
2360 }
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
2380 struct sys_reg_params *params)
2381 {
2382 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2383
2384
2385 if (params->is_write) {
2386 unhandled_cp_access(vcpu, params);
2387 return 1;
2388 }
2389
2390 params->Op0 = 3;
2391
2392
2393
2394
2395
2396
2397 if (params->CRm > 3)
2398 params->regval = 0;
2399 else if (!emulate_sys_reg(vcpu, params))
2400 return 1;
2401
2402 vcpu_set_reg(vcpu, Rt, params->regval);
2403 return 1;
2404 }
2405
2406
2407
2408
2409
2410
2411 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2412 struct sys_reg_params *params,
2413 const struct sys_reg_desc *global,
2414 size_t nr_global)
2415 {
2416 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2417
2418 params->regval = vcpu_get_reg(vcpu, Rt);
2419
2420 if (emulate_cp(vcpu, params, global, nr_global)) {
2421 if (!params->is_write)
2422 vcpu_set_reg(vcpu, Rt, params->regval);
2423 return 1;
2424 }
2425
2426 unhandled_cp_access(vcpu, params);
2427 return 1;
2428 }
2429
2430 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
2431 {
2432 return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
2433 }
2434
2435 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
2436 {
2437 struct sys_reg_params params;
2438
2439 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2440
2441
2442
2443
2444
2445
2446
2447 if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
2448 return kvm_emulate_cp15_id_reg(vcpu, ¶ms);
2449
2450 return kvm_handle_cp_32(vcpu, ¶ms, cp15_regs, ARRAY_SIZE(cp15_regs));
2451 }
2452
2453 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
2454 {
2455 return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
2456 }
2457
2458 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
2459 {
2460 struct sys_reg_params params;
2461
2462 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2463
2464 return kvm_handle_cp_32(vcpu, ¶ms, cp14_regs, ARRAY_SIZE(cp14_regs));
2465 }
2466
2467 static bool is_imp_def_sys_reg(struct sys_reg_params *params)
2468 {
2469
2470 return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
2471 }
2472
2473
2474
2475
2476
2477
2478
2479
2480 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
2481 struct sys_reg_params *params)
2482 {
2483 const struct sys_reg_desc *r;
2484
2485 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2486
2487 if (likely(r)) {
2488 perform_access(vcpu, params, r);
2489 return true;
2490 }
2491
2492 if (is_imp_def_sys_reg(params)) {
2493 kvm_inject_undefined(vcpu);
2494 } else {
2495 print_sys_reg_msg(params,
2496 "Unsupported guest sys_reg access at: %lx [%08lx]\n",
2497 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2498 kvm_inject_undefined(vcpu);
2499 }
2500 return false;
2501 }
2502
2503
2504
2505
2506
2507
2508
2509
2510 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2511 {
2512 unsigned long i;
2513
2514 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
2515 if (sys_reg_descs[i].reset)
2516 sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
2517 }
2518
2519
2520
2521
2522
2523 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
2524 {
2525 struct sys_reg_params params;
2526 unsigned long esr = kvm_vcpu_get_esr(vcpu);
2527 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2528
2529 trace_kvm_handle_sys_reg(esr);
2530
2531 params = esr_sys64_to_params(esr);
2532 params.regval = vcpu_get_reg(vcpu, Rt);
2533
2534 if (!emulate_sys_reg(vcpu, ¶ms))
2535 return 1;
2536
2537 if (!params.is_write)
2538 vcpu_set_reg(vcpu, Rt, params.regval);
2539 return 1;
2540 }
2541
2542
2543
2544
2545
2546 static bool index_to_params(u64 id, struct sys_reg_params *params)
2547 {
2548 switch (id & KVM_REG_SIZE_MASK) {
2549 case KVM_REG_SIZE_U64:
2550
2551 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2552 | KVM_REG_ARM_COPROC_MASK
2553 | KVM_REG_ARM64_SYSREG_OP0_MASK
2554 | KVM_REG_ARM64_SYSREG_OP1_MASK
2555 | KVM_REG_ARM64_SYSREG_CRN_MASK
2556 | KVM_REG_ARM64_SYSREG_CRM_MASK
2557 | KVM_REG_ARM64_SYSREG_OP2_MASK))
2558 return false;
2559 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2560 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2561 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2562 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2563 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2564 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2565 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2566 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2567 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2568 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2569 return true;
2570 default:
2571 return false;
2572 }
2573 }
2574
2575 const struct sys_reg_desc *get_reg_by_id(u64 id,
2576 const struct sys_reg_desc table[],
2577 unsigned int num)
2578 {
2579 struct sys_reg_params params;
2580
2581 if (!index_to_params(id, ¶ms))
2582 return NULL;
2583
2584 return find_reg(¶ms, table, num);
2585 }
2586
2587
2588 static const struct sys_reg_desc *
2589 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
2590 const struct sys_reg_desc table[], unsigned int num)
2591
2592 {
2593 const struct sys_reg_desc *r;
2594
2595
2596 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
2597 return NULL;
2598
2599 r = get_reg_by_id(id, table, num);
2600
2601
2602 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
2603 r = NULL;
2604
2605 return r;
2606 }
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616 #define FUNCTION_INVARIANT(reg) \
2617 static void get_##reg(struct kvm_vcpu *v, \
2618 const struct sys_reg_desc *r) \
2619 { \
2620 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
2621 }
2622
2623 FUNCTION_INVARIANT(midr_el1)
2624 FUNCTION_INVARIANT(revidr_el1)
2625 FUNCTION_INVARIANT(clidr_el1)
2626 FUNCTION_INVARIANT(aidr_el1)
2627
2628 static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
2629 {
2630 ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
2631 }
2632
2633
2634 static struct sys_reg_desc invariant_sys_regs[] = {
2635 { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2636 { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
2637 { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2638 { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2639 { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
2640 };
2641
2642 static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
2643 {
2644 const struct sys_reg_desc *r;
2645
2646 r = get_reg_by_id(id, invariant_sys_regs,
2647 ARRAY_SIZE(invariant_sys_regs));
2648 if (!r)
2649 return -ENOENT;
2650
2651 return put_user(r->val, uaddr);
2652 }
2653
2654 static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
2655 {
2656 const struct sys_reg_desc *r;
2657 u64 val;
2658
2659 r = get_reg_by_id(id, invariant_sys_regs,
2660 ARRAY_SIZE(invariant_sys_regs));
2661 if (!r)
2662 return -ENOENT;
2663
2664 if (get_user(val, uaddr))
2665 return -EFAULT;
2666
2667
2668 if (r->val != val)
2669 return -EINVAL;
2670
2671 return 0;
2672 }
2673
2674 static bool is_valid_cache(u32 val)
2675 {
2676 u32 level, ctype;
2677
2678 if (val >= CSSELR_MAX)
2679 return false;
2680
2681
2682 level = (val >> 1);
2683 ctype = (cache_levels >> (level * 3)) & 7;
2684
2685 switch (ctype) {
2686 case 0:
2687 return false;
2688 case 1:
2689 return (val & 1);
2690 case 2:
2691 case 4:
2692 return !(val & 1);
2693 case 3:
2694 return true;
2695 default:
2696 return false;
2697 }
2698 }
2699
2700 static int demux_c15_get(u64 id, void __user *uaddr)
2701 {
2702 u32 val;
2703 u32 __user *uval = uaddr;
2704
2705
2706 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2707 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2708 return -ENOENT;
2709
2710 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2711 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2712 if (KVM_REG_SIZE(id) != 4)
2713 return -ENOENT;
2714 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2715 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2716 if (!is_valid_cache(val))
2717 return -ENOENT;
2718
2719 return put_user(get_ccsidr(val), uval);
2720 default:
2721 return -ENOENT;
2722 }
2723 }
2724
2725 static int demux_c15_set(u64 id, void __user *uaddr)
2726 {
2727 u32 val, newval;
2728 u32 __user *uval = uaddr;
2729
2730
2731 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
2732 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
2733 return -ENOENT;
2734
2735 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
2736 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
2737 if (KVM_REG_SIZE(id) != 4)
2738 return -ENOENT;
2739 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
2740 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
2741 if (!is_valid_cache(val))
2742 return -ENOENT;
2743
2744 if (get_user(newval, uval))
2745 return -EFAULT;
2746
2747
2748 if (newval != get_ccsidr(val))
2749 return -EINVAL;
2750 return 0;
2751 default:
2752 return -ENOENT;
2753 }
2754 }
2755
2756 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
2757 const struct sys_reg_desc table[], unsigned int num)
2758 {
2759 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
2760 const struct sys_reg_desc *r;
2761 u64 val;
2762 int ret;
2763
2764 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
2765 if (!r)
2766 return -ENOENT;
2767
2768 if (r->get_user) {
2769 ret = (r->get_user)(vcpu, r, &val);
2770 } else {
2771 val = __vcpu_sys_reg(vcpu, r->reg);
2772 ret = 0;
2773 }
2774
2775 if (!ret)
2776 ret = put_user(val, uaddr);
2777
2778 return ret;
2779 }
2780
2781 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2782 {
2783 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2784 int err;
2785
2786 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2787 return demux_c15_get(reg->id, uaddr);
2788
2789 err = get_invariant_sys_reg(reg->id, uaddr);
2790 if (err != -ENOENT)
2791 return err;
2792
2793 return kvm_sys_reg_get_user(vcpu, reg,
2794 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2795 }
2796
2797 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
2798 const struct sys_reg_desc table[], unsigned int num)
2799 {
2800 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
2801 const struct sys_reg_desc *r;
2802 u64 val;
2803 int ret;
2804
2805 if (get_user(val, uaddr))
2806 return -EFAULT;
2807
2808 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
2809 if (!r)
2810 return -ENOENT;
2811
2812 if (r->set_user) {
2813 ret = (r->set_user)(vcpu, r, val);
2814 } else {
2815 __vcpu_sys_reg(vcpu, r->reg) = val;
2816 ret = 0;
2817 }
2818
2819 return ret;
2820 }
2821
2822 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
2823 {
2824 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
2825 int err;
2826
2827 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
2828 return demux_c15_set(reg->id, uaddr);
2829
2830 err = set_invariant_sys_reg(reg->id, uaddr);
2831 if (err != -ENOENT)
2832 return err;
2833
2834 return kvm_sys_reg_set_user(vcpu, reg,
2835 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2836 }
2837
2838 static unsigned int num_demux_regs(void)
2839 {
2840 unsigned int i, count = 0;
2841
2842 for (i = 0; i < CSSELR_MAX; i++)
2843 if (is_valid_cache(i))
2844 count++;
2845
2846 return count;
2847 }
2848
2849 static int write_demux_regids(u64 __user *uindices)
2850 {
2851 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
2852 unsigned int i;
2853
2854 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
2855 for (i = 0; i < CSSELR_MAX; i++) {
2856 if (!is_valid_cache(i))
2857 continue;
2858 if (put_user(val | i, uindices))
2859 return -EFAULT;
2860 uindices++;
2861 }
2862 return 0;
2863 }
2864
2865 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
2866 {
2867 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
2868 KVM_REG_ARM64_SYSREG |
2869 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
2870 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
2871 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
2872 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
2873 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
2874 }
2875
2876 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
2877 {
2878 if (!*uind)
2879 return true;
2880
2881 if (put_user(sys_reg_to_index(reg), *uind))
2882 return false;
2883
2884 (*uind)++;
2885 return true;
2886 }
2887
2888 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
2889 const struct sys_reg_desc *rd,
2890 u64 __user **uind,
2891 unsigned int *total)
2892 {
2893
2894
2895
2896
2897 if (!(rd->reg || rd->get_user))
2898 return 0;
2899
2900 if (sysreg_hidden(vcpu, rd))
2901 return 0;
2902
2903 if (!copy_reg_to_user(rd, uind))
2904 return -EFAULT;
2905
2906 (*total)++;
2907 return 0;
2908 }
2909
2910
2911 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
2912 {
2913 const struct sys_reg_desc *i2, *end2;
2914 unsigned int total = 0;
2915 int err;
2916
2917 i2 = sys_reg_descs;
2918 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
2919
2920 while (i2 != end2) {
2921 err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
2922 if (err)
2923 return err;
2924 }
2925 return total;
2926 }
2927
2928 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
2929 {
2930 return ARRAY_SIZE(invariant_sys_regs)
2931 + num_demux_regs()
2932 + walk_sys_regs(vcpu, (u64 __user *)NULL);
2933 }
2934
2935 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
2936 {
2937 unsigned int i;
2938 int err;
2939
2940
2941 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
2942 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
2943 return -EFAULT;
2944 uindices++;
2945 }
2946
2947 err = walk_sys_regs(vcpu, uindices);
2948 if (err < 0)
2949 return err;
2950 uindices += err;
2951
2952 return write_demux_regids(uindices);
2953 }
2954
2955 int kvm_sys_reg_table_init(void)
2956 {
2957 bool valid = true;
2958 unsigned int i;
2959 struct sys_reg_desc clidr;
2960
2961
2962 valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
2963 valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
2964 valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
2965 valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
2966 valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
2967 valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
2968
2969 if (!valid)
2970 return -EINVAL;
2971
2972
2973 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
2974 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986 get_clidr_el1(NULL, &clidr);
2987 cache_levels = clidr.val;
2988 for (i = 0; i < 7; i++)
2989 if (((cache_levels >> (i*3)) & 7) == 0)
2990 break;
2991
2992 cache_levels &= (1 << (i*3))-1;
2993
2994 return 0;
2995 }