/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arm64 KVM host definitions: per-VM and per-vCPU state, vCPU flag
 * helpers, and the host/hyp call interface.
 */
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

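/*
 * KVM_REQ_*: deferred work requested for a vCPU, serviced before the next
 * guest entry.
 */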
#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)

#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK
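/*
 * Mode of operation, selectable with the kvm-arm.mode early parameter
 * (see Documentation/admin-guide/kernel-parameters.txt).
 */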
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

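/* VMID (and its generation) backing a stage-2 MMU context. */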
struct kvm_vmid {
	atomic64_t id;
};

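/* Stage-2 translation context: VMID, page tables and per-CPU bookkeeping. */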
struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/* Stage-2 entry-level page table: physical address and descriptor */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

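/*
 * struct kvm_smccc_features: SMCCC/hypercall services exposed to the guest.
 *
 * @std_bmap: bitmap of standard secure service calls
 * @std_hyp_bmap: bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: bitmap of vendor-specific hypervisor service calls
 */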
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64	vtcr;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * Report data aborts without a valid instruction syndrome to
	 * userspace (KVM_CAP_ARM_NISV_TO_USER).
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run at least once */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/*
	 * The following two bits describe the guest's EL1 register width.
	 * KVM_ARCH_FLAG_EL1_32BIT is only valid once
	 * KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED has been set; before that the
	 * register width is still undetermined.
	 */
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED		3
#define KVM_ARCH_FLAG_EL1_32BIT				4
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		5

	unsigned long flags;

	/* VM-wide PMU event filter bitmap and the PMU backing the guest PMU */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	/* CPUs on which the vCPUs of this VM may run */
	cpumask_var_t supported_cpus;

	/* Values of ID_AA64PFR0_EL1.CSV2/CSV3 exposed to the guest */
	u8 pfr0_csv2;
	u8 pfr0_csv3;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
};

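/* Fault state captured at EL2 when a guest exit is caused by an abort. */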
struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

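/*
 * Index into the sys_regs[] array in struct kvm_cpu_context for the guest
 * system registers that KVM keeps in memory; 0 is reserved as invalid.
 */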
enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,
	CSSELR_EL1,
	SCTLR_EL1,
	ACTLR_EL1,
	CPACR_EL1,
	ZCR_EL1,
	TTBR0_EL1,
	TTBR1_EL1,
	TCR_EL1,
	ESR_EL1,
	AFSR0_EL1,
	AFSR1_EL1,
	FAR_EL1,
	MAIR_EL1,
	VBAR_EL1,
	CONTEXTIDR_EL1,
	TPIDR_EL0,
	TPIDRRO_EL0,
	TPIDR_EL1,
	AMAIR_EL1,
	CNTKCTL_EL1,
	PAR_EL1,
	MDSCR_EL1,
	MDCCINT_EL1,
	OSLSR_EL1,
	DISR_EL1,

	/* Performance Monitors registers */
	PMCR_EL0,
	PMSELR_EL0,
	PMEVCNTR0_EL0,
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,
	PMEVTYPER0_EL0,
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,
	PMCNTENSET_EL0,
	PMINTENSET_EL1,
	PMOVSSET_EL0,
	PMUSERENR_EL0,

	/* Pointer Authentication keys */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	/* Timer registers */
	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,
	GCR_EL1,
	TFSR_EL1,
	TFSRE0_EL1,

	/* 32bit specific registers */
	DACR32_EL2,
	IFSR32_EL2,
	FPEXC32_EL2,
	DBGVCR32_EL2,

	NR_SYS_REGS	/* Not really a sysreg; array size marker */
};

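/*
 * Register state for one context (host or guest): GP regs, AArch32 banked
 * SPSRs, FP/SIMD state and the in-memory copies of the system registers.
 */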
struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

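/*
 * Host PSCI configuration, handed to the nVHE hypervisor at init so that it
 * can proxy PSCI calls issued by the host.
 */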
struct kvm_host_psci_config {
	/* PSCI version used by the host. */
	u32 version;

	/* Function IDs used by the host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

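/* PC, r0 and endianness requested for a pending vCPU reset (e.g. PSCI CPU_ON). */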
struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

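/* Architecture-specific per-vCPU state. */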
struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/* Guest floating point (SVE/SME) state */
	void *sve_state;
	unsigned int sve_max_vl;
	u64 svcr;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_state;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or would need to be accessed atomically).
	 */
	bool pause;

	/*
	 * Debug register state: vcpu_debug_state holds the debug registers
	 * as the guest sees them, external_debug_state holds the values
	 * requested via KVM_SET_GUEST_DEBUG, and debug_ptr points at
	 * whichever set must be loaded onto the hardware when running the
	 * guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC, timer and PMU state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging; updated by the
	 * sysreg trap handler if the guest touches them while guest debug is
	 * in use.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power state */
	struct kvm_mp_state mp_state;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV (stolen time) state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};
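/*
 * Each vCPU flag is described by a (flagset, value, mask) triplet, where the
 * flagset names one of the u8 fields above (cflags, iflags, sflags).
 * Single-bit flags use the same value for both the flag and the mask;
 * multi-bit "flags" such as EXCEPT_MASK carry a wider mask so that the set
 * helper clears the whole field before installing the new value.  The
 * __build_check_flag() macro enforces at build time that the value fits
 * within the mask and that the mask fits within the flagset.
 */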
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the word */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		v->arch.flagset & (m);				\
	})

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		*fset &= ~(m);					\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)

/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it cannot be
 * set together with a pending exception.
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE of the pending exception (not a single flag) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK describes the exception to be
 * injected: the AArch32 vector for 32-bit guests, or the target EL and type
 * (sync/IRQ/FIQ/SError) for 64-bit guests.
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 targeting EL2: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
/* Guest debug is live */
#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active */
#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))

/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
/* SME enabled for host EL0 */
#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))

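/*
 * Helpers to locate and size the vcpu's SVE register storage; the FFR
 * pointer is what the hyp SVE save/restore code consumes.
 */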
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)
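/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the memory-backed
 * copy of a register, and not the one most recently loaded onto the CPU by a
 * running vCPU (e.g. for userspace access, or for registers that are never
 * context switched but only emulated).
 */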
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * The registers below are not saved on every exit from the guest but
	 * only on vcpu_put(), so while the vCPU is loaded their values live
	 * in the CPU registers rather than in the in-memory context.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * Same list as above: while the vCPU is loaded these registers live
	 * on the CPU, so write them directly instead of updating the
	 * in-memory context.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

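/*
 * Host-to-hyp call helpers.  On nVHE the call is routed through an HVC to
 * the EL2 code; on VHE the function is called directly since the kernel
 * already runs at EL2.
 */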
#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The isb() in the VHE paths below guarantees the same behaviour as on
 * !VHE, where the eret to EL1 acts as a context synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
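/*
 * Returns true if a Performance Monitoring Interrupt (PMI) arrived in guest
 * context: on arm64, any such event that fires while a vCPU is loaded is
 * attributed to the guest.
 */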
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int kvm_arm_vmid_bits;
int kvm_arm_vmid_alloc_init(void);
void kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

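/* Guest/host FPSIMD coordination helpers */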
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */