/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AArch64 processor specific defines for KVM selftests: register-id
 * helpers, exception vector/ESR definitions, MMIO accessors, barriers,
 * and SMCCC call support.
 */
0007 #ifndef SELFTEST_KVM_PROCESSOR_H
0008 #define SELFTEST_KVM_PROCESSOR_H
0009
0010 #include "kvm_util.h"
0011 #include <linux/stringify.h>
0012 #include <linux/types.h>
0013 #include <asm/sysreg.h>
0014
0015
/*
 * Build the KVM_{GET,SET}_ONE_REG id for a "core" register, i.e. a field
 * of struct kvm_regs; @x is the field expression consumed by
 * KVM_REG_ARM_CORE_REG() (e.g. regs.pc).
 */
#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/*
 * Convert a system-register encoding produced by <asm/sysreg.h> into the
 * equivalent KVM_REG_ARM64 register id by unpacking its Op0/Op1/CRn/CRm/Op2
 * fields and feeding them to ARM64_SYS_REG().
 */
#define KVM_ARM64_SYS_REG(sys_reg_id)			\
	ARM64_SYS_REG(sys_reg_Op0(sys_reg_id),		\
		      sys_reg_Op1(sys_reg_id),		\
		      sys_reg_CRn(sys_reg_id),		\
		      sys_reg_CRm(sys_reg_id),		\
		      sys_reg_Op2(sys_reg_id))
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
/*
 * Default MAIR_EL1 value: one 8-bit memory attribute per index.
 *
 *                    index   attribute
 * DEVICE_nGnRnE        0     0000:0000
 * DEVICE_nGnRE         1     0000:0100
 * DEVICE_GRE           2     0000:1100
 * NORMAL_NC            3     0100:0100
 * NORMAL               4     1111:1111
 * NORMAL_WT            5     1011:1011
 */
#define DEFAULT_MAIR_EL1 ((0x00ul << (0 * 8)) | \
			  (0x04ul << (1 * 8)) | \
			  (0x0cul << (2 * 8)) | \
			  (0x44ul << (3 * 8)) | \
			  (0xfful << (4 * 8)) | \
			  (0xbbul << (5 * 8)))

/* MPIDR_EL1 affinity fields: Aff3 (bits [39:32]) and Aff2..Aff0 (bits [23:0]). */
#define MPIDR_HWID_BITMASK (0xff00fffffful)
0049
/*
 * Apply architecture-specific setup to @vcpu according to @init (a NULL
 * @init presumably selects a default target/features — confirm against the
 * implementation in the library source).
 */
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
/*
 * Add a vCPU with id @vcpu_id to @vm, configured per @init, that starts
 * executing at @guest_code; returns the new vcpu.
 */
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code);
0053
/*
 * Register state handed to guest exception handlers.  Layout must match
 * the save/restore done by the assembly exception stubs — do not reorder.
 */
struct ex_regs {
	u64 regs[31];	/* general purpose registers x0..x30 */
	u64 sp;		/* stack pointer at the time of the exception */
	u64 pc;		/* faulting/return address (ELR) */
	u64 pstate;	/* saved processor state (SPSR) */
};
0060
/* Number of entries in the AArch64 exception vector table. */
#define VECTOR_NUM	16

/*
 * Exception vector indices, in the architectural VBAR_EL1 table order:
 * four exception types (sync, IRQ, FIQ, SError) for each of the four
 * source contexts (current EL with SP_EL0, current EL with SP_ELx,
 * lower EL using AArch64, lower EL using AArch32).
 */
enum {
	VECTOR_SYNC_CURRENT_SP0,	/* current EL, SP_EL0 */
	VECTOR_IRQ_CURRENT_SP0,
	VECTOR_FIQ_CURRENT_SP0,
	VECTOR_ERROR_CURRENT_SP0,

	VECTOR_SYNC_CURRENT,		/* current EL, SP_ELx */
	VECTOR_IRQ_CURRENT,
	VECTOR_FIQ_CURRENT,
	VECTOR_ERROR_CURRENT,

	VECTOR_SYNC_LOWER_64,		/* lower EL, AArch64 */
	VECTOR_IRQ_LOWER_64,
	VECTOR_FIQ_LOWER_64,
	VECTOR_ERROR_LOWER_64,

	VECTOR_SYNC_LOWER_32,		/* lower EL, AArch32 */
	VECTOR_IRQ_LOWER_32,
	VECTOR_FIQ_LOWER_32,
	VECTOR_ERROR_LOWER_32,
};

/* True for the four synchronous-exception vectors. */
#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 || \
			   (v) == VECTOR_SYNC_CURRENT     || \
			   (v) == VECTOR_SYNC_LOWER_64    || \
			   (v) == VECTOR_SYNC_LOWER_32)
0089
/* ESR_ELx.EC is a 6-bit field at bits [31:26], so 64 exception classes. */
#define ESR_EC_NUM		64
#define ESR_EC_SHIFT		26
#define ESR_EC_MASK		(ESR_EC_NUM - 1)

/* Exception class values of interest (see the Arm ARM, ESR_ELx.EC). */
#define ESR_EC_SVC64		0x15	/* SVC from AArch64 state */
#define ESR_EC_HW_BP_CURRENT	0x31	/* hardware breakpoint, current EL */
#define ESR_EC_SSTEP_CURRENT	0x33	/* software step, current EL */
#define ESR_EC_WP_CURRENT	0x35	/* watchpoint, current EL */
#define ESR_EC_BRK_INS		0x3c	/* BRK instruction (AArch64) */
0099
/*
 * Report which translation granules (4K/16K/64K) are usable with an IPA
 * size of @ipa bits, via the three output booleans.
 */
void aarch64_get_supported_page_sizes(uint32_t ipa,
				      bool *ps4k, bool *ps16k, bool *ps64k);

/* Set up the VM-wide and per-vCPU exception vector tables, respectively. */
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);

/* Guest exception handler: receives the saved register state. */
typedef void(*handler_fn)(struct ex_regs *);
/* Install @handler for exception vector @vector (see the vector enum). */
void vm_install_exception_handler(struct kvm_vm *vm,
				  int vector, handler_fn handler);
/*
 * Install @handler for a synchronous vector @vector, dispatched on
 * exception class @ec (an ESR_EC_* value).
 */
void vm_install_sync_handler(struct kvm_vm *vm,
			     int vector, int ec, handler_fn handler);
0111
/* Busy-wait hint: "yield" tells the CPU this is a spin loop. */
static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}
0116
/* AArch64 barrier primitives; @opt is the domain/type (e.g. ish, oshst). */
#define isb()		asm volatile("isb" : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")

/* DMA write barrier: outer-shareable, stores only. */
#define dma_wmb()	dmb(oshst)
#define __iowmb()	dma_wmb()

/* DMA read barrier: outer-shareable, loads only. */
#define dma_rmb()	dmb(oshld)
0125
/*
 * Read barrier for MMIO: order the just-read value @v before any
 * subsequent instructions.
 */
#define __iormb(v)							\
({									\
	unsigned long tmp;						\
									\
	dma_rmb();							\
									\
	/*								\
	 * Courtesy of arch/arm64/include/asm/io.h: create a dummy	\
	 * control dependency from the IO read to any later		\
	 * instructions.  "eor %0, %1, %1" always produces zero, so	\
	 * the "cbnz" branch to "." is never taken, but the data	\
	 * dependency on @v forces the read to complete first.		\
	 */								\
	asm volatile("eor	%0, %1, %1\n"				\
		     "cbnz	%0, ."					\
		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
		     : "memory");					\
})
0143
/* Raw 32-bit MMIO store: no barrier, no endianness conversion. */
static __always_inline void __raw_writel(u32 val, volatile void *addr)
{
	/* "rZ" lets the compiler use the zero register for val == 0. */
	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}
0148
/* Raw 32-bit MMIO load: no barrier, no endianness conversion. */
static __always_inline u32 __raw_readl(const volatile void *addr)
{
	u32 val;
	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}
0155
/* Relaxed accessors: little-endian conversion only, no ordering. */
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })

/* Ordered accessors: writel orders prior writes before the store via
 * __iowmb(); readl orders the load before later instructions via __iormb().
 */
#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c));})
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
0161
/* Unmask IRQ and FIQ (DAIF bits I and F; immediate #3 = 0b0011). */
static inline void local_irq_enable(void)
{
	asm volatile("msr daifclr, #3" : : : "memory");
}
0166
/* Mask IRQ and FIQ (DAIF bits I and F; immediate #3 = 0b0011). */
static inline void local_irq_disable(void)
{
	asm volatile("msr daifset, #3" : : : "memory");
}
0171
0172
0173
0174
0175
/*
 * Result of an SMCCC call: the four return values (presumably registers
 * x0-x3 per the SMC Calling Convention — confirm against smccc_hvc()'s
 * implementation).
 */
struct arm_smccc_res {
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
};
0182
0183
0184
0185
0186
0187
0188
0189
/*
 * smccc_hvc - Invoke an SMCCC function using the "hvc" conduit.
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments (presumably passed in x1-x7 per
 *             the SMC Calling Convention — confirm in the implementation)
 * @res: output; receives the call's return values
 */
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);

/* Return the calling vCPU's id (for use from guest code). */
uint32_t guest_get_vcpuid(void);
0195
0196 #endif