#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>

void kvm_hyp_cpu_entry(unsigned long r0);
void kvm_hyp_cpu_resume(unsigned long r0);

void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

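/* Config options set by the host. */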
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;

#define INVALID_CPU_ID	UINT_MAX

struct psci_boot_args {
	atomic_t lock;
	unsigned long pc;
	unsigned long r0;
};

#define PSCI_BOOT_ARGS_UNLOCKED		0
#define PSCI_BOOT_ARGS_LOCKED		1

#define PSCI_BOOT_ARGS_INIT					\
	((struct psci_boot_args){				\
		.lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED),	\
	})

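/* Per-CPU storage for the PC/r0 arguments of pending CPU_ON and CPU_SUSPEND calls. */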
static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

#define is_psci_0_1(what, func_id)					\
	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&	\
	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)

static bool is_psci_0_1_call(u64 func_id)
{
	return (is_psci_0_1(cpu_suspend, func_id) ||
		is_psci_0_1(cpu_on, func_id) ||
		is_psci_0_1(cpu_off, func_id) ||
		is_psci_0_1(migrate, func_id));
}

static bool is_psci_0_2_call(u64 func_id)
{
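	/* PSCI 0.2+ functions occupy IDs 0-31 of the standard SMC32 and SMC64 ranges. */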
	return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}

static unsigned long psci_call(unsigned long fn, unsigned long arg0,
			       unsigned long arg1, unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);
	return res.a0;
}

static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}

static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int i;

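	/* Reject invalid MPIDRs */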
	if (mpidr & ~MPIDR_HWID_BITMASK)
		return INVALID_CPU_ID;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) == mpidr)
			return i;
	}

	return INVALID_CPU_ID;
}

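/*
 * Take the boot-args lock if it is free. Returns false if the lock is
 * already held, i.e. a boot/resume using these args is still in flight.
 */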
static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args)
{
	return atomic_cmpxchg_acquire(&args->lock,
				      PSCI_BOOT_ARGS_UNLOCKED,
				      PSCI_BOOT_ARGS_LOCKED) ==
		PSCI_BOOT_ARGS_UNLOCKED;
}

static __always_inline void release_boot_args(struct psci_boot_args *args)
{
	atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED);
}

static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, mpidr, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	unsigned int cpu_id;
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
	int ret;

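	/*
	 * Find the logical CPU ID for the given MPIDR. The search set is
	 * limited to the CPUs that were online when KVM was initialized;
	 * requests for any other MPIDR are rejected.
	 */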
	cpu_id = find_cpu_id(mpidr);
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
	init_params = per_cpu_ptr(&kvm_init_params, cpu_id);

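	/* Check if the target CPU is already being booted. */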
	if (!try_acquire_boot_args(boot_args))
		return PSCI_RET_ALREADY_ON;

	boot_args->pc = pc;
	boot_args->r0 = r0;
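	/* Make the boot args visible to the target CPU before it is powered on. */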
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(&kvm_hyp_cpu_entry),
			__hyp_pa(init_params));

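	/* If successful, the lock will be released by the target CPU. */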
	if (ret != PSCI_RET_SUCCESS)
		release_boot_args(boot_args);

	return ret;
}

static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, power_state, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

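	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */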
	boot_args->pc = pc;
	boot_args->r0 = r0;

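	/*
	 * Will either return if shallow sleep state, or wake up into the entry
	 * point if it is a deep sleep state.
	 */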
	return psci_call(func_id, power_state,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params));
}

static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, pc, host_ctxt, 1);
	DECLARE_REG(unsigned long, r0, host_ctxt, 2);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

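	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */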
	boot_args->pc = pc;
	boot_args->r0 = r0;

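	/* Will only return on error. */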
	return psci_call(func_id,
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params), 0);
}

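/*
 * Entry point taken at EL2 when a CPU is powered on via CPU_ON or wakes up
 * from a deep sleep state: restore the host's r0 and ELR from the saved boot
 * args and return to the host at the requested PC.
 */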
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
{
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	if (is_cpu_on)
		boot_args = this_cpu_ptr(&cpu_on_args);
	else
		boot_args = this_cpu_ptr(&suspend_args);

	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

	if (is_cpu_on)
		release_boot_args(boot_args);

	__host_enter(host_ctxt);
}

static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
		return psci_forward(host_ctxt);
	if (is_psci_0_1(cpu_on, func_id))
		return psci_cpu_on(func_id, host_ctxt);
	if (is_psci_0_1(cpu_suspend, func_id))
		return psci_cpu_suspend(func_id, host_ctxt);

	return PSCI_RET_NOT_SUPPORTED;
}

static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		return psci_forward(host_ctxt);
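	/*
	 * SYSTEM_OFF/RESET should not return according to the spec.
	 * Allow it so as to stay robust to broken firmware.
	 */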
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		return psci_forward(host_ctxt);
	case PSCI_0_2_FN64_CPU_SUSPEND:
		return psci_cpu_suspend(func_id, host_ctxt);
	case PSCI_0_2_FN64_CPU_ON:
		return psci_cpu_on(func_id, host_ctxt);
	default:
		return PSCI_RET_NOT_SUPPORTED;
	}
}

static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_1_0_FN_PSCI_FEATURES:
	case PSCI_1_0_FN_SET_SUSPEND_MODE:
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		return psci_forward(host_ctxt);
	case PSCI_1_0_FN64_SYSTEM_SUSPEND:
		return psci_system_suspend(func_id, host_ctxt);
	default:
		return psci_0_2_handler(func_id, host_ctxt);
	}
}

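/*
 * Intercept a host SMC if it is a PSCI call for the detected firmware
 * version. Returns true if the call was handled here (with the result in
 * r0 and r1-r3 cleared), false otherwise.
 */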
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
	case PSCI_VERSION(0, 1):
		if (!is_psci_0_1_call(func_id))
			return false;
		ret = psci_0_1_handler(func_id, host_ctxt);
		break;
	case PSCI_VERSION(0, 2):
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_0_2_handler(func_id, host_ctxt);
		break;
	default:
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_1_0_handler(func_id, host_ctxt);
		break;
	}

	cpu_reg(host_ctxt, 0) = ret;
	cpu_reg(host_ctxt, 1) = 0;
	cpu_reg(host_ctxt, 2) = 0;
	cpu_reg(host_ctxt, 3) = 0;
	return true;
}