// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>

#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

#define AFFINITY_MASK(level)    ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
    if (affinity_level <= 3)
        return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

    return 0;
}
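
/*
 * Illustrative example (assuming MPIDR_LEVEL_BITS == 8, as on arm64):
 * AFFINITY_MASK(1) == ~0xff, so psci_affinity_mask(1) keeps the
 * Aff3..Aff1 fields of the MPIDR and discards Aff0; a level of 0 keeps
 * every field, and any level above 3 yields 0, which callers treat as
 * an invalid parameter.
 */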

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
    /*
     * NOTE: For simplicity, we make VCPU suspend emulation the same as
     * WFI (Wait-for-interrupt) emulation.
     *
     * This means for KVM the wakeup events are interrupts and
     * this is consistent with the intended use of StateID as described
     * in section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
     *
     * Further, we also treat a power-down request the same as a
     * stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
     * specification (ARM DEN 0022A). This means all suspend states
     * for KVM will preserve the register state.
     */
    kvm_vcpu_wfi(vcpu);

    return PSCI_RET_SUCCESS;
}

static inline bool kvm_psci_valid_affinity(struct kvm_vcpu *vcpu,
                       unsigned long affinity)
{
    return !(affinity & ~MPIDR_HWID_BITMASK);
}

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
    struct vcpu_reset_state *reset_state;
    struct kvm *kvm = source_vcpu->kvm;
    struct kvm_vcpu *vcpu = NULL;
    unsigned long cpu_id;

    cpu_id = smccc_get_arg1(source_vcpu);
    if (!kvm_psci_valid_affinity(source_vcpu, cpu_id))
        return PSCI_RET_INVALID_PARAMS;

    vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

    /*
     * Make sure the caller requested a valid CPU and that the CPU is
     * turned off.
     */
    if (!vcpu)
        return PSCI_RET_INVALID_PARAMS;
    if (!kvm_arm_vcpu_stopped(vcpu)) {
        if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
            return PSCI_RET_ALREADY_ON;
        else
            return PSCI_RET_INVALID_PARAMS;
    }

    reset_state = &vcpu->arch.reset_state;

    reset_state->pc = smccc_get_arg2(source_vcpu);

    /* Propagate caller endianness */
    reset_state->be = kvm_vcpu_is_be(source_vcpu);

    /*
     * NOTE: We always update r0 (or x0) because for PSCI v0.1
     * the general purpose registers are undefined upon CPU_ON.
     */
    reset_state->r0 = smccc_get_arg3(source_vcpu);

    WRITE_ONCE(reset_state->reset, true);
    kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

    /*
     * Make sure the reset request is observed if the RUNNABLE mp_state is
     * observed.
     */
    smp_wmb();

    vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
    kvm_vcpu_wake_up(vcpu);

    return PSCI_RET_SUCCESS;
}
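
/*
 * For reference (illustrative, following the SMCCC/PSCI calling
 * convention rather than anything specific to this file): a caller of
 * CPU_ON passes the function ID in r0/x0, the target MPIDR in r1/x1,
 * the entry point address in r2/x2 and a context ID in r3/x3; the
 * handler above copies the entry point into reset_state->pc and the
 * context ID into reset_state->r0 so the target vCPU starts with them.
 */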

static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
    int matching_cpus = 0;
    unsigned long i, mpidr;
    unsigned long target_affinity;
    unsigned long target_affinity_mask;
    unsigned long lowest_affinity_level;
    struct kvm *kvm = vcpu->kvm;
    struct kvm_vcpu *tmp;

    target_affinity = smccc_get_arg1(vcpu);
    lowest_affinity_level = smccc_get_arg2(vcpu);

    if (!kvm_psci_valid_affinity(vcpu, target_affinity))
        return PSCI_RET_INVALID_PARAMS;

    /* Determine target affinity mask */
    target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
    if (!target_affinity_mask)
        return PSCI_RET_INVALID_PARAMS;

    /* Ignore other bits of target affinity */
    target_affinity &= target_affinity_mask;

    /*
     * If one or more VCPUs matching the target affinity are running,
     * report ON; otherwise report OFF.
     */
    kvm_for_each_vcpu(i, tmp, kvm) {
        mpidr = kvm_vcpu_get_mpidr_aff(tmp);
        if ((mpidr & target_affinity_mask) == target_affinity) {
            matching_cpus++;
            if (!kvm_arm_vcpu_stopped(tmp))
                return PSCI_0_2_AFFINITY_LEVEL_ON;
        }
    }

    if (!matching_cpus)
        return PSCI_RET_INVALID_PARAMS;

    return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
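
/*
 * Illustrative example: with lowest_affinity_level == 1 the mask drops
 * Aff0, so a query for target_affinity 0x0100 reports ON if any vCPU
 * whose MPIDR has Aff1 == 1 is not stopped, OFF if all such vCPUs are
 * stopped, and INVALID_PARAMS if no vCPU falls in that cluster.
 */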

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
{
    unsigned long i;
    struct kvm_vcpu *tmp;

    /*
     * The KVM ABI specifies that a system event exit may call KVM_RUN
     * again and may perform shutdown/reboot at a later time than when
     * the actual request is made.  Since we are implementing PSCI and a
     * caller of PSCI reboot and shutdown expects that the system shuts
     * down or reboots immediately, let's make sure that VCPUs are not run
     * after this call is handled and before the VCPUs have been
     * re-initialized.
     */
    kvm_for_each_vcpu(i, tmp, vcpu->kvm)
        tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
    kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

    memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
    vcpu->run->system_event.type = type;
    vcpu->run->system_event.ndata = 1;
    vcpu->run->system_event.data[0] = flags;
    vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
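
/*
 * Illustrative note: a VMM that gets KVM_EXIT_SYSTEM_EVENT back from
 * KVM_RUN is expected to inspect run->system_event.type
 * (SHUTDOWN/RESET/SUSPEND) and, for resets raised via SYSTEM_RESET2
 * below, to look at run->system_event.data[0] for
 * KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2.
 */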

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
    kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN, 0);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
    kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET, 0);
}

static void kvm_psci_system_reset2(struct kvm_vcpu *vcpu)
{
    kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET,
                 KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2);
}

static void kvm_psci_system_suspend(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;

    memset(&run->system_event, 0, sizeof(vcpu->run->system_event));
    run->system_event.type = KVM_SYSTEM_EVENT_SUSPEND;
    run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
{
    int i;

    /*
     * Zero the input registers' upper 32 bits. They will be fully
     * zeroed on exit, so we're fine changing them in place.
     */
    for (i = 1; i < 4; i++)
        vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i)));
}
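
/*
 * Rationale (illustrative): SMC32-style function IDs only carry 32-bit
 * parameters, so for those calls the upper halves of x1-x3 are not
 * meaningful. Narrowing them here lets the 32-bit and 64-bit entry
 * points share the argument handling that follows.
 */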

static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
{
    /*
     * Prevent 32 bit guests from calling 64 bit PSCI functions.
     */
    if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
        return PSCI_RET_NOT_SUPPORTED;

    return 0;
}

static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
    struct kvm *kvm = vcpu->kvm;
    u32 psci_fn = smccc_get_function(vcpu);
    unsigned long val;
    int ret = 1;

    switch (psci_fn) {
    case PSCI_0_2_FN_PSCI_VERSION:
        /*
         * Bits[31:16] = Major Version = 0
         * Bits[15:0] = Minor Version = 2
         */
        val = KVM_ARM_PSCI_0_2;
        break;
    case PSCI_0_2_FN_CPU_SUSPEND:
    case PSCI_0_2_FN64_CPU_SUSPEND:
        val = kvm_psci_vcpu_suspend(vcpu);
        break;
    case PSCI_0_2_FN_CPU_OFF:
        kvm_arm_vcpu_power_off(vcpu);
        val = PSCI_RET_SUCCESS;
        break;
    case PSCI_0_2_FN_CPU_ON:
        kvm_psci_narrow_to_32bit(vcpu);
        fallthrough;
    case PSCI_0_2_FN64_CPU_ON:
        mutex_lock(&kvm->lock);
        val = kvm_psci_vcpu_on(vcpu);
        mutex_unlock(&kvm->lock);
        break;
    case PSCI_0_2_FN_AFFINITY_INFO:
        kvm_psci_narrow_to_32bit(vcpu);
        fallthrough;
    case PSCI_0_2_FN64_AFFINITY_INFO:
        val = kvm_psci_vcpu_affinity_info(vcpu);
        break;
    case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        /*
         * Trusted OS is MP hence does not require migration
         * or
         * Trusted OS is not present
         */
        val = PSCI_0_2_TOS_MP;
        break;
    case PSCI_0_2_FN_SYSTEM_OFF:
        kvm_psci_system_off(vcpu);
        /*
         * We shouldn't be going back to guest VCPU after
         * receiving SYSTEM_OFF request.
         *
         * If user space accidentally/deliberately resumes
         * guest VCPU after SYSTEM_OFF request then guest
         * VCPU should see internal failure from PSCI return
         * value. To achieve this, we preload r0 (or x0) with
         * PSCI return value INTERNAL_FAILURE.
         */
        val = PSCI_RET_INTERNAL_FAILURE;
        ret = 0;
        break;
    case PSCI_0_2_FN_SYSTEM_RESET:
        kvm_psci_system_reset(vcpu);
        /*
         * Same reason as SYSTEM_OFF for preloading r0 (or x0)
         * with PSCI return value INTERNAL_FAILURE.
         */
        val = PSCI_RET_INTERNAL_FAILURE;
        ret = 0;
        break;
    default:
        val = PSCI_RET_NOT_SUPPORTED;
        break;
    }

    smccc_set_retval(vcpu, val, 0, 0, 0);
    return ret;
}
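
/*
 * Illustrative note on the version encoding used by PSCI_VERSION above
 * and in kvm_psci_1_x_call() below: bits [31:16] hold the major version
 * and bits [15:0] the minor version, so v0.2 is reported as 0x00000002
 * and v1.1 as 0x00010001.
 */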

static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
{
    unsigned long val = PSCI_RET_NOT_SUPPORTED;
    u32 psci_fn = smccc_get_function(vcpu);
    struct kvm *kvm = vcpu->kvm;
    u32 arg;
    int ret = 1;

    switch(psci_fn) {
    case PSCI_0_2_FN_PSCI_VERSION:
        val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
        break;
    case PSCI_1_0_FN_PSCI_FEATURES:
        arg = smccc_get_arg1(vcpu);
        val = kvm_psci_check_allowed_function(vcpu, arg);
        if (val)
            break;

        val = PSCI_RET_NOT_SUPPORTED;

        switch(arg) {
        case PSCI_0_2_FN_PSCI_VERSION:
        case PSCI_0_2_FN_CPU_SUSPEND:
        case PSCI_0_2_FN64_CPU_SUSPEND:
        case PSCI_0_2_FN_CPU_OFF:
        case PSCI_0_2_FN_CPU_ON:
        case PSCI_0_2_FN64_CPU_ON:
        case PSCI_0_2_FN_AFFINITY_INFO:
        case PSCI_0_2_FN64_AFFINITY_INFO:
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case PSCI_0_2_FN_SYSTEM_OFF:
        case PSCI_0_2_FN_SYSTEM_RESET:
        case PSCI_1_0_FN_PSCI_FEATURES:
        case ARM_SMCCC_VERSION_FUNC_ID:
            val = 0;
            break;
        case PSCI_1_0_FN_SYSTEM_SUSPEND:
        case PSCI_1_0_FN64_SYSTEM_SUSPEND:
            if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags))
                val = 0;
            break;
        case PSCI_1_1_FN_SYSTEM_RESET2:
        case PSCI_1_1_FN64_SYSTEM_RESET2:
            if (minor >= 1)
                val = 0;
            break;
        }
        break;
    case PSCI_1_0_FN_SYSTEM_SUSPEND:
        kvm_psci_narrow_to_32bit(vcpu);
        fallthrough;
    case PSCI_1_0_FN64_SYSTEM_SUSPEND:
        /*
         * Return directly to userspace without changing the vCPU's
         * registers. Userspace depends on reading the SMCCC parameters
         * to implement SYSTEM_SUSPEND.
         */
        if (test_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags)) {
            kvm_psci_system_suspend(vcpu);
            return 0;
        }
        break;
    case PSCI_1_1_FN_SYSTEM_RESET2:
        kvm_psci_narrow_to_32bit(vcpu);
        fallthrough;
    case PSCI_1_1_FN64_SYSTEM_RESET2:
        if (minor >= 1) {
            arg = smccc_get_arg1(vcpu);

            if (arg <= PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET ||
                arg >= PSCI_1_1_RESET_TYPE_VENDOR_START) {
                kvm_psci_system_reset2(vcpu);
                vcpu_set_reg(vcpu, 0, PSCI_RET_INTERNAL_FAILURE);
                return 0;
            }

            val = PSCI_RET_INVALID_PARAMS;
            break;
        }
        break;
    default:
        return kvm_psci_0_2_call(vcpu);
    }

    smccc_set_retval(vcpu, val, 0, 0, 0);
    return ret;
}
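
/*
 * Illustrative note on the SYSTEM_RESET2 check above: PSCI v1.1 defines
 * architectural reset types starting at 0 (SYSTEM_WARM_RESET) and
 * reserves values with bit 31 set for vendor-specific resets, so the
 * range test forwards both groups to userspace as a RESET event and
 * rejects everything in between as INVALID_PARAMS.
 */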

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
    struct kvm *kvm = vcpu->kvm;
    u32 psci_fn = smccc_get_function(vcpu);
    unsigned long val;

    switch (psci_fn) {
    case KVM_PSCI_FN_CPU_OFF:
        kvm_arm_vcpu_power_off(vcpu);
        val = PSCI_RET_SUCCESS;
        break;
    case KVM_PSCI_FN_CPU_ON:
        mutex_lock(&kvm->lock);
        val = kvm_psci_vcpu_on(vcpu);
        mutex_unlock(&kvm->lock);
        break;
    default:
        val = PSCI_RET_NOT_SUPPORTED;
        break;
    }

    smccc_set_retval(vcpu, val, 0, 0, 0);
    return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
    u32 psci_fn = smccc_get_function(vcpu);
    unsigned long val;

    val = kvm_psci_check_allowed_function(vcpu, psci_fn);
    if (val) {
        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
    }

    switch (kvm_psci_version(vcpu)) {
    case KVM_ARM_PSCI_1_1:
        return kvm_psci_1_x_call(vcpu, 1);
    case KVM_ARM_PSCI_1_0:
        return kvm_psci_1_x_call(vcpu, 0);
    case KVM_ARM_PSCI_0_2:
        return kvm_psci_0_2_call(vcpu);
    case KVM_ARM_PSCI_0_1:
        return kvm_psci_0_1_call(vcpu);
    default:
        return -EINVAL;
    }
}
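
/*
 * Usage sketch (illustrative, guest side; not part of this file): per
 * SMCCC, a guest issues e.g. PSCI_0_2_FN_PSCI_VERSION roughly as
 *
 *    register unsigned long x0 asm("x0") = PSCI_0_2_FN_PSCI_VERSION;
 *    asm volatile("hvc #0" : "+r" (x0) : : "x1", "x2", "x3");
 *    // x0 now holds the version reported by the handlers above
 *
 * KVM traps the HVC, kvm_psci_call() dispatches on the negotiated PSCI
 * version, and smccc_set_retval() places the result back in r0/x0.
 */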