// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>

#include <kvm/arm_hypercalls.h>

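/*
 * Fold the scheduler run_delay accumulated since the last update into the
 * stolen_time field of the structure shared with the guest, reading and
 * writing the value in little-endian format.
 */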
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
    struct kvm *kvm = vcpu->kvm;
    u64 base = vcpu->arch.steal.base;
    u64 last_steal = vcpu->arch.steal.last_steal;
    u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
    u64 steal = 0;
    int idx;

    if (base == GPA_INVALID)
        return;

    idx = srcu_read_lock(&kvm->srcu);
    if (!kvm_get_guest(kvm, base + offset, steal)) {
        steal = le64_to_cpu(steal);
        vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
        steal += vcpu->arch.steal.last_steal - last_steal;
        kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
    }
    srcu_read_unlock(&kvm->srcu, idx);
}

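/*
 * ARM_SMCCC_HV_PV_TIME_FEATURES: report a queried PV time feature as
 * supported only if stolen time has been enabled for this vCPU (i.e. a
 * base address has been configured by userspace).
 */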
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
    u32 feature = smccc_get_arg1(vcpu);
    long val = SMCCC_RET_NOT_SUPPORTED;

    switch (feature) {
    case ARM_SMCCC_HV_PV_TIME_FEATURES:
    case ARM_SMCCC_HV_PV_TIME_ST:
        if (vcpu->arch.steal.base != GPA_INVALID)
            val = SMCCC_RET_SUCCESS;
        break;
    }

    return val;
}

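/*
 * ARM_SMCCC_HV_PV_TIME_ST: zero the shared stolen time structure in guest
 * memory and return its IPA (GPA_INVALID if the feature is not enabled).
 */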
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
    struct pvclock_vcpu_stolen_time init_values = {};
    struct kvm *kvm = vcpu->kvm;
    u64 base = vcpu->arch.steal.base;

    if (base == GPA_INVALID)
        return base;

    /*
     * Start counting stolen time from the time the guest requests
     * the feature be enabled.
     */
    vcpu->arch.steal.last_steal = current->sched_info.run_delay;
    kvm_write_guest_lock(kvm, base, &init_values, sizeof(init_values));

    return base;
}

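/* Stolen time accounting relies on the scheduler's run_delay statistics. */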
bool kvm_arm_pvtime_supported(void)
{
    return !!sched_info_on();
}

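/*
 * KVM_ARM_VCPU_PVTIME_IPA (set): validate the IPA supplied by userspace
 * (64-byte aligned, not already set, covered by a memslot) and record it
 * as the base of the stolen time structure.
 */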
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
                struct kvm_device_attr *attr)
{
    u64 __user *user = (u64 __user *)attr->addr;
    struct kvm *kvm = vcpu->kvm;
    u64 ipa;
    int ret = 0;
    int idx;

    if (!kvm_arm_pvtime_supported() ||
        attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
        return -ENXIO;

    if (get_user(ipa, user))
        return -EFAULT;
    if (!IS_ALIGNED(ipa, 64))
        return -EINVAL;
    if (vcpu->arch.steal.base != GPA_INVALID)
        return -EEXIST;

    /* Check the address is in a valid memslot */
    idx = srcu_read_lock(&kvm->srcu);
    if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
        ret = -EINVAL;
    srcu_read_unlock(&kvm->srcu, idx);

    if (!ret)
        vcpu->arch.steal.base = ipa;

    return ret;
}

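/* KVM_ARM_VCPU_PVTIME_IPA (get): return the configured base address to userspace. */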
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
                struct kvm_device_attr *attr)
{
    u64 __user *user = (u64 __user *)attr->addr;
    u64 ipa;

    if (!kvm_arm_pvtime_supported() ||
        attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
        return -ENXIO;

    ipa = vcpu->arch.steal.base;

    if (put_user(ipa, user))
        return -EFAULT;
    return 0;
}

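/*
 * KVM_ARM_VCPU_PVTIME_IPA is the only attribute; it is available whenever
 * stolen time is supported on this host.
 */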
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
                struct kvm_device_attr *attr)
{
    switch (attr->attr) {
    case KVM_ARM_VCPU_PVTIME_IPA:
        if (kvm_arm_pvtime_supported())
            return 0;
    }
    return -ENXIO;
}