Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * VGIC: KVM DEVICE API
0004  *
0005  * Copyright (C) 2015 ARM Ltd.
0006  * Author: Marc Zyngier <marc.zyngier@arm.com>
0007  */
0008 #include <linux/kvm_host.h>
0009 #include <kvm/arm_vgic.h>
0010 #include <linux/uaccess.h>
0011 #include <asm/kvm_mmu.h>
0012 #include <asm/cputype.h>
0013 #include "vgic.h"
0014 
0015 /* common helpers */
0016 
0017 int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
0018                phys_addr_t addr, phys_addr_t alignment,
0019                phys_addr_t size)
0020 {
0021     if (!IS_VGIC_ADDR_UNDEF(ioaddr))
0022         return -EEXIST;
0023 
0024     if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
0025         return -EINVAL;
0026 
0027     if (addr + size < addr)
0028         return -EINVAL;
0029 
0030     if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
0031         return -E2BIG;
0032 
0033     return 0;
0034 }
0035 
0036 static int vgic_check_type(struct kvm *kvm, int type_needed)
0037 {
0038     if (kvm->arch.vgic.vgic_model != type_needed)
0039         return -ENODEV;
0040     else
0041         return 0;
0042 }
0043 
0044 int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
0045 {
0046     struct vgic_dist *vgic = &kvm->arch.vgic;
0047     int r;
0048 
0049     mutex_lock(&kvm->lock);
0050     switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
0051     case KVM_VGIC_V2_ADDR_TYPE_DIST:
0052         r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
0053         if (!r)
0054             r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
0055                            SZ_4K, KVM_VGIC_V2_DIST_SIZE);
0056         if (!r)
0057             vgic->vgic_dist_base = dev_addr->addr;
0058         break;
0059     case KVM_VGIC_V2_ADDR_TYPE_CPU:
0060         r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
0061         if (!r)
0062             r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
0063                            SZ_4K, KVM_VGIC_V2_CPU_SIZE);
0064         if (!r)
0065             vgic->vgic_cpu_base = dev_addr->addr;
0066         break;
0067     default:
0068         r = -ENODEV;
0069     }
0070 
0071     mutex_unlock(&kvm->lock);
0072 
0073     return r;
0074 }
0075 
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @attr:  pointer to the attribute being retrieved/updated
 * @write: if true set the address in the VM address space, if false read the
 *          address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 *
 * Return: 0 on success; -ENODEV for a model mismatch or unknown attribute,
 * -EFAULT on a failed user access, -ENOENT for a non-existent redistributor
 * region index, or the error propagated from the range checks.
 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
	u64 __user *uaddr = (u64 __user *)attr->addr;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment, size;
	u64 undef_value = VGIC_ADDR_UNDEF;
	u64 addr;
	int r;

	/* Reading a redistributor region addr implies getting the index */
	if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
		if (get_user(addr, uaddr))
			return -EFAULT;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_DIST_SIZE;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_CPU_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		size = KVM_VGIC_V3_DIST_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
		struct vgic_redist_region *rdreg;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			/* Legacy interface: a single rdist region at index 0. */
			r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
			goto out;
		}
		/* On read, report the base of the first region (if any). */
		rdreg = list_first_entry_or_null(&vgic->rd_regions,
						 struct vgic_redist_region, list);
		if (!rdreg)
			addr_ptr = &undef_value;
		else
			addr_ptr = &rdreg->base;
		break;
	}
	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
	{
		struct vgic_redist_region *rdreg;
		u8 index;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;

		index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

		if (write) {
			gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
			u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
			u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

			/* No flags are defined yet; count must be non-zero. */
			if (!count || flags)
				r = -EINVAL;
			else
				r = vgic_v3_set_redist_base(kvm, index,
							    base, count);
			goto out;
		}

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		if (!rdreg) {
			r = -ENOENT;
			goto out;
		}

		/* Re-encode index | base | count into the value read back. */
		addr = index;
		addr |= rdreg->base;
		addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
		goto out;
	}
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	/* Common path for the fixed-size regions selected above. */
	if (write) {
		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
		if (!r)
			*addr_ptr = addr;
	} else {
		addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);

	/* Copy the result out only after dropping the lock. */
	if (!r && !write)
		r =  put_user(addr, uaddr);

	return r;
}
0201 
/*
 * Handle the device attribute groups shared by GICv2 and GICv3: base
 * addresses, the number of IRQs, and the control group (init and, for v3
 * only, saving the pending tables).
 *
 * Returns 0 on success, -ENXIO for unhandled groups/attributes, -EFAULT on
 * user-access failure, -EINVAL for an out-of-range IRQ count, or -EBUSY when
 * the vgic is already configured/in use.
 */
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		r = kvm_vgic_addr(dev->kvm, attr, true);
		/* The device API contract uses -ENXIO, not -ENODEV. */
		return (r == -ENODEV) ? -ENXIO : r;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		/* nr_spis can only be set once, and not after vgic_ready(). */
		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			/*
			 * OK, this one isn't common at all, but we
			 * want to handle all control group attributes
			 * in a single place.
			 */
			if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
				return -ENXIO;
			mutex_lock(&dev->kvm->lock);

			/* All vcpus must be quiesced while the tables are saved. */
			if (!lock_all_vcpus(dev->kvm)) {
				mutex_unlock(&dev->kvm->lock);
				return -EBUSY;
			}
			r = vgic_v3_save_pending_tables(dev->kvm);
			unlock_all_vcpus(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
0274 
0275 static int vgic_get_common_attr(struct kvm_device *dev,
0276                 struct kvm_device_attr *attr)
0277 {
0278     int r = -ENXIO;
0279 
0280     switch (attr->group) {
0281     case KVM_DEV_ARM_VGIC_GRP_ADDR:
0282         r = kvm_vgic_addr(dev->kvm, attr, false);
0283         return (r == -ENODEV) ? -ENXIO : r;
0284     case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
0285         u32 __user *uaddr = (u32 __user *)(long)attr->addr;
0286 
0287         r = put_user(dev->kvm->arch.vgic.nr_spis +
0288                  VGIC_NR_PRIVATE_IRQS, uaddr);
0289         break;
0290     }
0291     }
0292 
0293     return r;
0294 }
0295 
0296 static int vgic_create(struct kvm_device *dev, u32 type)
0297 {
0298     return kvm_vgic_create(dev->kvm, type);
0299 }
0300 
/* kvm_device_ops .destroy hook: only the device wrapper itself is freed. */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
0305 
0306 int kvm_register_vgic_device(unsigned long type)
0307 {
0308     int ret = -ENODEV;
0309 
0310     switch (type) {
0311     case KVM_DEV_TYPE_ARM_VGIC_V2:
0312         ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
0313                           KVM_DEV_TYPE_ARM_VGIC_V2);
0314         break;
0315     case KVM_DEV_TYPE_ARM_VGIC_V3:
0316         ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
0317                           KVM_DEV_TYPE_ARM_VGIC_V3);
0318 
0319         if (ret)
0320             break;
0321         ret = kvm_vgic_register_its_device();
0322         break;
0323     }
0324 
0325     return ret;
0326 }
0327 
0328 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
0329                struct vgic_reg_attr *reg_attr)
0330 {
0331     int cpuid;
0332 
0333     cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
0334          KVM_DEV_ARM_VGIC_CPUID_SHIFT;
0335 
0336     if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
0337         return -EINVAL;
0338 
0339     reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
0340     reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
0341 
0342     return 0;
0343 }
0344 
0345 /* unlocks vcpus from @vcpu_lock_idx and smaller */
0346 static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
0347 {
0348     struct kvm_vcpu *tmp_vcpu;
0349 
0350     for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
0351         tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
0352         mutex_unlock(&tmp_vcpu->mutex);
0353     }
0354 }
0355 
0356 void unlock_all_vcpus(struct kvm *kvm)
0357 {
0358     unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
0359 }
0360 
/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	unsigned long c;

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run and fiddle with the vgic state while we
	 * access it.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		/*
		 * trylock, not lock: a vcpu currently running holds its own
		 * mutex, and blocking here would deadlock. On failure, roll
		 * back the mutexes already taken (indices 0..c-1) and bail.
		 */
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}
0382 
/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 *
 * Return: 0 on success; -EFAULT on user-access failure, -EBUSY when the
 * vcpus cannot all be locked, -EINVAL for an unhandled group, or the error
 * from vgic initialization or the uaccess helpers.
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 val;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	/* Fetch the value to write before taking any locks. */
	if (is_write)
		if (get_user(val, uaddr))
			return -EFAULT;

	mutex_lock(&dev->kvm->lock);

	/* Lazily initialize the vgic on first register access. */
	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	/* No vcpu may run while its GIC state is being read or written. */
	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);

	/* Copy the read value out only after dropping the locks. */
	if (!ret && !is_write)
		ret = put_user(val, uaddr);

	return ret;
}
0444 
0445 static int vgic_v2_set_attr(struct kvm_device *dev,
0446                 struct kvm_device_attr *attr)
0447 {
0448     switch (attr->group) {
0449     case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
0450     case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
0451         return vgic_v2_attr_regs_access(dev, attr, true);
0452     default:
0453         return vgic_set_common_attr(dev, attr);
0454     }
0455 }
0456 
0457 static int vgic_v2_get_attr(struct kvm_device *dev,
0458                 struct kvm_device_attr *attr)
0459 {
0460     switch (attr->group) {
0461     case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
0462     case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
0463         return vgic_v2_attr_regs_access(dev, attr, false);
0464     default:
0465         return vgic_get_common_attr(dev, attr);
0466     }
0467 }
0468 
0469 static int vgic_v2_has_attr(struct kvm_device *dev,
0470                 struct kvm_device_attr *attr)
0471 {
0472     switch (attr->group) {
0473     case KVM_DEV_ARM_VGIC_GRP_ADDR:
0474         switch (attr->attr) {
0475         case KVM_VGIC_V2_ADDR_TYPE_DIST:
0476         case KVM_VGIC_V2_ADDR_TYPE_CPU:
0477             return 0;
0478         }
0479         break;
0480     case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
0481     case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
0482         return vgic_v2_has_attr_regs(dev, attr);
0483     case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
0484         return 0;
0485     case KVM_DEV_ARM_VGIC_GRP_CTRL:
0486         switch (attr->attr) {
0487         case KVM_DEV_ARM_VGIC_CTRL_INIT:
0488             return 0;
0489         }
0490     }
0491     return -ENXIO;
0492 }
0493 
/* Device-op table registered for KVM_DEV_TYPE_ARM_VGIC_V2. */
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};
0502 
0503 int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
0504                struct vgic_reg_attr *reg_attr)
0505 {
0506     unsigned long vgic_mpidr, mpidr_reg;
0507 
0508     /*
0509      * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
0510      * attr might not hold MPIDR. Hence assume vcpu0.
0511      */
0512     if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
0513         vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
0514                   KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
0515 
0516         mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
0517         reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
0518     } else {
0519         reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
0520     }
0521 
0522     if (!reg_attr->vcpu)
0523         return -EINVAL;
0524 
0525     reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
0526 
0527     return 0;
0528 }
0529 
/**
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 *
 * Return: 0 on success; -EFAULT on user-access failure, -EBUSY when the
 * vgic is not initialized or the vcpus cannot all be locked, -EINVAL for an
 * unhandled group/attribute, or the error from the uaccess helpers.
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	bool uaccess;
	u32 val;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		/* Sysregs uaccess is performed by the sysreg handling code */
		uaccess = false;
		break;
	default:
		uaccess = true;
	}

	/* Fetch the value to write before taking any locks. */
	if (uaccess && is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		if (get_user(val, uaddr))
			return -EFAULT;
	}

	mutex_lock(&dev->kvm->lock);

	/* Unlike v2, the vgic must already have been initialized. */
	if (unlikely(!vgic_initialized(dev->kvm))) {
		ret = -EBUSY;
		goto out;
	}

	/* No vcpu may run while its GIC state is being read or written. */
	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
		break;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		unsigned int info, intid;

		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
			intid = attr->attr &
				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
							      intid, &val);
		} else {
			ret = -EINVAL;
		}
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);

	/* Copy the read value out only after dropping the locks. */
	if (!ret && uaccess && !is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		ret = put_user(val, uaddr);
	}

	return ret;
}
0623 
0624 static int vgic_v3_set_attr(struct kvm_device *dev,
0625                 struct kvm_device_attr *attr)
0626 {
0627     switch (attr->group) {
0628     case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
0629     case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
0630     case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
0631     case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
0632         return vgic_v3_attr_regs_access(dev, attr, true);
0633     default:
0634         return vgic_set_common_attr(dev, attr);
0635     }
0636 }
0637 
0638 static int vgic_v3_get_attr(struct kvm_device *dev,
0639                 struct kvm_device_attr *attr)
0640 {
0641     switch (attr->group) {
0642     case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
0643     case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
0644     case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
0645     case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
0646         return vgic_v3_attr_regs_access(dev, attr, false);
0647     default:
0648         return vgic_get_common_attr(dev, attr);
0649     }
0650 }
0651 
0652 static int vgic_v3_has_attr(struct kvm_device *dev,
0653                 struct kvm_device_attr *attr)
0654 {
0655     switch (attr->group) {
0656     case KVM_DEV_ARM_VGIC_GRP_ADDR:
0657         switch (attr->attr) {
0658         case KVM_VGIC_V3_ADDR_TYPE_DIST:
0659         case KVM_VGIC_V3_ADDR_TYPE_REDIST:
0660         case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
0661             return 0;
0662         }
0663         break;
0664     case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
0665     case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
0666     case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
0667         return vgic_v3_has_attr_regs(dev, attr);
0668     case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
0669         return 0;
0670     case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
0671         if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
0672               KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
0673               VGIC_LEVEL_INFO_LINE_LEVEL)
0674             return 0;
0675         break;
0676     }
0677     case KVM_DEV_ARM_VGIC_GRP_CTRL:
0678         switch (attr->attr) {
0679         case KVM_DEV_ARM_VGIC_CTRL_INIT:
0680             return 0;
0681         case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
0682             return 0;
0683         }
0684     }
0685     return -ENXIO;
0686 }
0687 
/* Device-op table registered for KVM_DEV_TYPE_ARM_VGIC_V3. */
struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};