// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2019, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

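/*
 * Perform a load on the ESB management page of an interrupt source and
 * return the PQ state byte. @offset selects the ESB special load
 * operation (one of the XIVE_ESB_* offsets).
 */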
static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
{
    u64 val;

    /*
     * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10
     * load operation, so there is no need to enforce load-after-store
     * ordering.
     */

    val = in_be64(xd->eoi_mmio + offset);
    return (u8)val;
}

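/*
 * Disable the event queue for @prio in OPAL and drop the reference
 * taken on the guest queue page when the queue was configured.
 */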
static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
{
    struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
    struct xive_q *q = &xc->queues[prio];

    xive_native_disable_queue(xc->vp_id, q, prio);
    if (q->qpage) {
        put_page(virt_to_page(q->qpage));
        q->qpage = NULL;
    }
}

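/*
 * Wrapper around xive_native_configure_queue() which releases the
 * reference on the previous queue page, if any, only when the new
 * configuration succeeds.
 */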
static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
                          u8 prio, __be32 *qpage,
                          u32 order, bool can_escalate)
{
    int rc;
    __be32 *qpage_prev = q->qpage;

    rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
                     can_escalate);
    if (rc)
        return rc;

    if (qpage_prev)
        put_page(virt_to_page(qpage_prev));

    return rc;
}

void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
    struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
    int i;

    if (!kvmppc_xive_enabled(vcpu))
        return;

    if (!xc)
        return;

    pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);

    /* Ensure no interrupt is still routed to that VP */
    xc->valid = false;
    kvmppc_xive_disable_vcpu_interrupts(vcpu);

    /* Free escalations */
    for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
        /* Free the escalation irq */
        if (xc->esc_virq[i]) {
            if (kvmppc_xive_has_single_escalation(xc->xive))
                xive_cleanup_single_escalation(vcpu, xc,
                            xc->esc_virq[i]);
            free_irq(xc->esc_virq[i], vcpu);
            irq_dispose_mapping(xc->esc_virq[i]);
            kfree(xc->esc_virq_names[i]);
            xc->esc_virq[i] = 0;
        }
    }

    /* Disable the VP */
    xive_native_disable_vp(xc->vp_id);

    /* Clear the cam word so guest entry won't try to push context */
    vcpu->arch.xive_cam_word = 0;

    /* Free the queues */
    for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
        kvmppc_xive_native_cleanup_queue(vcpu, i);
    }

    /* Free the VP */
    kfree(xc);

    /* Cleanup the vcpu */
    vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
    vcpu->arch.xive_vcpu = NULL;
}

int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
                    struct kvm_vcpu *vcpu, u32 server_num)
{
    struct kvmppc_xive *xive = dev->private;
    struct kvmppc_xive_vcpu *xc = NULL;
    int rc;
    u32 vp_id;

    pr_devel("native_connect_vcpu(server=%d)\n", server_num);

    if (dev->ops != &kvm_xive_native_ops) {
        pr_devel("Wrong ops !\n");
        return -EPERM;
    }
    if (xive->kvm != vcpu->kvm)
        return -EPERM;
    if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
        return -EBUSY;

    mutex_lock(&xive->lock);

    rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id);
    if (rc)
        goto bail;

    xc = kzalloc(sizeof(*xc), GFP_KERNEL);
    if (!xc) {
        rc = -ENOMEM;
        goto bail;
    }

    vcpu->arch.xive_vcpu = xc;
    xc->xive = xive;
    xc->vcpu = vcpu;
    xc->server_num = server_num;

    xc->vp_id = vp_id;
    xc->valid = true;
    vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;

    rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
    if (rc) {
        pr_err("Failed to get VP info from OPAL: %d\n", rc);
        goto bail;
    }

    if (!kvmppc_xive_check_save_restore(vcpu)) {
        pr_err("inconsistent save-restore setup for VCPU %d\n", server_num);
        rc = -EIO;
        goto bail;
    }

    /*
     * Enable the VP first, as the single escalation mode will
     * affect the numbering of the escalation interrupts.
     */
    rc = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
    if (rc) {
        pr_err("Failed to enable VP in OPAL: %d\n", rc);
        goto bail;
    }

    /* Configure VCPU fields for use by assembly push/pull */
    vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
    vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

    /* TODO: reset all queues to a clean state ? */
bail:
    mutex_unlock(&xive->lock);
    if (rc)
        kvmppc_xive_native_cleanup_vcpu(vcpu);

    return rc;
}

/*
 * Device passthrough support
 */
static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
{
    struct kvmppc_xive *xive = kvm->arch.xive;
    pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2;

    if (irq >= KVMPPC_XIVE_NR_IRQS)
        return -EINVAL;

    /*
     * Clear the ESB pages of the IRQ number being mapped (or
     * unmapped) into the guest and let the VM fault handler
     * repopulate with the appropriate ESB pages (device or IC)
     */
    pr_debug("clearing esb pages for girq 0x%lx\n", irq);
    mutex_lock(&xive->mapping_lock);
    if (xive->mapping)
        unmap_mapping_range(xive->mapping,
                    esb_pgoff << PAGE_SHIFT,
                    2ull << PAGE_SHIFT, 1);
    mutex_unlock(&xive->mapping_lock);
    return 0;
}

static struct kvmppc_xive_ops kvmppc_xive_native_ops =  {
    .reset_mapped = kvmppc_xive_native_reset_mapped,
};

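/*
 * Fault handler for the ESB pages mapped into the guest through the
 * KVM device mmap(): inserts the trigger page (even offset) or the
 * EOI/management page (odd offset) of the source backing the faulting
 * address.
 */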
static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
{
    struct vm_area_struct *vma = vmf->vma;
    struct kvm_device *dev = vma->vm_file->private_data;
    struct kvmppc_xive *xive = dev->private;
    struct kvmppc_xive_src_block *sb;
    struct kvmppc_xive_irq_state *state;
    struct xive_irq_data *xd;
    u32 hw_num;
    u16 src;
    u64 page;
    unsigned long irq;
    u64 page_offset;

    /*
     * Linux/KVM uses a two-page ESB setting, one page for trigger
     * and one for EOI
     */
    page_offset = vmf->pgoff - vma->vm_pgoff;
    irq = page_offset / 2;

    sb = kvmppc_xive_find_source(xive, irq, &src);
    if (!sb) {
        pr_devel("%s: source %lx not found !\n", __func__, irq);
        return VM_FAULT_SIGBUS;
    }

    state = &sb->irq_state[src];

    /* Some sanity checking */
    if (!state->valid) {
        pr_devel("%s: source %lx invalid !\n", __func__, irq);
        return VM_FAULT_SIGBUS;
    }

    kvmppc_xive_select_irq(state, &hw_num, &xd);

    arch_spin_lock(&sb->lock);

    /*
     * first/even page is for trigger
     * second/odd page is for EOI and management.
     */
    page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
    arch_spin_unlock(&sb->lock);

    if (WARN_ON(!page)) {
        pr_err("%s: accessing invalid ESB page for source %lx !\n",
               __func__, irq);
        return VM_FAULT_SIGBUS;
    }

    vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
    return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct xive_native_esb_vmops = {
    .fault = xive_native_esb_fault,
};

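/*
 * Fault handler for the Thread Interrupt Management Area pages. Only
 * the OS view of the TIMA is exposed to the guest; the HW and HV views
 * are forbidden and the USER view is not supported yet.
 */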
static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
{
    struct vm_area_struct *vma = vmf->vma;

    switch (vmf->pgoff - vma->vm_pgoff) {
    case 0: /* HW - forbid access */
    case 1: /* HV - forbid access */
        return VM_FAULT_SIGBUS;
    case 2: /* OS */
        vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
        return VM_FAULT_NOPAGE;
    case 3: /* USER - TODO */
    default:
        return VM_FAULT_SIGBUS;
    }
}

static const struct vm_operations_struct xive_native_tima_vmops = {
    .fault = xive_native_tima_fault,
};

static int kvmppc_xive_native_mmap(struct kvm_device *dev,
                   struct vm_area_struct *vma)
{
    struct kvmppc_xive *xive = dev->private;

    /* We only allow mappings at fixed offset for now */
    if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
        if (vma_pages(vma) > 4)
            return -EINVAL;
        vma->vm_ops = &xive_native_tima_vmops;
    } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
        if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
            return -EINVAL;
        vma->vm_ops = &xive_native_esb_vmops;
    } else {
        return -EINVAL;
    }

    vma->vm_flags |= VM_IO | VM_PFNMAP;
    vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

    /*
     * Grab the KVM device file address_space so that the ESB page
     * mappings can be cleared when a device is passed through into
     * the guest.
     */
    xive->mapping = vma->vm_file->f_mapping;
    return 0;
}

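/*
 * KVM_DEV_XIVE_GRP_SOURCE: create an interrupt source. The backing IPI
 * is allocated on the first call and the source starts off masked.
 */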
static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
                     u64 addr)
{
    struct kvmppc_xive_src_block *sb;
    struct kvmppc_xive_irq_state *state;
    u64 __user *ubufp = (u64 __user *) addr;
    u64 val;
    u16 idx;
    int rc;

    pr_devel("%s irq=0x%lx\n", __func__, irq);

    if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
        return -E2BIG;

    sb = kvmppc_xive_find_source(xive, irq, &idx);
    if (!sb) {
        pr_debug("No source, creating source block...\n");
        sb = kvmppc_xive_create_src_block(xive, irq);
        if (!sb) {
            pr_err("Failed to create block...\n");
            return -ENOMEM;
        }
    }
    state = &sb->irq_state[idx];

    if (get_user(val, ubufp)) {
        pr_err("fault getting user info !\n");
        return -EFAULT;
    }

    arch_spin_lock(&sb->lock);

    /*
     * If the source doesn't already have an IPI, allocate
     * one and get the corresponding data
     */
    if (!state->ipi_number) {
        state->ipi_number = xive_native_alloc_irq();
        if (state->ipi_number == 0) {
            pr_err("Failed to allocate IRQ !\n");
            rc = -ENXIO;
            goto unlock;
        }
        xive_native_populate_irq_data(state->ipi_number,
                          &state->ipi_data);
        pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
             state->ipi_number, irq);
    }

    /* Restore LSI state */
    if (val & KVM_XIVE_LEVEL_SENSITIVE) {
        state->lsi = true;
        if (val & KVM_XIVE_LEVEL_ASSERTED)
            state->asserted = true;
        pr_devel("  LSI ! Asserted=%d\n", state->asserted);
    }

    /* Mask IRQ to start with */
    state->act_server = 0;
    state->act_priority = MASKED;
    xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
    xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);

    /* Increment the number of valid sources and mark this one valid */
    if (!state->valid)
        xive->src_count++;
    state->valid = true;

    rc = 0;

unlock:
    arch_spin_unlock(&sb->lock);

    return rc;
}

static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
                    struct kvmppc_xive_src_block *sb,
                    struct kvmppc_xive_irq_state *state,
                    u32 server, u8 priority, bool masked,
                    u32 eisn)
{
    struct kvm *kvm = xive->kvm;
    u32 hw_num;
    int rc = 0;

    arch_spin_lock(&sb->lock);

    if (state->act_server == server && state->act_priority == priority &&
        state->eisn == eisn)
        goto unlock;

    pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
         priority, server, masked, state->act_server,
         state->act_priority);

    kvmppc_xive_select_irq(state, &hw_num, NULL);

    if (priority != MASKED && !masked) {
        rc = kvmppc_xive_select_target(kvm, &server, priority);
        if (rc)
            goto unlock;

        state->act_priority = priority;
        state->act_server = server;
        state->eisn = eisn;

        rc = xive_native_configure_irq(hw_num,
                           kvmppc_xive_vp(xive, server),
                           priority, eisn);
    } else {
        state->act_priority = MASKED;
        state->act_server = 0;
        state->eisn = 0;

        rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
    }

unlock:
    arch_spin_unlock(&sb->lock);
    return rc;
}

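/*
 * KVM_DEV_XIVE_GRP_SOURCE_CONFIG: route a source to a server/priority
 * pair and set its EISN.
 */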
static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
                        long irq, u64 addr)
{
    struct kvmppc_xive_src_block *sb;
    struct kvmppc_xive_irq_state *state;
    u64 __user *ubufp = (u64 __user *) addr;
    u16 src;
    u64 kvm_cfg;
    u32 server;
    u8 priority;
    bool masked;
    u32 eisn;

    sb = kvmppc_xive_find_source(xive, irq, &src);
    if (!sb)
        return -ENOENT;

    state = &sb->irq_state[src];

    if (!state->valid)
        return -EINVAL;

    if (get_user(kvm_cfg, ubufp))
        return -EFAULT;

    pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);

    priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
        KVM_XIVE_SOURCE_PRIORITY_SHIFT;
    server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
        KVM_XIVE_SOURCE_SERVER_SHIFT;
    masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
        KVM_XIVE_SOURCE_MASKED_SHIFT;
    eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
        KVM_XIVE_SOURCE_EISN_SHIFT;

    if (priority != xive_prio_from_guest(priority)) {
        pr_err("invalid priority for queue %d for VCPU %d\n",
               priority, server);
        return -EINVAL;
    }

    return kvmppc_xive_native_update_source_config(xive, sb, state, server,
                               priority, masked, eisn);
}

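/* KVM_DEV_XIVE_GRP_SOURCE_SYNC: sync a source at the XIVE IC level */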
static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
                      long irq, u64 addr)
{
    struct kvmppc_xive_src_block *sb;
    struct kvmppc_xive_irq_state *state;
    struct xive_irq_data *xd;
    u32 hw_num;
    u16 src;
    int rc = 0;

    pr_devel("%s irq=0x%lx", __func__, irq);

    sb = kvmppc_xive_find_source(xive, irq, &src);
    if (!sb)
        return -ENOENT;

    state = &sb->irq_state[src];

    rc = -EINVAL;

    arch_spin_lock(&sb->lock);

    if (state->valid) {
        kvmppc_xive_select_irq(state, &hw_num, &xd);
        xive_native_sync_source(hw_num);
        rc = 0;
    }

    arch_spin_unlock(&sb->lock);
    return rc;
}

static int xive_native_validate_queue_size(u32 qshift)
{
    /*
     * We only support 64K pages for the moment. This is also
     * advertised in the DT property "ibm,xive-eq-sizes"
     */
    switch (qshift) {
    case 0: /* EQ reset */
    case 16:
        return 0;
    case 12:
    case 21:
    case 24:
    default:
        return -EINVAL;
    }
}

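/*
 * KVM_DEV_XIVE_GRP_EQ_CONFIG (set): configure or reset the event queue
 * of a server/priority pair from a kvm_ppc_xive_eq description.
 */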
static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
                           long eq_idx, u64 addr)
{
    struct kvm *kvm = xive->kvm;
    struct kvm_vcpu *vcpu;
    struct kvmppc_xive_vcpu *xc;
    void __user *ubufp = (void __user *) addr;
    u32 server;
    u8 priority;
    struct kvm_ppc_xive_eq kvm_eq;
    int rc;
    __be32 *qaddr = 0;
    struct page *page;
    struct xive_q *q;
    gfn_t gfn;
    unsigned long page_size;
    int srcu_idx;

    /*
     * Demangle priority/server tuple from the EQ identifier
     */
    priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
        KVM_XIVE_EQ_PRIORITY_SHIFT;
    server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
        KVM_XIVE_EQ_SERVER_SHIFT;

    if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
        return -EFAULT;

    vcpu = kvmppc_xive_find_server(kvm, server);
    if (!vcpu) {
        pr_err("Can't find server %d\n", server);
        return -ENOENT;
    }
    xc = vcpu->arch.xive_vcpu;

    if (priority != xive_prio_from_guest(priority)) {
        pr_err("Trying to restore invalid queue %d for VCPU %d\n",
               priority, server);
        return -EINVAL;
    }
    q = &xc->queues[priority];

    pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
         __func__, server, priority, kvm_eq.flags,
         kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

    /* reset queue and disable queueing */
    if (!kvm_eq.qshift) {
        q->guest_qaddr  = 0;
        q->guest_qshift = 0;

        rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
                            NULL, 0, true);
        if (rc) {
            pr_err("Failed to reset queue %d for VCPU %d: %d\n",
                   priority, xc->server_num, rc);
            return rc;
        }

        return 0;
    }

    /*
     * sPAPR specifies an "Unconditional Notify (n)" flag for the
     * H_INT_SET_QUEUE_CONFIG hcall which forces notification
     * without using the coalescing mechanisms provided by the
     * XIVE END ESBs. This is required on KVM as notification
     * using the END ESBs is not supported.
     */
    if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
        pr_err("invalid flags %d\n", kvm_eq.flags);
        return -EINVAL;
    }

    rc = xive_native_validate_queue_size(kvm_eq.qshift);
    if (rc) {
        pr_err("invalid queue size %d\n", kvm_eq.qshift);
        return rc;
    }

    if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
        pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
               1ull << kvm_eq.qshift);
        return -EINVAL;
    }

    srcu_idx = srcu_read_lock(&kvm->srcu);
    gfn = gpa_to_gfn(kvm_eq.qaddr);

    page_size = kvm_host_page_size(vcpu, gfn);
    if (1ull << kvm_eq.qshift > page_size) {
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        pr_warn("Incompatible host page size %lx!\n", page_size);
        return -EINVAL;
    }

    page = gfn_to_page(kvm, gfn);
    if (is_error_page(page)) {
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
        return -EINVAL;
    }

    qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
    srcu_read_unlock(&kvm->srcu, srcu_idx);

    /*
     * Back up the queue page guest address so the EQ page can be
     * marked dirty for migration.
     */
    q->guest_qaddr  = kvm_eq.qaddr;
    q->guest_qshift = kvm_eq.qshift;

    /*
     * Unconditional Notification is forced by default at the
     * OPAL level because the use of END ESBs is not supported by
     * Linux.
     */
    rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
                    (__be32 *) qaddr, kvm_eq.qshift, true);
    if (rc) {
        pr_err("Failed to configure queue %d for VCPU %d: %d\n",
               priority, xc->server_num, rc);
        put_page(page);
        return rc;
    }

    /*
     * Only restore the queue state when needed. When doing the
     * H_INT_SET_SOURCE_CONFIG hcall, it should not.
     */
    if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
        rc = xive_native_set_queue_state(xc->vp_id, priority,
                         kvm_eq.qtoggle,
                         kvm_eq.qindex);
        if (rc)
            goto error;
    }

    rc = kvmppc_xive_attach_escalation(vcpu, priority,
                       kvmppc_xive_has_single_escalation(xive));
error:
    if (rc)
        kvmppc_xive_native_cleanup_queue(vcpu, priority);
    return rc;
}

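/*
 * KVM_DEV_XIVE_GRP_EQ_CONFIG (get): return the current event queue
 * configuration and state of a server/priority pair.
 */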
static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
                           long eq_idx, u64 addr)
{
    struct kvm *kvm = xive->kvm;
    struct kvm_vcpu *vcpu;
    struct kvmppc_xive_vcpu *xc;
    struct xive_q *q;
    void __user *ubufp = (u64 __user *) addr;
    u32 server;
    u8 priority;
    struct kvm_ppc_xive_eq kvm_eq;
    u64 qaddr;
    u64 qshift;
    u64 qeoi_page;
    u32 escalate_irq;
    u64 qflags;
    int rc;

    /*
     * Demangle priority/server tuple from the EQ identifier
     */
    priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
        KVM_XIVE_EQ_PRIORITY_SHIFT;
    server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
        KVM_XIVE_EQ_SERVER_SHIFT;

    vcpu = kvmppc_xive_find_server(kvm, server);
    if (!vcpu) {
        pr_err("Can't find server %d\n", server);
        return -ENOENT;
    }
    xc = vcpu->arch.xive_vcpu;

    if (priority != xive_prio_from_guest(priority)) {
        pr_err("invalid priority for queue %d for VCPU %d\n",
               priority, server);
        return -EINVAL;
    }
    q = &xc->queues[priority];

    memset(&kvm_eq, 0, sizeof(kvm_eq));

    if (!q->qpage)
        return 0;

    rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
                    &qeoi_page, &escalate_irq, &qflags);
    if (rc)
        return rc;

    kvm_eq.flags = 0;
    if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;

    kvm_eq.qshift = q->guest_qshift;
    kvm_eq.qaddr  = q->guest_qaddr;

    rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
                     &kvm_eq.qindex);
    if (rc)
        return rc;

    pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
         __func__, server, priority, kvm_eq.flags,
         kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

    if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
        return -EFAULT;

    return 0;
}

static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
{
    int i;

    for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
        struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

        if (!state->valid)
            continue;

        if (state->act_priority == MASKED)
            continue;

        state->eisn = 0;
        state->act_server = 0;
        state->act_priority = MASKED;
        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
        xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
        if (state->pt_number) {
            xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
            xive_native_configure_irq(state->pt_number,
                          0, MASKED, 0);
        }
    }
}

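/*
 * KVM_DEV_XIVE_RESET: mask all configured sources, free the escalation
 * interrupts and reset the queues of every vCPU.
 */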
static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
    struct kvm *kvm = xive->kvm;
    struct kvm_vcpu *vcpu;
    unsigned long i;

    pr_devel("%s\n", __func__);

    mutex_lock(&xive->lock);

    kvm_for_each_vcpu(i, vcpu, kvm) {
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        unsigned int prio;

        if (!xc)
            continue;

        kvmppc_xive_disable_vcpu_interrupts(vcpu);

        for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {

            /* Single escalation, no queue 7 */
            if (prio == 7 && kvmppc_xive_has_single_escalation(xive))
                break;

            if (xc->esc_virq[prio]) {
                free_irq(xc->esc_virq[prio], vcpu);
                irq_dispose_mapping(xc->esc_virq[prio]);
                kfree(xc->esc_virq_names[prio]);
                xc->esc_virq[prio] = 0;
            }

            kvmppc_xive_native_cleanup_queue(vcpu, prio);
        }
    }

    for (i = 0; i <= xive->max_sbid; i++) {
        struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

        if (sb) {
            arch_spin_lock(&sb->lock);
            kvmppc_xive_reset_sources(sb);
            arch_spin_unlock(&sb->lock);
        }
    }

    mutex_unlock(&xive->lock);

    return 0;
}

static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
{
    int j;

    for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
        struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
        struct xive_irq_data *xd;
        u32 hw_num;

        if (!state->valid)
            continue;

        /*
         * The struct kvmppc_xive_irq_state reflects the state
         * of the EAS configuration and not the state of the
         * source. The source is masked by setting the PQ bits
         * to '-Q', which is what is done before calling the
         * KVM_DEV_XIVE_EQ_SYNC control.
         *
         * If a source EAS is configured, OPAL syncs the XIVE
         * IC of the source and the XIVE IC of the previous
         * target if any.
         *
         * So it is fine to ignore MASKED sources, as they have
         * already been synced.
         */
        if (state->act_priority == MASKED)
            continue;

        kvmppc_xive_select_irq(state, &hw_num, &xd);
        xive_native_sync_source(hw_num);
        xive_native_sync_queue(hw_num);
    }
}

static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
{
    struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
    unsigned int prio;
    int srcu_idx;

    if (!xc)
        return -ENOENT;

    for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
        struct xive_q *q = &xc->queues[prio];

        if (!q->qpage)
            continue;

        /* Mark EQ page dirty for migration */
        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
    }
    return 0;
}

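/*
 * KVM_DEV_XIVE_EQ_SYNC: sync the sources and queues at the XIVE IC
 * level and mark the EQ pages dirty, so that a consistent state can be
 * captured for migration.
 */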
static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
{
    struct kvm *kvm = xive->kvm;
    struct kvm_vcpu *vcpu;
    unsigned long i;

    pr_devel("%s\n", __func__);

    mutex_lock(&xive->lock);
    for (i = 0; i <= xive->max_sbid; i++) {
        struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

        if (sb) {
            arch_spin_lock(&sb->lock);
            kvmppc_xive_native_sync_sources(sb);
            arch_spin_unlock(&sb->lock);
        }
    }

    kvm_for_each_vcpu(i, vcpu, kvm) {
        kvmppc_xive_native_vcpu_eq_sync(vcpu);
    }
    mutex_unlock(&xive->lock);

    return 0;
}

static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
                       struct kvm_device_attr *attr)
{
    struct kvmppc_xive *xive = dev->private;

    switch (attr->group) {
    case KVM_DEV_XIVE_GRP_CTRL:
        switch (attr->attr) {
        case KVM_DEV_XIVE_RESET:
            return kvmppc_xive_reset(xive);
        case KVM_DEV_XIVE_EQ_SYNC:
            return kvmppc_xive_native_eq_sync(xive);
        case KVM_DEV_XIVE_NR_SERVERS:
            return kvmppc_xive_set_nr_servers(xive, attr->addr);
        }
        break;
    case KVM_DEV_XIVE_GRP_SOURCE:
        return kvmppc_xive_native_set_source(xive, attr->attr,
                             attr->addr);
    case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
        return kvmppc_xive_native_set_source_config(xive, attr->attr,
                                attr->addr);
    case KVM_DEV_XIVE_GRP_EQ_CONFIG:
        return kvmppc_xive_native_set_queue_config(xive, attr->attr,
                               attr->addr);
    case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
        return kvmppc_xive_native_sync_source(xive, attr->attr,
                              attr->addr);
    }
    return -ENXIO;
}

static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
                       struct kvm_device_attr *attr)
{
    struct kvmppc_xive *xive = dev->private;

    switch (attr->group) {
    case KVM_DEV_XIVE_GRP_EQ_CONFIG:
        return kvmppc_xive_native_get_queue_config(xive, attr->attr,
                               attr->addr);
    }
    return -ENXIO;
}

static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
                       struct kvm_device_attr *attr)
{
    switch (attr->group) {
    case KVM_DEV_XIVE_GRP_CTRL:
        switch (attr->attr) {
        case KVM_DEV_XIVE_RESET:
        case KVM_DEV_XIVE_EQ_SYNC:
        case KVM_DEV_XIVE_NR_SERVERS:
            return 0;
        }
        break;
    case KVM_DEV_XIVE_GRP_SOURCE:
    case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
    case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
        if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
            attr->attr < KVMPPC_XIVE_NR_IRQS)
            return 0;
        break;
    case KVM_DEV_XIVE_GRP_EQ_CONFIG:
        return 0;
    }
    return -ENXIO;
}

/*
 * Called when device fd is closed.  kvm->lock is held.
 */
static void kvmppc_xive_native_release(struct kvm_device *dev)
{
    struct kvmppc_xive *xive = dev->private;
    struct kvm *kvm = xive->kvm;
    struct kvm_vcpu *vcpu;
    unsigned long i;

    pr_devel("Releasing xive native device\n");

    /*
     * Clear the KVM device file address_space which is used to
     * unmap the ESB pages when a device is passed through.
     */
    mutex_lock(&xive->mapping_lock);
    xive->mapping = NULL;
    mutex_unlock(&xive->mapping_lock);

    /*
     * Since this is the device release function, we know that
     * userspace does not have any open fd or mmap referring to
     * the device.  Therefore there cannot be any of the device
     * attribute set/get, mmap, or page fault functions being
     * executed concurrently, and similarly, the connect_vcpu and
     * set/clr_mapped functions cannot be executing either.
     */

    debugfs_remove(xive->dentry);

    /*
     * We should clean up the vCPU interrupt presenters first.
     */
    kvm_for_each_vcpu(i, vcpu, kvm) {
        /*
         * Take vcpu->mutex to ensure that no one_reg get/set ioctl
         * (i.e. kvmppc_xive_native_[gs]et_vp) can be in progress.
         * Holding the vcpu->mutex also means that the vcpu cannot
         * be executing the KVM_RUN ioctl, and therefore it cannot
         * be executing the XIVE push or pull code or accessing
         * the XIVE MMIO regions.
         */
        mutex_lock(&vcpu->mutex);
        kvmppc_xive_native_cleanup_vcpu(vcpu);
        mutex_unlock(&vcpu->mutex);
    }

    /*
     * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
     * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
     * against xive code getting called during vcpu execution or
     * set/get one_reg operations.
     */
    kvm->arch.xive = NULL;

    for (i = 0; i <= xive->max_sbid; i++) {
        if (xive->src_blocks[i])
            kvmppc_xive_free_sources(xive->src_blocks[i]);
        kfree(xive->src_blocks[i]);
        xive->src_blocks[i] = NULL;
    }

    if (xive->vp_base != XIVE_INVALID_VP)
        xive_native_free_vp_block(xive->vp_base);

    /*
     * A reference to the kvmppc_xive pointer is kept under the
     * xive_devices struct of the machine for reuse. For now it is
     * freed when the VM is destroyed, until all the execution
     * paths have been fixed.
     */

    kfree(dev);
}

/*
 * Create a XIVE device.  kvm->lock is held.
 */
static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
{
    struct kvmppc_xive *xive;
    struct kvm *kvm = dev->kvm;

    pr_devel("Creating xive native device\n");

    if (kvm->arch.xive)
        return -EEXIST;

    xive = kvmppc_xive_get_device(kvm, type);
    if (!xive)
        return -ENOMEM;

    dev->private = xive;
    xive->dev = dev;
    xive->kvm = kvm;
    mutex_init(&xive->mapping_lock);
    mutex_init(&xive->lock);

    /* VP allocation is delayed to the first call to connect_vcpu */
    xive->vp_base = XIVE_INVALID_VP;
    /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
     * on a POWER9 system.
     */
    xive->nr_servers = KVM_MAX_VCPUS;

    if (xive_native_has_single_escalation())
        xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;

    if (xive_native_has_save_restore())
        xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;

    xive->ops = &kvmppc_xive_native_ops;

    kvm->arch.xive = xive;
    return 0;
}

/*
 * Interrupt Pending Buffer (IPB) offset
 */
#define TM_IPB_SHIFT 40
#define TM_IPB_MASK  (((u64) 0xFF) << TM_IPB_SHIFT)

int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
    struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
    u64 opal_state;
    int rc;

    if (!kvmppc_xive_enabled(vcpu))
        return -EPERM;

    if (!xc)
        return -ENOENT;

    /* Thread context registers. We only care about IPB and CPPR */
    val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;

    /* Get the VP state from OPAL */
    rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
    if (rc)
        return rc;

    /*
     * Capture the backup of the IPB register in the NVT structure
     * and merge it into our KVM VP state.
     */
    val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);

    pr_devel("%s NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
         __func__,
         vcpu->arch.xive_saved_state.nsr,
         vcpu->arch.xive_saved_state.cppr,
         vcpu->arch.xive_saved_state.ipb,
         vcpu->arch.xive_saved_state.pipr,
         vcpu->arch.xive_saved_state.w01,
         (u32) vcpu->arch.xive_cam_word, opal_state);

    return 0;
}

int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
    struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
    struct kvmppc_xive *xive = vcpu->kvm->arch.xive;

    pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
         val->xive_timaval[0], val->xive_timaval[1]);

    if (!kvmppc_xive_enabled(vcpu))
        return -EPERM;

    if (!xc || !xive)
        return -ENOENT;

    /* We can't update the state of a "pushed" VCPU  */
    if (WARN_ON(vcpu->arch.xive_pushed))
        return -EBUSY;

    /*
     * Restore the thread context registers. IPB and CPPR should
     * be the only ones that matter.
     */
    vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];

    /*
     * There is no need to restore the XIVE internal state (IPB
     * stored in the NVT) as the IPB register was merged in KVM VP
     * state when captured.
     */
    return 0;
}

bool kvmppc_xive_native_supported(void)
{
    return xive_native_has_queue_state_support();
}

static int xive_native_debug_show(struct seq_file *m, void *private)
{
    struct kvmppc_xive *xive = m->private;
    struct kvm *kvm = xive->kvm;
    struct kvm_vcpu *vcpu;
    unsigned long i;

    if (!kvm)
        return 0;

    seq_puts(m, "=========\nVCPU state\n=========\n");

    kvm_for_each_vcpu(i, vcpu, kvm) {
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

        if (!xc)
            continue;

        seq_printf(m, "VCPU %d: VP=%#x/%02x\n"
               "    NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x\n",
               xc->server_num, xc->vp_id, xc->vp_chip_id,
               vcpu->arch.xive_saved_state.nsr,
               vcpu->arch.xive_saved_state.cppr,
               vcpu->arch.xive_saved_state.ipb,
               vcpu->arch.xive_saved_state.pipr,
               be64_to_cpu(vcpu->arch.xive_saved_state.w01),
               be32_to_cpu(vcpu->arch.xive_cam_word));

        kvmppc_xive_debug_show_queues(m, vcpu);
    }

    seq_puts(m, "=========\nSources\n=========\n");

    for (i = 0; i <= xive->max_sbid; i++) {
        struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

        if (sb) {
            arch_spin_lock(&sb->lock);
            kvmppc_xive_debug_show_sources(m, sb);
            arch_spin_unlock(&sb->lock);
        }
    }

    return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_native_debug);

static void xive_native_debugfs_init(struct kvmppc_xive *xive)
{
    xive->dentry = debugfs_create_file("xive", 0444, xive->kvm->debugfs_dentry,
                       xive, &xive_native_debug_fops);

    pr_debug("%s: created\n", __func__);
}

static void kvmppc_xive_native_init(struct kvm_device *dev)
{
    struct kvmppc_xive *xive = dev->private;

    /* Register some debug interfaces */
    xive_native_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_native_ops = {
    .name = "kvm-xive-native",
    .create = kvmppc_xive_native_create,
    .init = kvmppc_xive_native_init,
    .release = kvmppc_xive_native_release,
    .set_attr = kvmppc_xive_native_set_attr,
    .get_attr = kvmppc_xive_native_get_attr,
    .has_attr = kvmppc_xive_native_has_attr,
    .mmap = kvmppc_xive_native_mmap,
};