// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/setup.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
    return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
    return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
    return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
    return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; this function hard-disables them itself (note the WARN_ON
 * below fires if they are already off) and returns to the caller with
 * them still disabled when it is ready to enter the guest.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
    int r;

    WARN_ON(irqs_disabled());
    hard_irq_disable();

    while (true) {
        if (need_resched()) {
            local_irq_enable();
            cond_resched();
            hard_irq_disable();
            continue;
        }

        if (signal_pending(current)) {
            kvmppc_account_exit(vcpu, SIGNAL_EXITS);
            vcpu->run->exit_reason = KVM_EXIT_INTR;
            r = -EINTR;
            break;
        }

        vcpu->mode = IN_GUEST_MODE;

        /*
         * Reading vcpu->requests must happen after setting vcpu->mode,
         * so we don't miss a request because the requester sees
         * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
         * before next entering the guest (and thus doesn't IPI).
         * This also orders the write to mode from any reads
         * to the page tables done while the VCPU is running.
         * Please see the comment in kvm_flush_remote_tlbs.
         */
        smp_mb();

        if (kvm_request_pending(vcpu)) {
            /* Make sure request processing is preemptible */
            local_irq_enable();
            trace_kvm_check_requests(vcpu);
            r = kvmppc_core_check_requests(vcpu);
            hard_irq_disable();
            if (r > 0)
                continue;
            break;
        }

        if (kvmppc_core_prepare_to_enter(vcpu)) {
            /*
             * Interrupts got enabled in between, so we
             * are back at square 1.
             */
            continue;
        }

        guest_enter_irqoff();
        return 1;
    }

    /* return to host */
    local_irq_enable();
    return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
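
/*
 * Illustrative sketch (not part of this file): a subarch run loop
 * typically brackets guest entry with the helper above, roughly:
 *
 *      r = kvmppc_prepare_to_enter(vcpu);
 *      if (r <= 0)
 *              return r;       // back out to the host with r
 *      // interrupts are now hard-disabled; drop into the guest
 *      r = do_low_level_guest_entry(vcpu);
 *
 * do_low_level_guest_entry() stands in for the real, subarch-specific
 * entry path; the name is assumed here purely for illustration.
 */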

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
    struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
    int i;

    shared->sprg0 = swab64(shared->sprg0);
    shared->sprg1 = swab64(shared->sprg1);
    shared->sprg2 = swab64(shared->sprg2);
    shared->sprg3 = swab64(shared->sprg3);
    shared->srr0 = swab64(shared->srr0);
    shared->srr1 = swab64(shared->srr1);
    shared->dar = swab64(shared->dar);
    shared->msr = swab64(shared->msr);
    shared->dsisr = swab32(shared->dsisr);
    shared->int_pending = swab32(shared->int_pending);
    for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
        shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
    int nr = kvmppc_get_gpr(vcpu, 11);
    int r;
    unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
    unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
    unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
    unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
    unsigned long r2 = 0;

    if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
        /* 32 bit mode */
        param1 &= 0xffffffff;
        param2 &= 0xffffffff;
        param3 &= 0xffffffff;
        param4 &= 0xffffffff;
    }

    switch (nr) {
    case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
    {
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Book3S can be little endian, find it out here */
        int shared_big_endian = true;
        if (vcpu->arch.intr_msr & MSR_LE)
            shared_big_endian = false;
        if (shared_big_endian != vcpu->arch.shared_big_endian)
            kvmppc_swab_shared(vcpu);
        vcpu->arch.shared_big_endian = shared_big_endian;
#endif

        if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
            /*
             * Older versions of the Linux magic page code had
             * a bug where they would map their trampoline code
             * NX. If that's the case, remove !PR NX capability.
             */
            vcpu->arch.disable_kernel_nx = true;
            kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }

        vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
        vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Make sure our 4k magic page is in the same window of a 64k
         * page within the guest and within the host's page.
         */
        if ((vcpu->arch.magic_page_pa & 0xf000) !=
            ((ulong)vcpu->arch.shared & 0xf000)) {
            void *old_shared = vcpu->arch.shared;
            ulong shared = (ulong)vcpu->arch.shared;
            void *new_shared;

            shared &= PAGE_MASK;
            shared |= vcpu->arch.magic_page_pa & 0xf000;
            new_shared = (void *)shared;
            memcpy(new_shared, old_shared, 0x1000);
            vcpu->arch.shared = new_shared;
        }
#endif

        r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

        r = EV_SUCCESS;
        break;
    }
    case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
        r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
        r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

        /* Second return value is in r4 */
        break;
    case EV_HCALL_TOKEN(EV_IDLE):
        r = EV_SUCCESS;
        kvm_vcpu_halt(vcpu);
        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
        break;
    default:
        r = EV_UNIMPLEMENTED;
        break;
    }

    kvmppc_set_gpr(vcpu, 4, r2);

    return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
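
/*
 * Illustrative sketch: seen from the guest, the dispatch above follows
 * the ePAPR/KVM hypercall ABI — token in r11, arguments in r3..r6,
 * status back in r3 and a second return value in r4. A guest might
 * issue the magic-page hypercall roughly like this (epapr_hypercall()
 * is the guest-side wrapper; the actual trap sequence is discovered at
 * runtime via the device tree "hcall-instructions" property):
 *
 *      ulong in[8] = { magic_pa,
 *                      magic_ea | MAGIC_PAGE_FLAG_NOT_MAPPED_NX };
 *      ulong out[8];
 *      long r = epapr_hypercall(in, out,
 *                      KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
 *      // r == EV_SUCCESS on success; out[0] (r4) carries the
 *      // KVM_MAGIC_FEAT_* bits set by the handler above.
 */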

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
    int r = false;

    /* We have to know what CPU to virtualize */
    if (!vcpu->arch.pvr)
        goto out;

    /* PAPR only works with book3s_64 */
    if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
        goto out;

    /* HV KVM can only do PAPR mode for now */
    if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
        goto out;

#ifdef CONFIG_KVM_BOOKE_HV
    if (!cpu_has_feature(CPU_FTR_EMB_HV))
        goto out;
#endif

    r = true;

out:
    vcpu->arch.sane = r;
    return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
    enum emulation_result er;
    int r;

    er = kvmppc_emulate_loadstore(vcpu);
    switch (er) {
    case EMULATE_DONE:
        /* Future optimization: only reload non-volatiles if they were
         * actually modified. */
        r = RESUME_GUEST_NV;
        break;
    case EMULATE_AGAIN:
        r = RESUME_GUEST;
        break;
    case EMULATE_DO_MMIO:
        vcpu->run->exit_reason = KVM_EXIT_MMIO;
        /* We must reload nonvolatiles because "update" load/store
         * instructions modify register state. */
        /* Future optimization: only reload non-volatiles if they were
         * actually modified. */
        r = RESUME_HOST_NV;
        break;
    case EMULATE_FAIL:
    {
        u32 last_inst;

        kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
        kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
                      last_inst);

        /*
         * Injecting a Data Storage here is a bit more
         * accurate since the instruction that caused the
         * access could still be a valid one.
         */
        if (!IS_ENABLED(CONFIG_BOOKE)) {
            ulong dsisr = DSISR_BADACCESS;

            if (vcpu->mmio_is_write)
                dsisr |= DSISR_ISSTORE;

            kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
        } else {
            /*
             * BookE does not send a SIGBUS on a bad
             * fault, so use a Program interrupt instead
             * to avoid a fault loop.
             */
            kvmppc_core_queue_program(vcpu, 0);
        }

        r = RESUME_GUEST;
        break;
    }
    default:
        WARN_ON(1);
        r = RESUME_GUEST;
    }

    return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
          bool data)
{
    ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
    struct kvmppc_pte pte;
    int r = -EINVAL;

    vcpu->stat.st++;

    if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
        r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
                                size);

    if ((!r) || (r == -EAGAIN))
        return r;

    r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
             XLATE_WRITE, &pte);
    if (r < 0)
        return r;

    *eaddr = pte.raddr;

    if (!pte.may_write)
        return -EPERM;

    /* Magic page override */
    if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
        ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
        !(kvmppc_get_msr(vcpu) & MSR_PR)) {
        void *magic = vcpu->arch.shared;
        magic += pte.eaddr & 0xfff;
        memcpy(magic, ptr, size);
        return EMULATE_DONE;
    }

    if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
        return EMULATE_DO_MMIO;

    return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
    ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
    struct kvmppc_pte pte;
    int rc = -EINVAL;

    vcpu->stat.ld++;

    if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
        rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
                                  size);

    if ((!rc) || (rc == -EAGAIN))
        return rc;

    rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
              XLATE_READ, &pte);
    if (rc)
        return rc;

    *eaddr = pte.raddr;

    if (!pte.may_read)
        return -EPERM;

    if (!data && !pte.may_execute)
        return -ENOEXEC;

    /* Magic page override */
    if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
        ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
        !(kvmppc_get_msr(vcpu) & MSR_PR)) {
        void *magic = vcpu->arch.shared;
        magic += pte.eaddr & 0xfff;
        memcpy(ptr, magic, size);
        return EMULATE_DONE;
    }

    kvm_vcpu_srcu_read_lock(vcpu);
    rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
    kvm_vcpu_srcu_read_unlock(vcpu);
    if (rc)
        return EMULATE_DO_MMIO;

    return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
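
/*
 * Illustrative sketch: emulation code uses kvmppc_ld()/kvmppc_st() to
 * move data between a guest effective address and a host buffer, e.g.
 * fetching an instruction word (Book3S-flavoured, error handling
 * elided; kvmppc_get_pc() is the Book3S accessor):
 *
 *      u32 inst;
 *      ulong pc = kvmppc_get_pc(vcpu);
 *
 *      if (kvmppc_ld(vcpu, &pc, sizeof(inst), &inst, false) != EMULATE_DONE)
 *              ;       // translation failed or the access needs MMIO
 *
 * Passing data == false asks for an execute (XLATE_INST) translation,
 * so a page that is readable but no-execute fails with -ENOEXEC
 * instead of being fetched.
 */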

int kvm_arch_hardware_enable(void)
{
    return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
    return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
    return kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
    struct kvmppc_ops *kvm_ops = NULL;
    int r;

    /*
     * if we have both HV and PR enabled, default is HV
     */
    if (type == 0) {
        if (kvmppc_hv_ops)
            kvm_ops = kvmppc_hv_ops;
        else
            kvm_ops = kvmppc_pr_ops;
        if (!kvm_ops)
            goto err_out;
    } else if (type == KVM_VM_PPC_HV) {
        if (!kvmppc_hv_ops)
            goto err_out;
        kvm_ops = kvmppc_hv_ops;
    } else if (type == KVM_VM_PPC_PR) {
        if (!kvmppc_pr_ops)
            goto err_out;
        kvm_ops = kvmppc_pr_ops;
    } else
        goto err_out;

    if (!try_module_get(kvm_ops->owner))
        return -ENOENT;

    kvm->arch.kvm_ops = kvm_ops;
    r = kvmppc_core_init_vm(kvm);
    if (r)
        module_put(kvm_ops->owner);
    return r;
err_out:
    return -EINVAL;
}
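
/*
 * Illustrative sketch: userspace chooses the VM flavour through the
 * type argument of KVM_CREATE_VM, which arrives here as 'type':
 *
 *      int kvm = open("/dev/kvm", O_RDWR);
 *      int vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_PPC_HV);   // force HV
 *      // ioctl(kvm, KVM_CREATE_VM, KVM_VM_PPC_PR)          // force PR
 *      // ioctl(kvm, KVM_CREATE_VM, 0)     // default: HV if available
 *
 * A -1 return with errno == EINVAL means the requested flavour's
 * module is not available.
 */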

void kvm_arch_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_KVM_XICS
    /*
     * We call kick_all_cpus_sync() to ensure that all
     * CPUs have executed any pending IPIs before we
     * continue and free VCPUs structures below.
     */
    if (is_kvmppc_hv_enabled(kvm))
        kick_all_cpus_sync();
#endif

    kvm_destroy_vcpus(kvm);

    mutex_lock(&kvm->lock);

    kvmppc_core_destroy_vm(kvm);

    mutex_unlock(&kvm->lock);

    /* drop the module reference */
    module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
    int r;
    /* Assume we're using HV mode when the HV module is loaded */
    int hv_enabled = kvmppc_hv_ops ? 1 : 0;

    if (kvm) {
        /*
         * Hooray - we know which VM type we're running on. Depend on
         * that rather than the guess above.
         */
        hv_enabled = is_kvmppc_hv_enabled(kvm);
    }

    switch (ext) {
#ifdef CONFIG_BOOKE
    case KVM_CAP_PPC_BOOKE_SREGS:
    case KVM_CAP_PPC_BOOKE_WATCHDOG:
    case KVM_CAP_PPC_EPR:
#else
    case KVM_CAP_PPC_SEGSTATE:
    case KVM_CAP_PPC_HIOR:
    case KVM_CAP_PPC_PAPR:
#endif
    case KVM_CAP_PPC_UNSET_IRQ:
    case KVM_CAP_PPC_IRQ_LEVEL:
    case KVM_CAP_ENABLE_CAP:
    case KVM_CAP_ONE_REG:
    case KVM_CAP_IOEVENTFD:
    case KVM_CAP_DEVICE_CTRL:
    case KVM_CAP_IMMEDIATE_EXIT:
    case KVM_CAP_SET_GUEST_DEBUG:
        r = 1;
        break;
    case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
    case KVM_CAP_PPC_PAIRED_SINGLES:
    case KVM_CAP_PPC_OSI:
    case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
    case KVM_CAP_SW_TLB:
#endif
        /* We support this only for PR */
        r = !hv_enabled;
        break;
#ifdef CONFIG_KVM_MPIC
    case KVM_CAP_IRQ_MPIC:
        r = 1;
        break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
    case KVM_CAP_SPAPR_TCE:
    case KVM_CAP_SPAPR_TCE_64:
        r = 1;
        break;
    case KVM_CAP_SPAPR_TCE_VFIO:
        r = !!cpu_has_feature(CPU_FTR_HVMODE);
        break;
    case KVM_CAP_PPC_RTAS:
    case KVM_CAP_PPC_FIXUP_HCALL:
    case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
    case KVM_CAP_IRQ_XICS:
#endif
    case KVM_CAP_PPC_GET_CPU_CHAR:
        r = 1;
        break;
#ifdef CONFIG_KVM_XIVE
    case KVM_CAP_PPC_IRQ_XIVE:
        /*
         * We need XIVE to be enabled on the platform (implies
         * a POWER9 processor) and the PowerNV platform, as
         * nested is not yet supported.
         */
        r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
            kvmppc_xive_native_supported();
        break;
#endif

    case KVM_CAP_PPC_ALLOC_HTAB:
        r = hv_enabled;
        break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    case KVM_CAP_PPC_SMT:
        r = 0;
        if (kvm) {
            if (kvm->arch.emul_smt_mode > 1)
                r = kvm->arch.emul_smt_mode;
            else
                r = kvm->arch.smt_mode;
        } else if (hv_enabled) {
            if (cpu_has_feature(CPU_FTR_ARCH_300))
                r = 1;
            else
                r = threads_per_subcore;
        }
        break;
    case KVM_CAP_PPC_SMT_POSSIBLE:
        r = 1;
        if (hv_enabled) {
            if (!cpu_has_feature(CPU_FTR_ARCH_300))
                r = ((threads_per_subcore << 1) - 1);
            else
                /* P9 can emulate dbells, so allow any mode */
                r = 8 | 4 | 2 | 1;
        }
        break;
    case KVM_CAP_PPC_RMA:
        r = 0;
        break;
    case KVM_CAP_PPC_HWRNG:
        r = kvmppc_hwrng_present();
        break;
    case KVM_CAP_PPC_MMU_RADIX:
        r = !!(hv_enabled && radix_enabled());
        break;
    case KVM_CAP_PPC_MMU_HASH_V3:
        r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
               kvmppc_hv_ops->hash_v3_possible());
        break;
    case KVM_CAP_PPC_NESTED_HV:
        r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
               !kvmppc_hv_ops->enable_nested(NULL));
        break;
#endif
    case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        r = 1;
#else
        r = 0;
#endif
        break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    case KVM_CAP_PPC_HTAB_FD:
        r = hv_enabled;
        break;
#endif
    case KVM_CAP_NR_VCPUS:
        /*
         * Recommending a number of CPUs is somewhat arbitrary; we
         * return the number of present CPUs for -HV (since a host
         * will have secondary threads "offline"), and for other KVM
         * implementations just count online CPUs.
         */
        if (hv_enabled)
            r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
        else
            r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
        break;
    case KVM_CAP_MAX_VCPUS:
        r = KVM_MAX_VCPUS;
        break;
    case KVM_CAP_MAX_VCPU_ID:
        r = KVM_MAX_VCPU_IDS;
        break;
#ifdef CONFIG_PPC_BOOK3S_64
    case KVM_CAP_PPC_GET_SMMU_INFO:
        r = 1;
        break;
    case KVM_CAP_SPAPR_MULTITCE:
        r = 1;
        break;
    case KVM_CAP_SPAPR_RESIZE_HPT:
        r = !!hv_enabled;
        break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    case KVM_CAP_PPC_FWNMI:
        r = hv_enabled;
        break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    case KVM_CAP_PPC_HTM:
        r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
             (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
        break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
    case KVM_CAP_PPC_SECURE_GUEST:
        r = hv_enabled && kvmppc_hv_ops->enable_svm &&
            !kvmppc_hv_ops->enable_svm(NULL);
        break;
    case KVM_CAP_PPC_DAWR1:
        r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
               !kvmppc_hv_ops->enable_dawr1(NULL));
        break;
    case KVM_CAP_PPC_RPT_INVALIDATE:
        r = 1;
        break;
#endif
    case KVM_CAP_PPC_AIL_MODE_3:
        r = 0;
        /*
         * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
         * The POWER9s can support it if the guest runs in hash mode,
         * but QEMU doesn't necessarily query the capability in time.
         */
        if (hv_enabled) {
            if (kvmhv_on_pseries()) {
                if (pseries_reloc_on_exception())
                    r = 1;
            } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
                  !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
                r = 1;
            }
        }
        break;
    default:
        r = 0;
        break;
    }
    return r;
}
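
/*
 * Illustrative sketch: userspace probes these capabilities with
 * KVM_CHECK_EXTENSION, ideally on the VM fd so the answer reflects the
 * actual HV/PR decision instead of the "HV module loaded" guess above:
 *
 *      int smt = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *      // 0 means unsupported; for KVM_CAP_PPC_SMT a positive value
 *      // is this VM's (emulated) SMT mode, as computed above.
 */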

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
    return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
    kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                   const struct kvm_memory_slot *old,
                   struct kvm_memory_slot *new,
                   enum kvm_mr_change change)
{
    return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                   struct kvm_memory_slot *old,
                   const struct kvm_memory_slot *new,
                   enum kvm_mr_change change)
{
    kvmppc_core_commit_memory_region(kvm, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                   struct kvm_memory_slot *slot)
{
    kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
    return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
    struct kvm_vcpu *vcpu;

    vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
    kvmppc_decrementer_func(vcpu);

    return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
    int err;

    hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
    vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
    vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
    mutex_init(&vcpu->arch.exit_timing_lock);
#endif
    err = kvmppc_subarch_vcpu_init(vcpu);
    if (err)
        return err;

    err = kvmppc_core_vcpu_create(vcpu);
    if (err)
        goto out_vcpu_uninit;

    rcuwait_init(&vcpu->arch.wait);
    vcpu->arch.waitp = &vcpu->arch.wait;
    return 0;

out_vcpu_uninit:
    kvmppc_subarch_vcpu_uninit(vcpu);
    return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
    /* Make sure we're not using the vcpu anymore */
    hrtimer_cancel(&vcpu->arch.dec_timer);

    switch (vcpu->arch.irq_type) {
    case KVMPPC_IRQ_MPIC:
        kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
        break;
    case KVMPPC_IRQ_XICS:
        if (xics_on_xive())
            kvmppc_xive_cleanup_vcpu(vcpu);
        else
            kvmppc_xics_free_icp(vcpu);
        break;
    case KVMPPC_IRQ_XIVE:
        kvmppc_xive_native_cleanup_vcpu(vcpu);
        break;
    }

    kvmppc_core_vcpu_free(vcpu);

    kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
    return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
    /*
     * vrsave (formerly usprg0) isn't used by Linux, but may
     * be used by the guest.
     *
     * On non-booke this is associated with Altivec and
     * is handled by code in book3s.c.
     */
    mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
    kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
    kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
    vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
    return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
        (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
                     struct irq_bypass_producer *prod)
{
    struct kvm_kernel_irqfd *irqfd =
        container_of(cons, struct kvm_kernel_irqfd, consumer);
    struct kvm *kvm = irqfd->kvm;

    if (kvm->arch.kvm_ops->irq_bypass_add_producer)
        return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

    return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
                      struct irq_bypass_producer *prod)
{
    struct kvm_kernel_irqfd *irqfd =
        container_of(cons, struct kvm_kernel_irqfd, consumer);
    struct kvm *kvm = irqfd->kvm;

    if (kvm->arch.kvm_ops->irq_bypass_del_producer)
        kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
    int offset;

    if ((index != 0) && (index != 1))
        return -1;

#ifdef __BIG_ENDIAN
    offset = index;
#else
    offset = 1 - index;
#endif

    return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
    int offset;

    if ((index > 3) || (index < 0))
        return -1;

#ifdef __BIG_ENDIAN
    offset = index;
#else
    offset = 3 - index;
#endif
    return offset;
}
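
/*
 * Worked example for the two helpers above: VSX registers are kept in
 * host-endian element order, so guest element 0 lives at array offset
 * 0 on a big-endian host but at the far end on a little-endian one.
 * Thus kvmppc_get_vsr_dword_offset(0) is 0 on BE and 1 on LE, and
 * kvmppc_get_vsr_word_offset(0) is 0 on BE and 3 on LE; out-of-range
 * indices return -1, which the setters below treat as "do nothing".
 */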

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
    u64 gpr)
{
    union kvmppc_one_reg val;
    int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

    if (offset == -1)
        return;

    if (index >= 32) {
        val.vval = VCPU_VSX_VR(vcpu, index - 32);
        val.vsxval[offset] = gpr;
        VCPU_VSX_VR(vcpu, index - 32) = val.vval;
    } else {
        VCPU_VSX_FPR(vcpu, index, offset) = gpr;
    }
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
    u64 gpr)
{
    union kvmppc_one_reg val;
    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

    if (index >= 32) {
        val.vval = VCPU_VSX_VR(vcpu, index - 32);
        val.vsxval[0] = gpr;
        val.vsxval[1] = gpr;
        VCPU_VSX_VR(vcpu, index - 32) = val.vval;
    } else {
        VCPU_VSX_FPR(vcpu, index, 0) = gpr;
        VCPU_VSX_FPR(vcpu, index, 1) = gpr;
    }
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
    u32 gpr)
{
    union kvmppc_one_reg val;
    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

    if (index >= 32) {
        val.vsx32val[0] = gpr;
        val.vsx32val[1] = gpr;
        val.vsx32val[2] = gpr;
        val.vsx32val[3] = gpr;
        VCPU_VSX_VR(vcpu, index - 32) = val.vval;
    } else {
        val.vsx32val[0] = gpr;
        val.vsx32val[1] = gpr;
        VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
        VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
    }
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
    u32 gpr32)
{
    union kvmppc_one_reg val;
    int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
    int dword_offset, word_offset;

    if (offset == -1)
        return;

    if (index >= 32) {
        val.vval = VCPU_VSX_VR(vcpu, index - 32);
        val.vsx32val[offset] = gpr32;
        VCPU_VSX_VR(vcpu, index - 32) = val.vval;
    } else {
        dword_offset = offset / 2;
        word_offset = offset % 2;
        val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
        val.vsx32val[word_offset] = gpr32;
        VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
    }
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
        int index, int element_size)
{
    int offset;
    int elts = sizeof(vector128) / element_size;

    if ((index < 0) || (index >= elts))
        return -1;

    if (kvmppc_need_byteswap(vcpu))
        offset = elts - index - 1;
    else
        offset = index;

    return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
        int index)
{
    return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
        int index)
{
    return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
        int index)
{
    return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
        int index)
{
    return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}

static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
    u64 gpr)
{
    union kvmppc_one_reg val;
    int offset = kvmppc_get_vmx_dword_offset(vcpu,
            vcpu->arch.mmio_vmx_offset);
    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

    if (offset == -1)
        return;

    val.vval = VCPU_VSX_VR(vcpu, index);
    val.vsxval[offset] = gpr;
    VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
    u32 gpr32)
{
    union kvmppc_one_reg val;
    int offset = kvmppc_get_vmx_word_offset(vcpu,
            vcpu->arch.mmio_vmx_offset);
    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

    if (offset == -1)
        return;

    val.vval = VCPU_VSX_VR(vcpu, index);
    val.vsx32val[offset] = gpr32;
    VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
    u16 gpr16)
{
    union kvmppc_one_reg val;
    int offset = kvmppc_get_vmx_hword_offset(vcpu,
            vcpu->arch.mmio_vmx_offset);
    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

    if (offset == -1)
        return;

    val.vval = VCPU_VSX_VR(vcpu, index);
    val.vsx16val[offset] = gpr16;
    VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
    u8 gpr8)
{
    union kvmppc_one_reg val;
    int offset = kvmppc_get_vmx_byte_offset(vcpu,
            vcpu->arch.mmio_vmx_offset);
    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

    if (offset == -1)
        return;

    val.vval = VCPU_VSX_VR(vcpu, index);
    val.vsx8val[offset] = gpr8;
    VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
    u64 fprd;

    preempt_disable();
    enable_kernel_fp();
    asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
         : "fr0");
    preempt_enable();
    return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
    u32 fprs;

    preempt_disable();
    enable_kernel_fp();
    asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
         : "fr0");
    preempt_enable();
    return fprs;
}

#else
#define sp_to_dp(x) (x)
#define dp_to_sp(x) (x)
#endif /* CONFIG_PPC_FPU */
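
/*
 * Worked example: PPC FPRs always hold double-precision values, so a
 * 4-byte float arriving from MMIO must be widened before it can live
 * in an FPR. The lfs/stfd (and lfd/stfs) pairs above let the FPU do
 * the format conversion itself, e.g. sp_to_dp(0x3f800000) (1.0f)
 * yields 0x3ff0000000000000 (1.0), and dp_to_sp() narrows it back.
 */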

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    u64 gpr;

    if (run->mmio.len > sizeof(gpr))
        return;

    if (!vcpu->arch.mmio_host_swabbed) {
        switch (run->mmio.len) {
        case 8: gpr = *(u64 *)run->mmio.data; break;
        case 4: gpr = *(u32 *)run->mmio.data; break;
        case 2: gpr = *(u16 *)run->mmio.data; break;
        case 1: gpr = *(u8 *)run->mmio.data; break;
        }
    } else {
        switch (run->mmio.len) {
        case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
        case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
        case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
        case 1: gpr = *(u8 *)run->mmio.data; break;
        }
    }

    /* conversion between single and double precision */
    if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
        gpr = sp_to_dp(gpr);

    if (vcpu->arch.mmio_sign_extend) {
        switch (run->mmio.len) {
#ifdef CONFIG_PPC64
        case 4:
            gpr = (s64)(s32)gpr;
            break;
#endif
        case 2:
            gpr = (s64)(s16)gpr;
            break;
        case 1:
            gpr = (s64)(s8)gpr;
            break;
        }
    }

    switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
    case KVM_MMIO_REG_GPR:
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
        break;
    case KVM_MMIO_REG_FPR:
        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
            vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

        VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
        break;
#ifdef CONFIG_PPC_BOOK3S
    case KVM_MMIO_REG_QPR:
        vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
        break;
    case KVM_MMIO_REG_FQPR:
        VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
        vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
        break;
#endif
#ifdef CONFIG_VSX
    case KVM_MMIO_REG_VSX:
        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
            vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

        if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
            kvmppc_set_vsr_dword(vcpu, gpr);
        else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
            kvmppc_set_vsr_word(vcpu, gpr);
        else if (vcpu->arch.mmio_copy_type ==
                KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
            kvmppc_set_vsr_dword_dump(vcpu, gpr);
        else if (vcpu->arch.mmio_copy_type ==
                KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
            kvmppc_set_vsr_word_dump(vcpu, gpr);
        break;
#endif
#ifdef CONFIG_ALTIVEC
    case KVM_MMIO_REG_VMX:
        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
            vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

        if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
            kvmppc_set_vmx_dword(vcpu, gpr);
        else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
            kvmppc_set_vmx_word(vcpu, gpr);
        else if (vcpu->arch.mmio_copy_type ==
                KVMPPC_VMX_COPY_HWORD)
            kvmppc_set_vmx_hword(vcpu, gpr);
        else if (vcpu->arch.mmio_copy_type ==
                KVMPPC_VMX_COPY_BYTE)
            kvmppc_set_vmx_byte(vcpu, gpr);
        break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    case KVM_MMIO_REG_NESTED_GPR:
        if (kvmppc_need_byteswap(vcpu))
            gpr = swab64(gpr);
        kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
                     sizeof(gpr));
        break;
#endif
    default:
        BUG();
    }
}

static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
                unsigned int rt, unsigned int bytes,
                int is_default_endian, int sign_extend)
{
    struct kvm_run *run = vcpu->run;
    int idx, ret;
    bool host_swabbed;

    /* Pity C doesn't have a logical XOR operator */
    if (kvmppc_need_byteswap(vcpu)) {
        host_swabbed = is_default_endian;
    } else {
        host_swabbed = !is_default_endian;
    }

    if (bytes > sizeof(run->mmio.data))
        return EMULATE_FAIL;

    run->mmio.phys_addr = vcpu->arch.paddr_accessed;
    run->mmio.len = bytes;
    run->mmio.is_write = 0;

    vcpu->arch.io_gpr = rt;
    vcpu->arch.mmio_host_swabbed = host_swabbed;
    vcpu->mmio_needed = 1;
    vcpu->mmio_is_write = 0;
    vcpu->arch.mmio_sign_extend = sign_extend;

    idx = srcu_read_lock(&vcpu->kvm->srcu);

    ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                  bytes, &run->mmio.data);

    srcu_read_unlock(&vcpu->kvm->srcu, idx);

    if (!ret) {
        kvmppc_complete_mmio_load(vcpu);
        vcpu->mmio_needed = 0;
        return EMULATE_DONE;
    }

    return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_vcpu *vcpu,
               unsigned int rt, unsigned int bytes,
               int is_default_endian)
{
    return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
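
/*
 * For reference, the "logical XOR" dance in __kvmppc_handle_load()
 * (and again in kvmppc_handle_store() below) reduces to this table:
 *
 *      need_byteswap  is_default_endian  host_swabbed
 *      no             no                 yes
 *      no             yes                no
 *      yes            no                 no
 *      yes            yes                yes
 *
 * i.e. run->mmio.data is byteswapped relative to the host exactly when
 * the guest's current endianness differs from the host's.
 */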

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
            unsigned int rt, unsigned int bytes,
            int is_default_endian)
{
    return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
            unsigned int rt, unsigned int bytes,
            int is_default_endian, int mmio_sign_extend)
{
    enum emulation_result emulated = EMULATE_DONE;

    /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
    if (vcpu->arch.mmio_vsx_copy_nums > 4)
        return EMULATE_FAIL;

    while (vcpu->arch.mmio_vsx_copy_nums) {
        emulated = __kvmppc_handle_load(vcpu, rt, bytes,
            is_default_endian, mmio_sign_extend);

        if (emulated != EMULATE_DONE)
            break;

        vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

        vcpu->arch.mmio_vsx_copy_nums--;
        vcpu->arch.mmio_vsx_offset++;
    }
    return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_vcpu *vcpu,
            u64 val, unsigned int bytes, int is_default_endian)
{
    struct kvm_run *run = vcpu->run;
    void *data = run->mmio.data;
    int idx, ret;
    bool host_swabbed;

    /* Pity C doesn't have a logical XOR operator */
    if (kvmppc_need_byteswap(vcpu)) {
        host_swabbed = is_default_endian;
    } else {
        host_swabbed = !is_default_endian;
    }

    if (bytes > sizeof(run->mmio.data))
        return EMULATE_FAIL;

    run->mmio.phys_addr = vcpu->arch.paddr_accessed;
    run->mmio.len = bytes;
    run->mmio.is_write = 1;
    vcpu->mmio_needed = 1;
    vcpu->mmio_is_write = 1;

    if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
        val = dp_to_sp(val);

    /* Store the value at the lowest bytes in 'data'. */
    if (!host_swabbed) {
        switch (bytes) {
        case 8: *(u64 *)data = val; break;
        case 4: *(u32 *)data = val; break;
        case 2: *(u16 *)data = val; break;
        case 1: *(u8  *)data = val; break;
        }
    } else {
        switch (bytes) {
        case 8: *(u64 *)data = swab64(val); break;
        case 4: *(u32 *)data = swab32(val); break;
        case 2: *(u16 *)data = swab16(val); break;
        case 1: *(u8  *)data = val; break;
        }
    }

    idx = srcu_read_lock(&vcpu->kvm->srcu);

    ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                   bytes, &run->mmio.data);

    srcu_read_unlock(&vcpu->kvm->srcu, idx);

    if (!ret) {
        vcpu->mmio_needed = 0;
        return EMULATE_DONE;
    }

    return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
    u32 dword_offset, word_offset;
    union kvmppc_one_reg reg;
    int vsx_offset = 0;
    int copy_type = vcpu->arch.mmio_copy_type;
    int result = 0;

    switch (copy_type) {
    case KVMPPC_VSX_COPY_DWORD:
        vsx_offset =
            kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

        if (vsx_offset == -1) {
            result = -1;
            break;
        }

        if (rs < 32) {
            *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
        } else {
            reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
            *val = reg.vsxval[vsx_offset];
        }
        break;

    case KVMPPC_VSX_COPY_WORD:
        vsx_offset =
            kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

        if (vsx_offset == -1) {
            result = -1;
            break;
        }

        if (rs < 32) {
            dword_offset = vsx_offset / 2;
            word_offset = vsx_offset % 2;
            reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
            *val = reg.vsx32val[word_offset];
        } else {
            reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
            *val = reg.vsx32val[vsx_offset];
        }
        break;

    default:
        result = -1;
        break;
    }

    return result;
}

int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
            int rs, unsigned int bytes, int is_default_endian)
{
    u64 val;
    enum emulation_result emulated = EMULATE_DONE;

    vcpu->arch.io_gpr = rs;

    /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
    if (vcpu->arch.mmio_vsx_copy_nums > 4)
        return EMULATE_FAIL;

    while (vcpu->arch.mmio_vsx_copy_nums) {
        if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
            return EMULATE_FAIL;

        emulated = kvmppc_handle_store(vcpu,
             val, bytes, is_default_endian);

        if (emulated != EMULATE_DONE)
            break;

        vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

        vcpu->arch.mmio_vsx_copy_nums--;
        vcpu->arch.mmio_vsx_offset++;
    }

    return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    enum emulation_result emulated = EMULATE_FAIL;
    int r;

    vcpu->arch.paddr_accessed += run->mmio.len;

    if (!vcpu->mmio_is_write) {
        emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
             run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
    } else {
        emulated = kvmppc_handle_vsx_store(vcpu,
             vcpu->arch.io_gpr, run->mmio.len, 1);
    }

    switch (emulated) {
    case EMULATE_DO_MMIO:
        run->exit_reason = KVM_EXIT_MMIO;
        r = RESUME_HOST;
        break;
    case EMULATE_FAIL:
        pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
        r = RESUME_HOST;
        break;
    default:
        r = RESUME_GUEST;
        break;
    }
    return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
        unsigned int rt, unsigned int bytes, int is_default_endian)
{
    enum emulation_result emulated = EMULATE_DONE;

    if (vcpu->arch.mmio_vmx_copy_nums > 2)
        return EMULATE_FAIL;

    while (vcpu->arch.mmio_vmx_copy_nums) {
        emulated = __kvmppc_handle_load(vcpu, rt, bytes,
                is_default_endian, 0);

        if (emulated != EMULATE_DONE)
            break;

        vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
        vcpu->arch.mmio_vmx_copy_nums--;
        vcpu->arch.mmio_vmx_offset++;
    }

    return emulated;
}

static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
    union kvmppc_one_reg reg;
    int vmx_offset = 0;
    int result = 0;

    vmx_offset =
        kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

    if (vmx_offset == -1)
        return -1;

    reg.vval = VCPU_VSX_VR(vcpu, index);
    *val = reg.vsxval[vmx_offset];

    return result;
}

static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
    union kvmppc_one_reg reg;
    int vmx_offset = 0;
    int result = 0;

    vmx_offset =
        kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

    if (vmx_offset == -1)
        return -1;

    reg.vval = VCPU_VSX_VR(vcpu, index);
    *val = reg.vsx32val[vmx_offset];

    return result;
}

static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
    union kvmppc_one_reg reg;
    int vmx_offset = 0;
    int result = 0;

    vmx_offset =
        kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

    if (vmx_offset == -1)
        return -1;

    reg.vval = VCPU_VSX_VR(vcpu, index);
    *val = reg.vsx16val[vmx_offset];

    return result;
}

static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
    union kvmppc_one_reg reg;
    int vmx_offset = 0;
    int result = 0;

    vmx_offset =
        kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

    if (vmx_offset == -1)
        return -1;

    reg.vval = VCPU_VSX_VR(vcpu, index);
    *val = reg.vsx8val[vmx_offset];

    return result;
}

int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
        unsigned int rs, unsigned int bytes, int is_default_endian)
{
    u64 val = 0;
    unsigned int index = rs & KVM_MMIO_REG_MASK;
    enum emulation_result emulated = EMULATE_DONE;

    if (vcpu->arch.mmio_vmx_copy_nums > 2)
        return EMULATE_FAIL;

    vcpu->arch.io_gpr = rs;

    while (vcpu->arch.mmio_vmx_copy_nums) {
        switch (vcpu->arch.mmio_copy_type) {
        case KVMPPC_VMX_COPY_DWORD:
            if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
                return EMULATE_FAIL;
            break;
        case KVMPPC_VMX_COPY_WORD:
            if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
                return EMULATE_FAIL;
            break;
        case KVMPPC_VMX_COPY_HWORD:
            if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
                return EMULATE_FAIL;
            break;
        case KVMPPC_VMX_COPY_BYTE:
            if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
                return EMULATE_FAIL;
            break;
        default:
            return EMULATE_FAIL;
        }

        emulated = kvmppc_handle_store(vcpu, val, bytes,
                is_default_endian);
        if (emulated != EMULATE_DONE)
            break;

        vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
        vcpu->arch.mmio_vmx_copy_nums--;
        vcpu->arch.mmio_vmx_offset++;
    }

    return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    enum emulation_result emulated = EMULATE_FAIL;
    int r;

    vcpu->arch.paddr_accessed += run->mmio.len;

    if (!vcpu->mmio_is_write) {
        emulated = kvmppc_handle_vmx_load(vcpu,
                vcpu->arch.io_gpr, run->mmio.len, 1);
    } else {
        emulated = kvmppc_handle_vmx_store(vcpu,
                vcpu->arch.io_gpr, run->mmio.len, 1);
    }

    switch (emulated) {
    case EMULATE_DO_MMIO:
        run->exit_reason = KVM_EXIT_MMIO;
        r = RESUME_HOST;
        break;
    case EMULATE_FAIL:
        pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
        r = RESUME_HOST;
        break;
    default:
        r = RESUME_GUEST;
        break;
    }
    return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
    int r = 0;
    union kvmppc_one_reg val;
    int size;

    size = one_reg_size(reg->id);
    if (size > sizeof(val))
        return -EINVAL;

    r = kvmppc_get_one_reg(vcpu, reg->id, &val);
    if (r == -EINVAL) {
        r = 0;
        switch (reg->id) {
#ifdef CONFIG_ALTIVEC
        case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
            if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                r = -ENXIO;
                break;
            }
            val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
            break;
        case KVM_REG_PPC_VSCR:
            if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                r = -ENXIO;
                break;
            }
            val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
            break;
        case KVM_REG_PPC_VRSAVE:
            val = get_reg_val(reg->id, vcpu->arch.vrsave);
            break;
#endif /* CONFIG_ALTIVEC */
        default:
            r = -EINVAL;
            break;
        }
    }

    if (r)
        return r;

    if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
        r = -EFAULT;

    return r;
}
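
/*
 * Illustrative sketch: the userspace side passes a struct kvm_one_reg
 * whose addr points at a buffer matching the size encoded in the id,
 * e.g. reading VRSAVE:
 *
 *      __u32 vrsave;
 *      struct kvm_one_reg reg = {
 *              .id   = KVM_REG_PPC_VRSAVE,
 *              .addr = (__u64)(unsigned long)&vrsave,
 *      };
 *
 *      if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *              ;       // e.g. ENXIO: no AltiVec on this CPU
 */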
1759 
1760 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1761 {
1762     int r;
1763     union kvmppc_one_reg val;
1764     int size;
1765 
1766     size = one_reg_size(reg->id);
1767     if (size > sizeof(val))
1768         return -EINVAL;
1769 
1770     if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1771         return -EFAULT;
1772 
1773     r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1774     if (r == -EINVAL) {
1775         r = 0;
1776         switch (reg->id) {
1777 #ifdef CONFIG_ALTIVEC
1778         case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1779             if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1780                 r = -ENXIO;
1781                 break;
1782             }
1783             vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1784             break;
1785         case KVM_REG_PPC_VSCR:
1786             if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1787                 r = -ENXIO;
1788                 break;
1789             }
1790             vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1791             break;
1792         case KVM_REG_PPC_VRSAVE:
1793             if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1794                 r = -ENXIO;
1795                 break;
1796             }
1797             vcpu->arch.vrsave = set_reg_val(reg->id, val);
1798             break;
1799 #endif /* CONFIG_ALTIVEC */
1800         default:
1801             r = -EINVAL;
1802             break;
1803         }
1804     }
1805 
1806     return r;
1807 }
1808 
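/*
 * Main entry point for KVM_RUN.  Before (re)entering the guest, finish
 * off whatever exit userspace has just completed for us: an MMIO load
 * result, further segments of a multi-part VSX/VMX MMIO access, OSI or
 * PAPR hypercall return values, or (booke) an externally-proxied EPR.
 */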
1809 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1810 {
1811     struct kvm_run *run = vcpu->run;
1812     int r;
1813 
1814     vcpu_load(vcpu);
1815 
1816     if (vcpu->mmio_needed) {
1817         vcpu->mmio_needed = 0;
1818         if (!vcpu->mmio_is_write)
1819             kvmppc_complete_mmio_load(vcpu);
1820 #ifdef CONFIG_VSX
1821         if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1822             vcpu->arch.mmio_vsx_copy_nums--;
1823             vcpu->arch.mmio_vsx_offset++;
1824         }
1825 
1826         if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1827             r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1828             if (r == RESUME_HOST) {
1829                 vcpu->mmio_needed = 1;
1830                 goto out;
1831             }
1832         }
1833 #endif
1834 #ifdef CONFIG_ALTIVEC
1835         if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1836             vcpu->arch.mmio_vmx_copy_nums--;
1837             vcpu->arch.mmio_vmx_offset++;
1838         }
1839 
1840         if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1841             r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1842             if (r == RESUME_HOST) {
1843                 vcpu->mmio_needed = 1;
1844                 goto out;
1845             }
1846         }
1847 #endif
1848     } else if (vcpu->arch.osi_needed) {
1849         u64 *gprs = run->osi.gprs;
1850         int i;
1851 
1852         for (i = 0; i < 32; i++)
1853             kvmppc_set_gpr(vcpu, i, gprs[i]);
1854         vcpu->arch.osi_needed = 0;
1855     } else if (vcpu->arch.hcall_needed) {
1856         int i;
1857 
1858         kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1859         for (i = 0; i < 9; ++i)
1860             kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1861         vcpu->arch.hcall_needed = 0;
1862 #ifdef CONFIG_BOOKE
1863     } else if (vcpu->arch.epr_needed) {
1864         kvmppc_set_epr(vcpu, run->epr.epr);
1865         vcpu->arch.epr_needed = 0;
1866 #endif
1867     }
1868 
1869     kvm_sigset_activate(vcpu);
1870 
1871     if (run->immediate_exit)
1872         r = -EINTR;
1873     else
1874         r = kvmppc_vcpu_run(vcpu);
1875 
1876     kvm_sigset_deactivate(vcpu);
1877 
1878 #ifdef CONFIG_ALTIVEC
1879 out:
1880 #endif
1881 
1882     /*
1883      * We're already returning to userspace, so don't pass the
1884      * RESUME_HOST flag along.
1885      */
1886     if (r > 0)
1887         r = 0;
1888 
1889     vcpu_put(vcpu);
1890     return r;
1891 }
1892 
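/*
 * KVM_INTERRUPT (reached via the async ioctl path below): queue an
 * external interrupt for the vcpu and kick it out of guest mode, or
 * dequeue the interrupt again if irq->irq is KVM_INTERRUPT_UNSET.
 */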
1893 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1894 {
1895     if (irq->irq == KVM_INTERRUPT_UNSET) {
1896         kvmppc_core_dequeue_external(vcpu);
1897         return 0;
1898     }
1899 
1900     kvmppc_core_queue_external(vcpu, irq);
1901 
1902     kvm_vcpu_kick(vcpu);
1903 
1904     return 0;
1905 }
1906 
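/*
 * Enable a per-vcpu capability.  Most cases just set a flag in the
 * vcpu; the interrupt-controller capabilities additionally connect the
 * vcpu to an in-kernel device passed as a file descriptor in args[0].
 * On success the vcpu is re-checked with kvmppc_sanity_check(), since
 * some capabilities change which configurations are runnable.
 */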
1907 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1908                      struct kvm_enable_cap *cap)
1909 {
1910     int r;
1911 
1912     if (cap->flags)
1913         return -EINVAL;
1914 
1915     switch (cap->cap) {
1916     case KVM_CAP_PPC_OSI:
1917         r = 0;
1918         vcpu->arch.osi_enabled = true;
1919         break;
1920     case KVM_CAP_PPC_PAPR:
1921         r = 0;
1922         vcpu->arch.papr_enabled = true;
1923         break;
1924     case KVM_CAP_PPC_EPR:
1925         r = 0;
1926         if (cap->args[0])
1927             vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1928         else
1929             vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1930         break;
1931 #ifdef CONFIG_BOOKE
1932     case KVM_CAP_PPC_BOOKE_WATCHDOG:
1933         r = 0;
1934         vcpu->arch.watchdog_enabled = true;
1935         break;
1936 #endif
1937 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1938     case KVM_CAP_SW_TLB: {
1939         struct kvm_config_tlb cfg;
1940         void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1941 
1942         r = -EFAULT;
1943         if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1944             break;
1945 
1946         r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1947         break;
1948     }
1949 #endif
1950 #ifdef CONFIG_KVM_MPIC
1951     case KVM_CAP_IRQ_MPIC: {
1952         struct fd f;
1953         struct kvm_device *dev;
1954 
1955         r = -EBADF;
1956         f = fdget(cap->args[0]);
1957         if (!f.file)
1958             break;
1959 
1960         r = -EPERM;
1961         dev = kvm_device_from_filp(f.file);
1962         if (dev)
1963             r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1964 
1965         fdput(f);
1966         break;
1967     }
1968 #endif
1969 #ifdef CONFIG_KVM_XICS
1970     case KVM_CAP_IRQ_XICS: {
1971         struct fd f;
1972         struct kvm_device *dev;
1973 
1974         r = -EBADF;
1975         f = fdget(cap->args[0]);
1976         if (!f.file)
1977             break;
1978 
1979         r = -EPERM;
1980         dev = kvm_device_from_filp(f.file);
1981         if (dev) {
1982             if (xics_on_xive())
1983                 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1984             else
1985                 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1986         }
1987 
1988         fdput(f);
1989         break;
1990     }
1991 #endif /* CONFIG_KVM_XICS */
1992 #ifdef CONFIG_KVM_XIVE
1993     case KVM_CAP_PPC_IRQ_XIVE: {
1994         struct fd f;
1995         struct kvm_device *dev;
1996 
1997         r = -EBADF;
1998         f = fdget(cap->args[0]);
1999         if (!f.file)
2000             break;
2001 
2002         r = -ENXIO;
2003         if (!xive_enabled())
2004             break;
2005 
2006         r = -EPERM;
2007         dev = kvm_device_from_filp(f.file);
2008         if (dev)
2009             r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
2010                                 cap->args[1]);
2011 
2012         fdput(f);
2013         break;
2014     }
2015 #endif /* CONFIG_KVM_XIVE */
2016 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2017     case KVM_CAP_PPC_FWNMI:
2018         r = -EINVAL;
2019         if (!is_kvmppc_hv_enabled(vcpu->kvm))
2020             break;
2021         r = 0;
2022         vcpu->kvm->arch.fwnmi_enabled = true;
2023         break;
2024 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2025     default:
2026         r = -EINVAL;
2027         break;
2028     }
2029 
2030     if (!r)
2031         r = kvmppc_sanity_check(vcpu);
2032 
2033     return r;
2034 }
2035 
2036 bool kvm_arch_intc_initialized(struct kvm *kvm)
2037 {
2038 #ifdef CONFIG_KVM_MPIC
2039     if (kvm->arch.mpic)
2040         return true;
2041 #endif
2042 #ifdef CONFIG_KVM_XICS
2043     if (kvm->arch.xics || kvm->arch.xive)
2044         return true;
2045 #endif
2046     return false;
2047 }
2048 
2049 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2050                                     struct kvm_mp_state *mp_state)
2051 {
2052     return -EINVAL;
2053 }
2054 
2055 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2056                                     struct kvm_mp_state *mp_state)
2057 {
2058     return -EINVAL;
2059 }
2060 
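/*
 * ioctls handled here run without taking the vcpu mutex, which is what
 * allows KVM_INTERRUPT to be delivered to a vcpu that is currently
 * executing guest code.
 */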
2061 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2062                    unsigned int ioctl, unsigned long arg)
2063 {
2064     struct kvm_vcpu *vcpu = filp->private_data;
2065     void __user *argp = (void __user *)arg;
2066 
2067     if (ioctl == KVM_INTERRUPT) {
2068         struct kvm_interrupt irq;
2069         if (copy_from_user(&irq, argp, sizeof(irq)))
2070             return -EFAULT;
2071         return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2072     }
2073     return -ENOIOCTLCMD;
2074 }
2075 
2076 long kvm_arch_vcpu_ioctl(struct file *filp,
2077                          unsigned int ioctl, unsigned long arg)
2078 {
2079     struct kvm_vcpu *vcpu = filp->private_data;
2080     void __user *argp = (void __user *)arg;
2081     long r;
2082 
2083     switch (ioctl) {
2084     case KVM_ENABLE_CAP:
2085     {
2086         struct kvm_enable_cap cap;
2087         r = -EFAULT;
2088         if (copy_from_user(&cap, argp, sizeof(cap)))
2089             goto out;
2090         vcpu_load(vcpu);
2091         r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2092         vcpu_put(vcpu);
2093         break;
2094     }
2095 
2096     case KVM_SET_ONE_REG:
2097     case KVM_GET_ONE_REG:
2098     {
2099         struct kvm_one_reg reg;
2100         r = -EFAULT;
2101         if (copy_from_user(&reg, argp, sizeof(reg)))
2102             goto out;
2103         if (ioctl == KVM_SET_ONE_REG)
2104             r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2105         else
2106             r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2107         break;
2108     }
2109 
2110 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2111     case KVM_DIRTY_TLB: {
2112         struct kvm_dirty_tlb dirty;
2113         r = -EFAULT;
2114         if (copy_from_user(&dirty, argp, sizeof(dirty)))
2115             goto out;
2116         vcpu_load(vcpu);
2117         r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2118         vcpu_put(vcpu);
2119         break;
2120     }
2121 #endif
2122     default:
2123         r = -EINVAL;
2124     }
2125 
2126 out:
2127     return r;
2128 }
2129 
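/* There is no mmap'able vcpu region on powerpc, so fault with SIGBUS. */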
2130 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2131 {
2132     return VM_FAULT_SIGBUS;
2133 }
2134 
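/*
 * KVM_PPC_GET_PVINFO: report the instruction sequence a guest should
 * patch in to make a paravirtual hypercall.  On booke HV this is a
 * genuine hypercall ("sc 1"); elsewhere it is an ordinary system call
 * tagged with a magic value in r0.
 */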
2135 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2136 {
2137     u32 inst_nop = 0x60000000;
2138 #ifdef CONFIG_KVM_BOOKE_HV
2139     u32 inst_sc1 = 0x44000022;
2140     pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2141     pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2142     pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2143     pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2144 #else
2145     u32 inst_lis = 0x3c000000;
2146     u32 inst_ori = 0x60000000;
2147     u32 inst_sc = 0x44000002;
2148     u32 inst_imm_mask = 0xffff;
2149 
2150     /*
2151      * The hypercall to get into KVM from within guest context is as
2152      * follows:
2153      *
2154      *    lis r0, KVM_SC_MAGIC_R0@h
2155      *    ori r0, r0, KVM_SC_MAGIC_R0@l
2156      *    sc
2157      *    nop
2158      */
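    /*
     * With the usual KVM_SC_MAGIC_R0 value of 0x4b564d21 ("KVM!" in
     * ASCII; an assumption here, not taken from this file), the four
     * words below work out to 0x3c004b56, 0x60004d21, 0x44000002 and
     * 0x60000000.
     */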
2159     pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2160     pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2161     pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2162     pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2163 #endif
2164 
2165     pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2166 
2167     return 0;
2168 }
2169 
2170 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2171               bool line_status)
2172 {
2173     if (!irqchip_in_kernel(kvm))
2174         return -ENXIO;
2175 
2176     irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2177                     irq_event->irq, irq_event->level,
2178                     line_status);
2179     return 0;
2180 }
2181 
2182 
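/*
 * Enable a VM-wide capability.  As with the vcpu variant above,
 * unknown capabilities and unsupported configurations fail with
 * -EINVAL.
 */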
2183 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2184                 struct kvm_enable_cap *cap)
2185 {
2186     int r;
2187 
2188     if (cap->flags)
2189         return -EINVAL;
2190 
2191     switch (cap->cap) {
2192 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2193     case KVM_CAP_PPC_ENABLE_HCALL: {
2194         unsigned long hcall = cap->args[0];
2195 
2196         r = -EINVAL;
2197         if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2198             cap->args[1] > 1)
2199             break;
2200         if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2201             break;
2202         if (cap->args[1])
2203             set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2204         else
2205             clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2206         r = 0;
2207         break;
2208     }
2209     case KVM_CAP_PPC_SMT: {
2210         unsigned long mode = cap->args[0];
2211         unsigned long flags = cap->args[1];
2212 
2213         r = -EINVAL;
2214         if (kvm->arch.kvm_ops->set_smt_mode)
2215             r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2216         break;
2217     }
2218 
2219     case KVM_CAP_PPC_NESTED_HV:
2220         r = -EINVAL;
2221         if (!is_kvmppc_hv_enabled(kvm) ||
2222             !kvm->arch.kvm_ops->enable_nested)
2223             break;
2224         r = kvm->arch.kvm_ops->enable_nested(kvm);
2225         break;
2226 #endif
2227 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2228     case KVM_CAP_PPC_SECURE_GUEST:
2229         r = -EINVAL;
2230         if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2231             break;
2232         r = kvm->arch.kvm_ops->enable_svm(kvm);
2233         break;
2234     case KVM_CAP_PPC_DAWR1:
2235         r = -EINVAL;
2236         if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2237             break;
2238         r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2239         break;
2240 #endif
2241     default:
2242         r = -EINVAL;
2243         break;
2244     }
2245 
2246     return r;
2247 }
2248 
2249 #ifdef CONFIG_PPC_BOOK3S_64
2250 /*
2251  * These functions check whether the underlying hardware is safe
2252  * against attacks based on observing the effects of speculatively
2253  * executed instructions, and whether it supplies instructions for
2254  * use in workarounds.  The information comes from firmware, either
2255  * via the device tree on powernv platforms or from an hcall on
2256  * pseries platforms.
2257  */
2258 #ifdef CONFIG_PPC_PSERIES
2259 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2260 {
2261     struct h_cpu_char_result c;
2262     unsigned long rc;
2263 
2264     if (!machine_is(pseries))
2265         return -ENOTTY;
2266 
2267     rc = plpar_get_cpu_characteristics(&c);
2268     if (rc == H_SUCCESS) {
2269         cp->character = c.character;
2270         cp->behaviour = c.behaviour;
2271         cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2272             KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2273             KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2274             KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2275             KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2276             KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2277             KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2278             KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2279             KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2280         cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2281             KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2282             KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2283             KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2284     }
2285     return 0;
2286 }
2287 #else
2288 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2289 {
2290     return -ENOTTY;
2291 }
2292 #endif
2293 
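/*
 * Check whether the firmware feature node fw_features/<name> carries
 * the given state property ("enabled" or "disabled").
 */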
2294 static inline bool have_fw_feat(struct device_node *fw_features,
2295                 const char *state, const char *name)
2296 {
2297     struct device_node *np;
2298     bool r = false;
2299 
2300     np = of_get_child_by_name(fw_features, name);
2301     if (np) {
2302         r = of_property_read_bool(np, state);
2303         of_node_put(np);
2304     }
2305     return r;
2306 }
2307 
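/*
 * Fill in the CPU characteristics for KVM_PPC_GET_CPU_CHAR.  The
 * pseries hcall is tried first; on powernv the equivalent bits are
 * reconstructed from the OPAL fw-features device tree nodes instead.
 */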
2308 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2309 {
2310     struct device_node *np, *fw_features;
2311     int r;
2312 
2313     memset(cp, 0, sizeof(*cp));
2314     r = pseries_get_cpu_char(cp);
2315     if (r != -ENOTTY)
2316         return r;
2317 
2318     np = of_find_node_by_name(NULL, "ibm,opal");
2319     if (np) {
2320         fw_features = of_get_child_by_name(np, "fw-features");
2321         of_node_put(np);
2322         if (!fw_features)
2323             return 0;
2324         if (have_fw_feat(fw_features, "enabled",
2325                  "inst-spec-barrier-ori31,31,0"))
2326             cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2327         if (have_fw_feat(fw_features, "enabled",
2328                  "fw-bcctrl-serialized"))
2329             cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2330         if (have_fw_feat(fw_features, "enabled",
2331                  "inst-l1d-flush-ori30,30,0"))
2332             cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2333         if (have_fw_feat(fw_features, "enabled",
2334                  "inst-l1d-flush-trig2"))
2335             cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2336         if (have_fw_feat(fw_features, "enabled",
2337                  "fw-l1d-thread-split"))
2338             cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2339         if (have_fw_feat(fw_features, "enabled",
2340                  "fw-count-cache-disabled"))
2341             cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2342         if (have_fw_feat(fw_features, "enabled",
2343                  "fw-count-cache-flush-bcctr2,0,0"))
2344             cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2345         cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2346             KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2347             KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2348             KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2349             KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2350             KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2351             KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2352 
2353         if (have_fw_feat(fw_features, "enabled",
2354                  "speculation-policy-favor-security"))
2355             cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2356         if (!have_fw_feat(fw_features, "disabled",
2357                   "needs-l1d-flush-msr-pr-0-to-1"))
2358             cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2359         if (!have_fw_feat(fw_features, "disabled",
2360                   "needs-spec-barrier-for-bound-checks"))
2361             cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2362         if (have_fw_feat(fw_features, "enabled",
2363                  "needs-count-cache-flush-on-context-switch"))
2364             cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2365         cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2366             KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2367             KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2368             KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2369 
2370         of_node_put(fw_features);
2371     }
2372 
2373     return 0;
2374 }
2375 #endif
2376 
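/*
 * VM-scope ioctls.  On Book3S 64 anything unrecognised here is
 * forwarded to the kvm_ops backend; other platforms return -ENOTTY.
 */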
2377 long kvm_arch_vm_ioctl(struct file *filp,
2378                        unsigned int ioctl, unsigned long arg)
2379 {
2380     struct kvm *kvm __maybe_unused = filp->private_data;
2381     void __user *argp = (void __user *)arg;
2382     long r;
2383 
2384     switch (ioctl) {
2385     case KVM_PPC_GET_PVINFO: {
2386         struct kvm_ppc_pvinfo pvinfo;
2387         memset(&pvinfo, 0, sizeof(pvinfo));
2388         r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2389         if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2390             r = -EFAULT;
2391             goto out;
2392         }
2393 
2394         break;
2395     }
2396 #ifdef CONFIG_SPAPR_TCE_IOMMU
2397     case KVM_CREATE_SPAPR_TCE_64: {
2398         struct kvm_create_spapr_tce_64 create_tce_64;
2399 
2400         r = -EFAULT;
2401         if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2402             goto out;
2403         if (create_tce_64.flags) {
2404             r = -EINVAL;
2405             goto out;
2406         }
2407         r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2408         goto out;
2409     }
2410     case KVM_CREATE_SPAPR_TCE: {
2411         struct kvm_create_spapr_tce create_tce;
2412         struct kvm_create_spapr_tce_64 create_tce_64;
2413 
2414         r = -EFAULT;
2415         if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2416             goto out;
2417 
2418         create_tce_64.liobn = create_tce.liobn;
2419         create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2420         create_tce_64.offset = 0;
2421         create_tce_64.size = create_tce.window_size >>
2422                 IOMMU_PAGE_SHIFT_4K;
2423         create_tce_64.flags = 0;
2424         r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2425         goto out;
2426     }
2427 #endif
2428 #ifdef CONFIG_PPC_BOOK3S_64
2429     case KVM_PPC_GET_SMMU_INFO: {
2430         struct kvm_ppc_smmu_info info;
2431         struct kvm *kvm = filp->private_data;
2432 
2433         memset(&info, 0, sizeof(info));
2434         r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2435         if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2436             r = -EFAULT;
2437         break;
2438     }
2439     case KVM_PPC_RTAS_DEFINE_TOKEN: {
2440         struct kvm *kvm = filp->private_data;
2441 
2442         r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2443         break;
2444     }
2445     case KVM_PPC_CONFIGURE_V3_MMU: {
2446         struct kvm *kvm = filp->private_data;
2447         struct kvm_ppc_mmuv3_cfg cfg;
2448 
2449         r = -EINVAL;
2450         if (!kvm->arch.kvm_ops->configure_mmu)
2451             goto out;
2452         r = -EFAULT;
2453         if (copy_from_user(&cfg, argp, sizeof(cfg)))
2454             goto out;
2455         r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2456         break;
2457     }
2458     case KVM_PPC_GET_RMMU_INFO: {
2459         struct kvm *kvm = filp->private_data;
2460         struct kvm_ppc_rmmu_info info;
2461 
2462         r = -EINVAL;
2463         if (!kvm->arch.kvm_ops->get_rmmu_info)
2464             goto out;
2465         r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2466         if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2467             r = -EFAULT;
2468         break;
2469     }
2470     case KVM_PPC_GET_CPU_CHAR: {
2471         struct kvm_ppc_cpu_char cpuchar;
2472 
2473         r = kvmppc_get_cpu_char(&cpuchar);
2474         if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2475             r = -EFAULT;
2476         break;
2477     }
2478     case KVM_PPC_SVM_OFF: {
2479         struct kvm *kvm = filp->private_data;
2480 
2481         r = 0;
2482         if (!kvm->arch.kvm_ops->svm_off)
2483             goto out;
2484 
2485         r = kvm->arch.kvm_ops->svm_off(kvm);
2486         break;
2487     }
2488     default: {
2489         struct kvm *kvm = filp->private_data;
2490         r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2491     }
2492 #else /* CONFIG_PPC_BOOK3S_64 */
2493     default:
2494         r = -ENOTTY;
2495 #endif
2496     }
2497 out:
2498     return r;
2499 }
2500 
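/*
 * LPID (logical partition ID) allocator.  LPID 0 is reserved for the
 * host, so guest LPIDs are handed out from 1 up to the limit the
 * platform code declared via kvmppc_init_lpid().
 */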
2501 static DEFINE_IDA(lpid_inuse);
2502 static unsigned long nr_lpids;
2503 
2504 long kvmppc_alloc_lpid(void)
2505 {
2506     int lpid;
2507 
2508     /* The host LPID must always be 0 (allocation starts at 1) */
2509     lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
2510     if (lpid < 0) {
2511         if (lpid == -ENOMEM)
2512             pr_err("%s: Out of memory\n", __func__);
2513         else
2514             pr_err("%s: No LPIDs free\n", __func__);
2515         return -ENOMEM;
2516     }
2517 
2518     return lpid;
2519 }
2520 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2521 
2522 void kvmppc_free_lpid(long lpid)
2523 {
2524     ida_free(&lpid_inuse, lpid);
2525 }
2526 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2527 
2528 /* nr_lpids_param includes the host LPID */
2529 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2530 {
2531     nr_lpids = nr_lpids_param;
2532 }
2533 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2534 
2535 int kvm_arch_init(void *opaque)
2536 {
2537     return 0;
2538 }
2539 
2540 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2541 
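/* Optional debugfs hooks, delegated to the kvm_ops backend if it provides them. */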
2542 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2543 {
2544     if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2545         vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2546 }
2547 
2548 int kvm_arch_create_vm_debugfs(struct kvm *kvm)
2549 {
2550     if (kvm->arch.kvm_ops->create_vm_debugfs)
2551         kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2552     return 0;
2553 }