// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

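/*
 * Facility checks: if the guest's MSR has the relevant facility (FP,
 * VSX, Altivec) disabled, queue the corresponding "unavailable"
 * interrupt for the guest instead of emulating the access.
 */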
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
    if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
        kvmppc_core_queue_fpunavail(vcpu);
        return true;
    }

    return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
    if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
        kvmppc_core_queue_vsx_unavail(vcpu);
        return true;
    }

    return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
    if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
        kvmppc_core_queue_vec_unavail(vcpu);
        return true;
    }

    return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
    u32 inst;
    enum emulation_result emulated = EMULATE_FAIL;
    struct instruction_op op;

    /* this default type might be overwritten by subcategories */
    kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

    emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
    if (emulated != EMULATE_DONE)
        return emulated;

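    /* Reset any MMIO emulation state left over from a previous exit. */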
    vcpu->arch.mmio_vsx_copy_nums = 0;
    vcpu->arch.mmio_vsx_offset = 0;
    vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
    vcpu->arch.mmio_sp64_extend = 0;
    vcpu->arch.mmio_sign_extend = 0;
    vcpu->arch.mmio_vmx_copy_nums = 0;
    vcpu->arch.mmio_vmx_offset = 0;
    vcpu->arch.mmio_host_swabbed = 0;

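    /*
     * analyse_instr() decodes relative to the guest's MSR (e.g. for
     * endianness), so seed regs.msr from the shared page first.
     */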
    emulated = EMULATE_FAIL;
    vcpu->arch.regs.msr = vcpu->arch.shared->msr;
    if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
        int type = op.type & INSTR_TYPE_MASK;
        int size = GETSIZE(op.type);

        vcpu->mmio_is_write = OP_IS_STORE(type);

        switch (type) {
        case LOAD: {
            int instr_byte_swap = op.type & BYTEREV;

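            /*
             * Byte-reversed loads (e.g. lwbrx) invert the
             * default-endian flag passed to the MMIO handler.
             */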
            if (op.type & SIGNEXT)
                emulated = kvmppc_handle_loads(vcpu,
                        op.reg, size, !instr_byte_swap);
            else
                emulated = kvmppc_handle_load(vcpu,
                        op.reg, size, !instr_byte_swap);

            if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

            break;
        }
#ifdef CONFIG_PPC_FPU
        case LOAD_FP:
            if (kvmppc_check_fp_disabled(vcpu))
                return EMULATE_DONE;

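            /*
             * FPCONV: single-precision load; the value is extended
             * to double-precision format on completion.
             */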
            if (op.type & FPCONV)
                vcpu->arch.mmio_sp64_extend = 1;

            if (op.type & SIGNEXT)
                emulated = kvmppc_handle_loads(vcpu,
                        KVM_MMIO_REG_FPR|op.reg, size, 1);
            else
                emulated = kvmppc_handle_load(vcpu,
                        KVM_MMIO_REG_FPR|op.reg, size, 1);

            if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

            break;
#endif
#ifdef CONFIG_ALTIVEC
        case LOAD_VMX:
            if (kvmppc_check_altivec_disabled(vcpu))
                return EMULATE_DONE;

            /* Hardware enforces alignment of VMX accesses */
            vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
            vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

            if (size == 16) { /* lvx */
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VMX_COPY_DWORD;
            } else if (size == 4) { /* lvewx */
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VMX_COPY_WORD;
            } else if (size == 2) { /* lvehx */
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VMX_COPY_HWORD;
            } else if (size == 1) { /* lvebx */
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VMX_COPY_BYTE;
            } else
                break;

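            /* Element index within the 16-byte VMX register. */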
            vcpu->arch.mmio_vmx_offset =
                (vcpu->arch.vaddr_accessed & 0xf)/size;

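            /* A full 16-byte lvx is emulated as two 8-byte passes. */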
            if (size == 16) {
                vcpu->arch.mmio_vmx_copy_nums = 2;
                emulated = kvmppc_handle_vmx_load(vcpu,
                        KVM_MMIO_REG_VMX|op.reg,
                        8, 1);
            } else {
                vcpu->arch.mmio_vmx_copy_nums = 1;
                emulated = kvmppc_handle_vmx_load(vcpu,
                        KVM_MMIO_REG_VMX|op.reg,
                        size, 1);
            }
            break;
#endif
#ifdef CONFIG_VSX
        case LOAD_VSX: {
            int io_size_each;

            if (op.vsx_flags & VSX_CHECK_VEC) {
                if (kvmppc_check_altivec_disabled(vcpu))
                    return EMULATE_DONE;
            } else {
                if (kvmppc_check_vsx_disabled(vcpu))
                    return EMULATE_DONE;
            }

            if (op.vsx_flags & VSX_FPCONV)
                vcpu->arch.mmio_sp64_extend = 1;

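            /*
             * Pick the register-copy layout from the VSX element
             * size; SPLAT variants replicate the loaded element
             * across the whole register.
             */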
            if (op.element_size == 8) {
                if (op.vsx_flags & VSX_SPLAT)
                    vcpu->arch.mmio_copy_type =
                        KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
                else
                    vcpu->arch.mmio_copy_type =
                        KVMPPC_VSX_COPY_DWORD;
            } else if (op.element_size == 4) {
                if (op.vsx_flags & VSX_SPLAT)
                    vcpu->arch.mmio_copy_type =
                        KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
                else
                    vcpu->arch.mmio_copy_type =
                        KVMPPC_VSX_COPY_WORD;
            } else
                break;

            if (size < op.element_size) {
                /* precision convert case: lxsspx, etc */
                vcpu->arch.mmio_vsx_copy_nums = 1;
                io_size_each = size;
            } else { /* lxvw4x, lxvd2x, etc */
                vcpu->arch.mmio_vsx_copy_nums =
                    size/op.element_size;
                io_size_each = op.element_size;
            }

            emulated = kvmppc_handle_vsx_load(vcpu,
                    KVM_MMIO_REG_VSX|op.reg, io_size_each,
                    1, op.type & SIGNEXT);
            break;
        }
#endif
        case STORE:
            /* If byte reversal is needed, op.val has already been
             * reversed by analyse_instr().
             */
            emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

            if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

            break;
#ifdef CONFIG_PPC_FPU
        case STORE_FP:
            if (kvmppc_check_fp_disabled(vcpu))
                return EMULATE_DONE;

            /* The FP registers need to be flushed so that
             * kvmppc_handle_store() can read the actual FP values
             * from vcpu->arch.
             */
            if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                        MSR_FP);

            if (op.type & FPCONV)
                vcpu->arch.mmio_sp64_extend = 1;

            emulated = kvmppc_handle_store(vcpu,
                    VCPU_FPR(vcpu, op.reg), size, 1);

            if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
                kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

            break;
#endif
#ifdef CONFIG_ALTIVEC
        case STORE_VMX:
            if (kvmppc_check_altivec_disabled(vcpu))
                return EMULATE_DONE;

            /* Hardware enforces alignment of VMX accesses. */
            vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
            vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

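            /* Flush VMX state so the store reads current values. */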
            if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                        MSR_VEC);
            if (size == 16) { /* stvx */
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VMX_COPY_DWORD;
            } else if (size == 4) { /* stvewx */
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VMX_COPY_WORD;
            } else if (size == 2) { /* stvehx */
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VMX_COPY_HWORD;
            } else if (size == 1) { /* stvebx */
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VMX_COPY_BYTE;
            } else
                break;

            vcpu->arch.mmio_vmx_offset =
                (vcpu->arch.vaddr_accessed & 0xf)/size;

            if (size == 16) {
                vcpu->arch.mmio_vmx_copy_nums = 2;
                emulated = kvmppc_handle_vmx_store(vcpu,
                        op.reg, 8, 1);
            } else {
                vcpu->arch.mmio_vmx_copy_nums = 1;
                emulated = kvmppc_handle_vmx_store(vcpu,
                        op.reg, size, 1);
            }

            break;
#endif
#ifdef CONFIG_VSX
        case STORE_VSX: {
            int io_size_each;

            if (op.vsx_flags & VSX_CHECK_VEC) {
                if (kvmppc_check_altivec_disabled(vcpu))
                    return EMULATE_DONE;
            } else {
                if (kvmppc_check_vsx_disabled(vcpu))
                    return EMULATE_DONE;
            }

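            /* Flush VSX state so the store reads current values. */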
            if (vcpu->kvm->arch.kvm_ops->giveup_ext)
                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
                        MSR_VSX);

            if (op.vsx_flags & VSX_FPCONV)
                vcpu->arch.mmio_sp64_extend = 1;

            if (op.element_size == 8)
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VSX_COPY_DWORD;
            else if (op.element_size == 4)
                vcpu->arch.mmio_copy_type =
                        KVMPPC_VSX_COPY_WORD;
            else
                break;

            if (size < op.element_size) {
                /* precision convert case, like stxsspx */
                vcpu->arch.mmio_vsx_copy_nums = 1;
                io_size_each = size;
            } else { /* stxvw4x, stxvd2x, etc */
                vcpu->arch.mmio_vsx_copy_nums =
                        size/op.element_size;
                io_size_each = op.element_size;
            }

            emulated = kvmppc_handle_vsx_store(vcpu,
                    op.reg, io_size_each, 1);
            break;
        }
#endif
        case CACHEOP:
            /* Do nothing. The guest is performing dcbi because
             * hardware DMA is not snooped by the dcache, but
             * emulated DMA either goes through the dcache as
             * normal writes, or the host kernel has already
             * handled dcache coherence.
             */
            emulated = EMULATE_DONE;
            break;
        default:
            break;
        }
    }

    trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

    /* Advance past emulated instruction. */
    if (emulated != EMULATE_FAIL)
        kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

    return emulated;
}