/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define KVM_MAGIC_PAGE      (-4096)
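
/*
 * -4096 wraps to the last page of the effective address space, so the
 * magic page's fields can be reached with ordinary load/store
 * instructions that use "register" 0 (a literal zero base in D-form
 * addressing) plus a negative offset.
 */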

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)   ld  reg, (offs)(reg2)
#define STL64(reg, offs, reg2)  std reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)   lwz reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)  stw reg, (offs + 4)(reg2)
#endif
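
/*
 * The MSR field in the magic page is 64 bits wide; a 32-bit kernel
 * only cares about the low word, and "offs + 4" selects that word
 * since the field is stored big-endian.
 */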
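/*
 * SCRATCH_SAVE/SCRATCH_RESTORE bracket each emulation sequence below.
 * While shared->critical equals the current r1, the host treats the
 * guest as being in a critical section and defers interrupt delivery,
 * so the scratch slots and CR are not clobbered mid-sequence.
 */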
#define SCRATCH_SAVE                            \
    /* Enable critical section. We are critical if          \
       shared->critical == r1 */                    \
    STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);      \
                                    \
    /* Save state */                        \
    PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);      \
    PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);      \
    mfcr    r31;                            \
    stw r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE                         \
    /* Restore state */                     \
    PPC_LL  r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);      \
    lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);      \
    mtcr    r30;                            \
    PPC_LL  r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);      \
                                    \
    /* Disable critical section. We are critical if         \
       shared->critical == r1 and r2 is always != r1 */     \
    STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
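/*
 * Everything between kvm_template_start and kvm_template_end is a set
 * of templates. The patching code in arch/powerpc/kernel/kvm.c copies
 * a template into the kvm_tmp buffer at the bottom of this file and
 * rewrites the trapping guest instruction into a branch to that copy.
 * The *_offs words following each template are instruction indices
 * (hence the division by 4) telling the patcher which slots to fix up:
 * the source register, the original instruction and the branch back
 * to the patch site.
 */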
.global kvm_template_start
kvm_template_start:

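/*
 * Template for "mtmsrd rX, 1", which may only change MSR_EE and
 * MSR_RI. Both bits are folded into the MSR copy in the magic page;
 * the hypervisor only has to be involved when enabling MSR_EE while
 * an interrupt is pending.
 */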
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

    SCRATCH_SAVE

    /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
    LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
    lis r30, (~(MSR_EE | MSR_RI))@h
    ori r30, r30, (~(MSR_EE | MSR_RI))@l
    and r31, r31, r30

    /* OR the register's (MSR_EE|MSR_RI) bits into MSR */
kvm_emulate_mtmsrd_reg:
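    /*
     * r0 below is a placeholder: this instruction (like the other
     * *_reg slots in this file) is rewritten at patch time to read
     * the source register of the original instruction.
     */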
    ori r30, r0, 0
    andi.   r30, r30, (MSR_EE|MSR_RI)
    or  r31, r31, r30

    /* Put MSR back into magic page */
    STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

    /* Check if we have to fetch an interrupt */
    lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
    cmpwi   r31, 0
    beq+    no_check

    /* Check if we may trigger an interrupt */
    andi.   r30, r30, MSR_EE
    beq no_check

    SCRATCH_RESTORE

    /* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
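    /*
     * The tlbsync is a placeholder: this slot receives the original
     * privileged instruction at patch time, so executing it traps
     * into the hypervisor, which can then deliver the pending
     * interrupt.
     */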
    tlbsync

    b   kvm_emulate_mtmsrd_branch

no_check:

    SCRATCH_RESTORE

    /* Go back to caller */
kvm_emulate_mtmsrd_branch:
    b   .
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
    .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
    .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
    .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
    .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4


#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS

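/*
 * Template for "mtmsr rX". If only the safe bits (MSR_EE, MSR_RI)
 * differ from the MSR copy in the magic page, the write happens there
 * and the guest keeps running; a change to any critical bit falls
 * back to the real mtmsr, which traps into the hypervisor.
 */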
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

    SCRATCH_SAVE

    /* Fetch old MSR in r31 */
    LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

    /* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
    ori r30, r0, 0
    xor r31, r30, r31

    /* Check if we need to really do mtmsr */
    LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
    and.    r31, r31, r30

    /* No critical bits changed? Maybe we can stay in the guest. */
    beq maybe_stay_in_guest

do_mtmsr:

    SCRATCH_RESTORE

    /* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
    mtmsr   r0

    b   kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

    /* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
    ori r30, r0, 0

    /* Put MSR into magic page because we don't call mtmsr */
    STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

    /* Check if we have to fetch an interrupt */
    lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
    cmpwi   r31, 0
    beq+    no_mtmsr

    /* Check if we may trigger an interrupt */
    andi.   r31, r30, MSR_EE
    bne do_mtmsr

no_mtmsr:

    SCRATCH_RESTORE

    /* Go back to caller */
kvm_emulate_mtmsr_branch:
    b   .
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
    .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
    .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
    .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
    .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
    .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

#ifdef CONFIG_BOOKE

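/*
 * Template for "wrtee rX", which changes only MSR_EE. The new value
 * goes into the MSR copy in the magic page; the real wrtee is only
 * executed when enabling interrupts with one already pending.
 */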
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

    SCRATCH_SAVE

    /* Fetch old MSR in r31 */
    LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

    /* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
    ori r30, r0, 0
    rlwimi  r31, r30, 0, MSR_EE

    /*
     * If MSR[EE] is now set, check for a pending interrupt.
     * We could skip this if MSR[EE] was already on, but that
     * should be rare, so don't bother.
     */
    andi.   r30, r30, MSR_EE

    /* Put MSR into magic page because we don't call wrtee */
    STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

    beq no_wrtee

    /* Check if we have to fetch an interrupt */
    lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
    cmpwi   r30, 0
    bne do_wrtee

no_wrtee:
    SCRATCH_RESTORE

    /* Go back to caller */
kvm_emulate_wrtee_branch:
    b   .

do_wrtee:
    SCRATCH_RESTORE

    /* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
    wrtee   r0

    b   kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
    .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
    .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
    .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
    .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

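/*
 * Template for "wrteei 0". Clearing MSR_EE can never unmask a pending
 * interrupt, so no interrupt check is needed and this sequence never
 * has to exit to the hypervisor.
 */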
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
    SCRATCH_SAVE

    /* Fetch old MSR in r31 */
    LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

    /* Remove MSR_EE from old MSR */
    rlwinm  r31, r31, 0, ~MSR_EE

    /* Write new MSR value back */
    STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

    SCRATCH_RESTORE

    /* Go back to caller */
kvm_emulate_wrteei_0_branch:
    b   .
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
    .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
    .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

#endif /* CONFIG_BOOKE */

#ifdef CONFIG_PPC_BOOK3S_32

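/*
 * Template for "mtsrin rS, rB". With address translation enabled
 * (MSR_IR or MSR_DR set) the update must take effect immediately, so
 * the original trapping instruction is run; with translation off, the
 * value is simply stored into the magic page's SR array, indexed by
 * the SR number in the top four bits of rB.
 */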
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

    SCRATCH_SAVE

    LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
    andi.   r31, r31, MSR_DR | MSR_IR
    beq kvm_emulate_mtsrin_reg1

    SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
    nop
    b   kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
    /* rX >> 26 */
    rlwinm  r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
    stw r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

    SCRATCH_RESTORE

    /* Go back to caller */
kvm_emulate_mtsrin_branch:
    b   .
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
    .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
    .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
    .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
    .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
    .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

#endif /* CONFIG_PPC_BOOK3S_32 */

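/*
 * kvm_tmp is the buffer the templates above are copied into when
 * instructions get patched; 64k leaves room for many patch sites.
 */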
    .balign 4
    .global kvm_tmp
kvm_tmp:
    .space  (64 * 1024)

.global kvm_tmp_end
kvm_tmp_end:

.global kvm_template_end
kvm_template_end: