/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * MIPS SIMD Architecture (MSA) context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>

    .set    noreorder
    .set    noat
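
/*
 * void __kvm_save_msa(struct kvm_vcpu_arch *arch)
 *
 * Save all 32 full 128-bit MSA vector registers into the FPU context of
 * the vcpu architecture state pointed to by a0 (the VCPU_FPR* offsets
 * come from asm-offsets). The st_d macro from asmmacro.h stores one
 * whole vector register at a time.
 */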
LEAF(__kvm_save_msa)
    st_d    0,  VCPU_FPR0,  a0
    st_d    1,  VCPU_FPR1,  a0
    st_d    2,  VCPU_FPR2,  a0
    st_d    3,  VCPU_FPR3,  a0
    st_d    4,  VCPU_FPR4,  a0
    st_d    5,  VCPU_FPR5,  a0
    st_d    6,  VCPU_FPR6,  a0
    st_d    7,  VCPU_FPR7,  a0
    st_d    8,  VCPU_FPR8,  a0
    st_d    9,  VCPU_FPR9,  a0
    st_d    10, VCPU_FPR10, a0
    st_d    11, VCPU_FPR11, a0
    st_d    12, VCPU_FPR12, a0
    st_d    13, VCPU_FPR13, a0
    st_d    14, VCPU_FPR14, a0
    st_d    15, VCPU_FPR15, a0
    st_d    16, VCPU_FPR16, a0
    st_d    17, VCPU_FPR17, a0
    st_d    18, VCPU_FPR18, a0
    st_d    19, VCPU_FPR19, a0
    st_d    20, VCPU_FPR20, a0
    st_d    21, VCPU_FPR21, a0
    st_d    22, VCPU_FPR22, a0
    st_d    23, VCPU_FPR23, a0
    st_d    24, VCPU_FPR24, a0
    st_d    25, VCPU_FPR25, a0
    st_d    26, VCPU_FPR26, a0
    st_d    27, VCPU_FPR27, a0
    st_d    28, VCPU_FPR28, a0
    st_d    29, VCPU_FPR29, a0
    st_d    30, VCPU_FPR30, a0
    st_d    31, VCPU_FPR31, a0
    jr  ra
     nop
    END(__kvm_save_msa)

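/*
 * void __kvm_restore_msa(struct kvm_vcpu_arch *arch)
 *
 * Restore all 32 full 128-bit MSA vector registers from the vcpu FPU
 * context pointed to by a0, mirroring __kvm_save_msa above.
 */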
LEAF(__kvm_restore_msa)
    ld_d    0,  VCPU_FPR0,  a0
    ld_d    1,  VCPU_FPR1,  a0
    ld_d    2,  VCPU_FPR2,  a0
    ld_d    3,  VCPU_FPR3,  a0
    ld_d    4,  VCPU_FPR4,  a0
    ld_d    5,  VCPU_FPR5,  a0
    ld_d    6,  VCPU_FPR6,  a0
    ld_d    7,  VCPU_FPR7,  a0
    ld_d    8,  VCPU_FPR8,  a0
    ld_d    9,  VCPU_FPR9,  a0
    ld_d    10, VCPU_FPR10, a0
    ld_d    11, VCPU_FPR11, a0
    ld_d    12, VCPU_FPR12, a0
    ld_d    13, VCPU_FPR13, a0
    ld_d    14, VCPU_FPR14, a0
    ld_d    15, VCPU_FPR15, a0
    ld_d    16, VCPU_FPR16, a0
    ld_d    17, VCPU_FPR17, a0
    ld_d    18, VCPU_FPR18, a0
    ld_d    19, VCPU_FPR19, a0
    ld_d    20, VCPU_FPR20, a0
    ld_d    21, VCPU_FPR21, a0
    ld_d    22, VCPU_FPR22, a0
    ld_d    23, VCPU_FPR23, a0
    ld_d    24, VCPU_FPR24, a0
    ld_d    25, VCPU_FPR25, a0
    ld_d    26, VCPU_FPR26, a0
    ld_d    27, VCPU_FPR27, a0
    ld_d    28, VCPU_FPR28, a0
    ld_d    29, VCPU_FPR29, a0
    ld_d    30, VCPU_FPR30, a0
    ld_d    31, VCPU_FPR31, a0
    jr  ra
     nop
    END(__kvm_restore_msa)

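/*
 * Restore the upper 64 bits of a single MSA vector register \wr from
 * \off(\base). The lower 64 bits alias the scalar FP registers and are
 * assumed to be live already. A 64-bit kernel needs one ld/insert_d
 * pair; a 32-bit kernel needs two lw/insert_w pairs, with the word
 * order chosen to match the kernel's endianness.
 */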
    .macro  kvm_restore_msa_upper   wr, off, base
    .set    push
    .set    noat
#ifdef CONFIG_64BIT
    ld  $1, \off(\base)
    insert_d \wr, 1
#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
    lw  $1, \off(\base)
    insert_w \wr, 2
    lw  $1, (\off+4)(\base)
    insert_w \wr, 3
#else /* CONFIG_CPU_BIG_ENDIAN */
    lw  $1, (\off+4)(\base)
    insert_w \wr, 2
    lw  $1, \off(\base)
    insert_w \wr, 3
#endif
    .set    pop
    .endm

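/*
 * void __kvm_restore_msa_upper(struct kvm_vcpu_arch *arch)
 *
 * Restore only the upper half of each of the 32 vector registers, for
 * use when the scalar FP context (the lower 64 bits of each vector
 * register) has already been restored.
 */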
LEAF(__kvm_restore_msa_upper)
    kvm_restore_msa_upper   0,  VCPU_FPR0 +8, a0
    kvm_restore_msa_upper   1,  VCPU_FPR1 +8, a0
    kvm_restore_msa_upper   2,  VCPU_FPR2 +8, a0
    kvm_restore_msa_upper   3,  VCPU_FPR3 +8, a0
    kvm_restore_msa_upper   4,  VCPU_FPR4 +8, a0
    kvm_restore_msa_upper   5,  VCPU_FPR5 +8, a0
    kvm_restore_msa_upper   6,  VCPU_FPR6 +8, a0
    kvm_restore_msa_upper   7,  VCPU_FPR7 +8, a0
    kvm_restore_msa_upper   8,  VCPU_FPR8 +8, a0
    kvm_restore_msa_upper   9,  VCPU_FPR9 +8, a0
    kvm_restore_msa_upper   10, VCPU_FPR10+8, a0
    kvm_restore_msa_upper   11, VCPU_FPR11+8, a0
    kvm_restore_msa_upper   12, VCPU_FPR12+8, a0
    kvm_restore_msa_upper   13, VCPU_FPR13+8, a0
    kvm_restore_msa_upper   14, VCPU_FPR14+8, a0
    kvm_restore_msa_upper   15, VCPU_FPR15+8, a0
    kvm_restore_msa_upper   16, VCPU_FPR16+8, a0
    kvm_restore_msa_upper   17, VCPU_FPR17+8, a0
    kvm_restore_msa_upper   18, VCPU_FPR18+8, a0
    kvm_restore_msa_upper   19, VCPU_FPR19+8, a0
    kvm_restore_msa_upper   20, VCPU_FPR20+8, a0
    kvm_restore_msa_upper   21, VCPU_FPR21+8, a0
    kvm_restore_msa_upper   22, VCPU_FPR22+8, a0
    kvm_restore_msa_upper   23, VCPU_FPR23+8, a0
    kvm_restore_msa_upper   24, VCPU_FPR24+8, a0
    kvm_restore_msa_upper   25, VCPU_FPR25+8, a0
    kvm_restore_msa_upper   26, VCPU_FPR26+8, a0
    kvm_restore_msa_upper   27, VCPU_FPR27+8, a0
    kvm_restore_msa_upper   28, VCPU_FPR28+8, a0
    kvm_restore_msa_upper   29, VCPU_FPR29+8, a0
    kvm_restore_msa_upper   30, VCPU_FPR30+8, a0
    kvm_restore_msa_upper   31, VCPU_FPR31+8, a0
    jr  ra
     nop
    END(__kvm_restore_msa_upper)

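/*
 * void __kvm_restore_msacsr(struct kvm_vcpu_arch *arch)
 *
 * Restore the guest MSA control/status register. If the restored value
 * has Cause bits set whose corresponding Enables are also set, the
 * ctcmsa immediately raises an MSA FP Exception, which is caught and
 * stepped over as described in the comment below.
 */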
LEAF(__kvm_restore_msacsr)
    lw  t0, VCPU_MSA_CSR(a0)
    /*
     * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
     * See kvm_mips_csr_die_notify(), which handles the case where t0
     * holds a value that raises an MSA FP Exception. That exception
     * must be stepped over and ignored, since the Cause bits it sets
     * must remain in place for the guest to see.
     */
    _ctcmsa MSA_CSR, t0
    jr  ra
     nop
    END(__kvm_restore_msacsr)
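
/*
 * For reference, the stepping-over described above happens in a die
 * notifier on the C side. The sketch below is a simplified,
 * from-memory approximation of kvm_mips_csr_die_notify() in
 * arch/mips/kvm/mips.c, not a verbatim copy; the exact guard
 * conditions and constants may differ in the real kernel:
 *
 *    static int kvm_mips_csr_die_notify(struct notifier_block *self,
 *                                       unsigned long cmd, void *ptr)
 *    {
 *        struct die_args *args = ptr;
 *        struct pt_regs *regs = args->regs;
 *        unsigned long pc = instruction_pointer(regs);
 *
 *        // Only handle an MSA FP Exception raised from within
 *        // __kvm_restore_msacsr, i.e. by the ctcmsa above.
 *        if (cmd != DIE_MSAFP ||
 *            pc < (unsigned long)&__kvm_restore_msacsr ||
 *            pc > (unsigned long)&__kvm_restore_msacsr + 8)
 *            return NOTIFY_DONE;
 *
 *        // Step over the faulting instruction and keep going; the
 *        // Cause bits stay set in MSACSR for the guest to observe.
 *        regs->cp0_epc += 4;
 *        return NOTIFY_STOP;
 *    }
 */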