Back to home page

OSCL-LXR

 
 

    


0001 /*
0002 * This file is subject to the terms and conditions of the GNU General Public
0003 * License.  See the file "COPYING" in the main directory of this archive
0004 * for more details.
0005 *
0006 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
0007 * Authors: Sanjay Lal <sanjayl@kymasys.com>
0008 */
0009 
0010 #ifndef __MIPS_KVM_HOST_H__
0011 #define __MIPS_KVM_HOST_H__
0012 
0013 #include <linux/cpumask.h>
0014 #include <linux/mutex.h>
0015 #include <linux/hrtimer.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/types.h>
0018 #include <linux/kvm.h>
0019 #include <linux/kvm_types.h>
0020 #include <linux/threads.h>
0021 #include <linux/spinlock.h>
0022 
0023 #include <asm/asm.h>
0024 #include <asm/inst.h>
0025 #include <asm/mipsregs.h>
0026 
0027 #include <kvm/iodev.h>
0028 
/* MIPS KVM register ids */
/*
 * Build a KVM_{GET,SET}_ONE_REG id for CP0 register number _R, select _S.
 * The low bits encode (8 * reg + sel); the size flag selects 32/64-bit access.
 */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)


#define KVM_MAX_VCPUS		16

#define KVM_HALT_POLL_NS_DEFAULT 500000

/* Guest ID tracking variables, defined in the MIPS KVM implementation. */
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;

#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */

#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)
0104 
0105 static inline bool kvm_is_error_hva(unsigned long addr)
0106 {
0107     return IS_ERR_VALUE(addr);
0108 }
0109 
/* Per-VM statistics; only the architecture-independent counters here. */
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};
0113 
/* Per-vcpu statistics: generic counters plus MIPS-specific exit counters. */
struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	/* vz_* count the VZ guest-exit subcategories */
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
};
0146 
/* MIPS keeps no architecture-specific per-memslot state. */
struct kvm_arch_memory_slot {
};
0149 
#ifdef CONFIG_CPU_LOONGSON64
/* State of one emulated Loongson inter-processor-interrupt mailbox. */
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

/* One MMIO device instance per node, registered on the KVM I/O bus. */
struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

/* Emulated Loongson IPI controller: one mailbox per vcpu, one device per node. */
struct loongson_kvm_ipi {
	spinlock_t lock;	/* protects ipistate */
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif
0174 
/* Per-VM architecture state. */
struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	struct loongson_kvm_ipi ipi;
#endif
};
0184 
#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

/* Saved guest coprocessor 0 state, indexed [register][select]. */
struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	/* access counters, same indexing as reg[][] */
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
0194 
/*
 * Coprocessor 0 register names
 *
 * Note: some register numbers carry more than one conventional name
 * (e.g. 26 is ECC/ErrCtl, 28 is TagLo/DataLo, 29 is TagHi/DataHi).
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7
0245 
/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

/* Outcome of instruction emulation, returned by the emulation routines. */
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};
0263 
/* Mask of the VPN2 (virtual page-pair number) field of EntryHi. */
#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
/* Entry is global only if the G bit is set in both EntryLo halves. */
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
/* Select the even (0) or odd (1) page of the pair for a given VA. */
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
/* VPN2 match, honouring the entry's page mask. */
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
/* ASID match: global entries hit regardless of ASID. */
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

/* Software image of one guest TLB entry (EntryHi/PageMask/EntryLo pair). */
struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};

/* Bits for kvm_vcpu_arch::aux_inuse, tracking loaded auxiliary state. */
#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2
0289 
/*
 * Per-vcpu architecture state. Layout is shared with the generated
 * entry/exit code, so fields must not be reordered casually.
 */
struct kvm_vcpu_arch {
	/* Trampoline used to enter guest mode */
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	/* Timer emulating the guest CP0 Count/Compare pair */
	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	/* Userspace-configurable enables for guest FPU/MSA */
	u8 fpu_enabled;
	u8 msa_enabled;
};
0372 
/*
 * Atomically OR @val into the saved guest CP0 register word at @reg.
 * Uses an LL/SC (load-linked / store-conditional) loop: the store fails
 * (writes 0 to %0) if the word was modified concurrently, in which case
 * the whole sequence is retried.
 */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	or	%0, %2				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));	/* SC left 0 in temp => retry */
}
0389 
/*
 * Atomically clear the bits in @val from the saved guest CP0 register word
 * at @reg (ANDs with ~val), using the same LL/SC retry loop as
 * _kvm_atomic_set_c0_guest_reg().
 */
static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	and	%0, %2				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));	/* pass the complement so 'and' clears */
	} while (unlikely(!temp));	/* SC failed => retry */
}
0406 
/*
 * Atomically replace the bits selected by @change with the corresponding
 * bits of @val in the saved guest CP0 register word at @reg:
 * *reg = (*reg & ~change) | (val & change), via an LL/SC retry loop.
 */
static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));	/* SC failed => retry */
}
0425 
/*
 * Guest register types, used in accessor build below. The builder macros
 * take a "type" token of 32 or l, pasted onto __KVMT to pick the C type.
 */
#define __KVMT32	u32
#define __KVMTl	unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}

/* Generate saved context bitwise modifiers (non-atomic) */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers (LL/SC based) */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}
0487 
/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware, via the
 * read_gc0_*/write_gc0_* register accessors.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)			\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}
0534 
/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
0588 
/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers),
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW
0620 
/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 * The "type" column is 32 (u32) or l (unsigned long), see __KVMT32/__KVMTl.
 *
 *    fns_hw/sw    name     type    reg num         select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
/* count is software-emulated (see comparecount_timer), hence _SW */
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)
0689 
0690 /* Helpers */
0691 
0692 static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
0693 {
0694     return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
0695         vcpu->fpu_enabled;
0696 }
0697 
0698 static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
0699 {
0700     return kvm_mips_guest_can_have_fpu(vcpu) &&
0701         kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
0702 }
0703 
0704 static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
0705 {
0706     return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
0707         vcpu->msa_enabled;
0708 }
0709 
0710 static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
0711 {
0712     return kvm_mips_guest_can_have_msa(vcpu) &&
0713         kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
0714 }
0715 
/*
 * Implementation-specific operations table, installed via
 * kvm_mips_emulation_init(). The handle_* hooks process individual guest
 * exit causes and return a RESUME_* value.
 */
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*prepare_flush_shadow)(struct kvm *kvm);
	gpa_t (*gva_to_gpa)(gva_t gva);
	/* Timer and device interrupt queueing/dequeueing */
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	/* KVM_{GET,SET}_ONE_REG register enumeration and access */
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
0762 
0763 /* Debug: dump vcpu state */
0764 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
0765 
0766 extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
0767 
0768 /* Building of entry/exception code */
0769 int kvm_mips_entry_setup(void);
0770 void *kvm_mips_build_vcpu_run(void *addr);
0771 void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
0772 void *kvm_mips_build_exception(void *addr, void *handler);
0773 void *kvm_mips_build_exit(void *addr);
0774 
0775 /* FPU/MSA context management */
0776 void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
0777 void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
0778 void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
0779 void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
0780 void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
0781 void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
0782 void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
0783 void kvm_own_fpu(struct kvm_vcpu *vcpu);
0784 void kvm_own_msa(struct kvm_vcpu *vcpu);
0785 void kvm_drop_fpu(struct kvm_vcpu *vcpu);
0786 void kvm_lose_fpu(struct kvm_vcpu *vcpu);
0787 
0788 /* TLB handling */
0789 int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
0790                       struct kvm_vcpu *vcpu, bool write_fault);
0791 
0792 int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
0793 int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
0794                 unsigned long *gpa);
0795 void kvm_vz_local_flush_roottlb_all_guests(void);
0796 void kvm_vz_local_flush_guesttlb_all(void);
0797 void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
0798               unsigned int count);
0799 void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
0800               unsigned int count);
0801 #ifdef CONFIG_CPU_LOONGSON64
0802 void kvm_loongson_clear_guest_vtlb(void);
0803 void kvm_loongson_clear_guest_ftlb(void);
0804 #endif
0805 
0806 /* MMU handling */
0807 
0808 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
0809 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
0810 pgd_t *kvm_pgd_alloc(void);
0811 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
0812 
0813 #define KVM_ARCH_WANT_MMU_NOTIFIER
0814 
0815 /* Emulation */
0816 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
0817 int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
0818 int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
0819 
0820 /**
0821  * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
0822  * @vcpu:   Virtual CPU.
0823  *
0824  * Returns: Whether the TLBL exception was likely due to an instruction
0825  *      fetch fault rather than a data load fault.
0826  */
0827 static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
0828 {
0829     unsigned long badvaddr = vcpu->host_cp0_badvaddr;
0830     unsigned long epc = msk_isa16_mode(vcpu->pc);
0831     u32 cause = vcpu->host_cp0_cause;
0832 
0833     if (epc == badvaddr)
0834         return true;
0835 
0836     /*
0837      * Branches may be 32-bit or 16-bit instructions.
0838      * This isn't exact, but we don't really support MIPS16 or microMIPS yet
0839      * in KVM anyway.
0840      */
0841     if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
0842         return true;
0843 
0844     return false;
0845 }
0846 
0847 extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);
0848 
0849 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
0850 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
0851 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
0852 void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
0853 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
0854 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
0855 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
0856 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
0857 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
0858 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
0859 
0860 /* fairly internal functions requiring some care to use */
0861 int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
0862 ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
0863 int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
0864                  u32 count, int min_drift);
0865 
0866 void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
0867 void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
0868 
0869 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
0870                          u32 cause,
0871                          struct kvm_vcpu *vcpu);
0872 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
0873                         u32 cause,
0874                         struct kvm_vcpu *vcpu);
0875 
0876 /* COP0 */
0877 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);
0878 
0879 /* Hypercalls (hypcall.c) */
0880 
0881 enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
0882                         union mips_instruction inst);
0883 int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);
0884 
0885 /* Misc */
0886 extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
0887 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
0888 extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
0889                  struct kvm_mips_interrupt *irq);
0890 
0891 static inline void kvm_arch_hardware_unsetup(void) {}
0892 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
0893 static inline void kvm_arch_free_memslot(struct kvm *kvm,
0894                      struct kvm_memory_slot *slot) {}
0895 static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
0896 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
0897 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
0898 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
0899 
0900 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
0901 int kvm_arch_flush_remote_tlb(struct kvm *kvm);
0902 
0903 #endif /* __MIPS_KVM_HOST_H__ */