Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *
0004  * Copyright IBM Corp. 2007
0005  *
0006  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
0007  */
0008 
0009 #ifndef __POWERPC_KVM_HOST_H__
0010 #define __POWERPC_KVM_HOST_H__
0011 
0012 #include <linux/mutex.h>
0013 #include <linux/hrtimer.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/types.h>
0016 #include <linux/kvm_types.h>
0017 #include <linux/threads.h>
0018 #include <linux/spinlock.h>
0019 #include <linux/kvm_para.h>
0020 #include <linux/list.h>
0021 #include <linux/atomic.h>
0022 #include <asm/kvm_asm.h>
0023 #include <asm/processor.h>
0024 #include <asm/page.h>
0025 #include <asm/cacheflush.h>
0026 #include <asm/hvcall.h>
0027 #include <asm/mce.h>
0028 
0029 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
0030 
0031 #define KVM_MAX_VCPUS       NR_CPUS
0032 #define KVM_MAX_VCORES      NR_CPUS
0033 
0034 #include <asm/cputhreads.h>
0035 
0036 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0037 #include <asm/kvm_book3s_asm.h>     /* for MAX_SMT_THREADS */
0038 #define KVM_MAX_VCPU_IDS    (MAX_SMT_THREADS * KVM_MAX_VCORES)
0039 
0040 /*
0041  * Limit the nested partition table to 4096 entries (because that's what
0042  * hardware supports). Both guest and host use this value.
0043  */
0044 #define KVM_MAX_NESTED_GUESTS_SHIFT 12
0045 
0046 #else
0047 #define KVM_MAX_VCPU_IDS    KVM_MAX_VCPUS
0048 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
0049 
0050 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
0051 
0052 #define KVM_HALT_POLL_NS_DEFAULT 10000  /* 10 us */
0053 
0054 /* These values are internal and can be increased later */
0055 #define KVM_NR_IRQCHIPS          1
0056 #define KVM_IRQCHIP_NUM_PINS     256
0057 
0058 /* PPC-specific vcpu->requests bit members */
0059 #define KVM_REQ_WATCHDOG    KVM_ARCH_REQ(0)
0060 #define KVM_REQ_EPR_EXIT    KVM_ARCH_REQ(1)
0061 #define KVM_REQ_PENDING_TIMER   KVM_ARCH_REQ(2)
0062 
0063 #include <linux/mmu_notifier.h>
0064 
0065 #define KVM_ARCH_WANT_MMU_NOTIFIER
0066 
0067 #define HPTEG_CACHE_NUM         (1 << 15)
0068 #define HPTEG_HASH_BITS_PTE     13
0069 #define HPTEG_HASH_BITS_PTE_LONG    12
0070 #define HPTEG_HASH_BITS_VPTE        13
0071 #define HPTEG_HASH_BITS_VPTE_LONG   5
0072 #define HPTEG_HASH_BITS_VPTE_64K    11
0073 #define HPTEG_HASH_NUM_PTE      (1 << HPTEG_HASH_BITS_PTE)
0074 #define HPTEG_HASH_NUM_PTE_LONG     (1 << HPTEG_HASH_BITS_PTE_LONG)
0075 #define HPTEG_HASH_NUM_VPTE     (1 << HPTEG_HASH_BITS_VPTE)
0076 #define HPTEG_HASH_NUM_VPTE_LONG    (1 << HPTEG_HASH_BITS_VPTE_LONG)
0077 #define HPTEG_HASH_NUM_VPTE_64K     (1 << HPTEG_HASH_BITS_VPTE_64K)
0078 
0079 /* Physical Address Mask - allowed range of real mode RAM access */
0080 #define KVM_PAM         0x0fffffffffffffffULL
0081 
0082 struct lppaca;
0083 struct slb_shadow;
0084 struct dtl_entry;
0085 
0086 struct kvmppc_vcpu_book3s;
0087 struct kvmppc_book3s_shadow_vcpu;
0088 struct kvm_nested_guest;
0089 
0090 struct kvm_vm_stat { /* per-VM statistics counters */
0091     struct kvm_vm_stat_generic generic; /* arch-independent VM stats */
0092     u64 num_2M_pages; /* count of 2M pages (name-based; updated by MMU code elsewhere) */
0093     u64 num_1G_pages; /* count of 1G pages (name-based; updated by MMU code elsewhere) */
0094 };
0095 
0096 struct kvm_vcpu_stat { /* per-vCPU event/exit counters */
0097     struct kvm_vcpu_stat_generic generic; /* arch-independent vcpu stats */
0098     u64 sum_exits; /* total number of guest exits */
0099     u64 mmio_exits;
0100     u64 signal_exits;
0101     u64 light_exits;
0102     /* Account for special types of light exits: */
0103     u64 itlb_real_miss_exits;
0104     u64 itlb_virt_miss_exits;
0105     u64 dtlb_real_miss_exits;
0106     u64 dtlb_virt_miss_exits;
0107     u64 syscall_exits;
0108     u64 isi_exits;
0109     u64 dsi_exits;
0110     u64 emulated_inst_exits;
0111     u64 dec_exits;
0112     u64 ext_intr_exits;
0113     u64 halt_successful_wait;
0114     u64 dbell_exits;
0115     u64 gdbell_exits;
0116     u64 ld; /* emulated loads (name-based; incremented elsewhere) */
0117     u64 st; /* emulated stores (name-based; incremented elsewhere) */
0118 #ifdef CONFIG_PPC_BOOK3S
0119     u64 pf_storage;
0120     u64 pf_instruc;
0121     u64 sp_storage;
0122     u64 sp_instruc;
0123     u64 queue_intr;
0124     u64 ld_slow;
0125     u64 st_slow;
0126 #endif
0127     u64 pthru_all; /* passthrough irq counters — NOTE(review): presumably irq passthrough path; confirm against book3s_hv code */
0128     u64 pthru_host;
0129     u64 pthru_bad_aff;
0130 };
0131 
0132 enum kvm_exit_types { /* exit categories; used to index the timing arrays in kvm_vcpu_arch */
0133     MMIO_EXITS,
0134     SIGNAL_EXITS,
0135     ITLB_REAL_MISS_EXITS,
0136     ITLB_VIRT_MISS_EXITS,
0137     DTLB_REAL_MISS_EXITS,
0138     DTLB_VIRT_MISS_EXITS,
0139     SYSCALL_EXITS,
0140     ISI_EXITS,
0141     DSI_EXITS,
0142     EMULATED_INST_EXITS,
0143     EMULATED_MTMSRWE_EXITS,
0144     EMULATED_WRTEE_EXITS,
0145     EMULATED_MTSPR_EXITS,
0146     EMULATED_MFSPR_EXITS,
0147     EMULATED_MTMSR_EXITS,
0148     EMULATED_MFMSR_EXITS,
0149     EMULATED_TLBSX_EXITS,
0150     EMULATED_TLBWE_EXITS,
0151     EMULATED_RFI_EXITS,
0152     EMULATED_RFCI_EXITS,
0153     EMULATED_RFDI_EXITS,
0154     DEC_EXITS,
0155     EXT_INTR_EXITS,
0156     HALT_WAKEUP,
0157     USR_PR_INST,
0158     FP_UNAVAIL,
0159     DEBUG_EXITS,
0160     TIMEINGUEST,
0161     DBELL_EXITS,
0162     GDBELL_EXITS,
0163     __NUMBER_OF_KVM_EXIT_TYPES /* must stay last: used as array size for the timing stats */
0164 };
0165 
0166 /* allow access to big endian 32bit upper/lower parts and 64bit var */
0167 struct kvmppc_exit_timing {
0168     union {
0169         u64 tv64; /* whole timebase value as one 64-bit quantity */
0170         struct {
0171             u32 tbu, tbl; /* big-endian upper/lower 32-bit halves (see comment above) */
0172         } tv32;
0173     };
0174 };
0175 
0176 struct kvmppc_pginfo { /* bookkeeping for a single guest page */
0177     unsigned long pfn; /* page frame number */
0178     atomic_t refcnt; /* reference count for this page */
0179 };
0180 
0181 struct kvmppc_spapr_tce_iommu_table { /* link between a TCE table and a host IOMMU table */
0182     struct rcu_head rcu; /* for RCU-deferred freeing */
0183     struct list_head next; /* list linkage (entries chained off kvmppc_spapr_tce_table.iommu_tables) */
0184     struct iommu_table *tbl; /* host IOMMU table */
0185     struct kref kref; /* reference count */
0186 };
0187 
0188 #define TCES_PER_PAGE   (PAGE_SIZE / sizeof(u64))
0189 
0190 struct kvmppc_spapr_tce_table { /* one guest TCE (DMA translation) window */
0191     struct list_head list; /* linkage — presumably into kvm->arch.spapr_tce_tables; confirm in book3s_64_vio.c */
0192     struct kvm *kvm; /* owning VM */
0193     u64 liobn; /* logical I/O bus number identifying this window */
0194     struct rcu_head rcu; /* for RCU-deferred freeing */
0195     u32 page_shift; /* log2 of the TCE page size */
0196     u64 offset;     /* in pages */
0197     u64 size;       /* window size in pages */
0198     struct list_head iommu_tables; /* attached kvmppc_spapr_tce_iommu_table entries */
0199     struct mutex alloc_lock; /* serializes page allocation for this table */
0200     struct page *pages[]; /* flexible array of backing pages for the TCE entries */
0201 };
0202 
0203 /* XICS components, defined in book3s_xics.c */
0204 struct kvmppc_xics;
0205 struct kvmppc_icp;
0206 extern struct kvm_device_ops kvm_xics_ops;
0207 
0208 /* XIVE components, defined in book3s_xive.c */
0209 struct kvmppc_xive;
0210 struct kvmppc_xive_vcpu;
0211 extern struct kvm_device_ops kvm_xive_ops;
0212 extern struct kvm_device_ops kvm_xive_native_ops;
0213 
0214 struct kvmppc_passthru_irqmap;
0215 
0216 /*
0217  * The reverse mapping array has one entry for each HPTE,
0218  * which stores the guest's view of the second word of the HPTE
0219  * (including the guest physical address of the mapping),
0220  * plus forward and backward pointers in a doubly-linked ring
0221  * of HPTEs that map the same host page.  The pointers in this
0222  * ring are 32-bit HPTE indexes, to save space.
0223  */
0224 struct revmap_entry { /* one entry per HPTE; see layout comment above */
0225     unsigned long guest_rpte; /* guest's view of the second HPTE word (incl. guest physical address) */
0226     unsigned int forw, back; /* 32-bit HPTE indexes: doubly-linked ring of HPTEs mapping the same host page */
0227 };
0228 
0229 /*
0230  * The rmap array of size number of guest pages is allocated for each memslot.
0231  * This array is used to store usage specific information about the guest page.
0232  * Below are the encodings of the various possible usage types.
0233  */
0234 /* Free bits which can be used to define a new usage */
0235 #define KVMPPC_RMAP_TYPE_MASK   0xff00000000000000
0236 #define KVMPPC_RMAP_NESTED  0xc000000000000000  /* Nested rmap array */
0237 #define KVMPPC_RMAP_HPT     0x0100000000000000  /* HPT guest */
0238 
0239 /*
0240  * rmap usage definition for a hash page table (hpt) guest:
0241  * 0x0000080000000000   Lock bit
0242  * 0x0000018000000000   RC bits
0243  * 0x0000000100000000   Present bit
0244  * 0x00000000ffffffff   HPT index bits
0245  * The bottom 32 bits are the index in the guest HPT of a HPTE that points to
0246  * the page.
0247  */
0248 #define KVMPPC_RMAP_LOCK_BIT    43
0249 #define KVMPPC_RMAP_RC_SHIFT    32
0250 #define KVMPPC_RMAP_REFERENCED  (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
0251 #define KVMPPC_RMAP_PRESENT 0x100000000ul
0252 #define KVMPPC_RMAP_INDEX   0xfffffffful
0253 
0254 struct kvm_arch_memory_slot { /* per-memslot arch data */
0255 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0256     unsigned long *rmap; /* reverse-map array, one entry per guest page; encodings in KVMPPC_RMAP_* above */
0257 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
0258 };
0259 
0260 struct kvm_hpt_info { /* describes the guest's hashed page table */
0261     /* Host virtual (linear mapping) address of guest HPT */
0262     unsigned long virt;
0263     /* Array of reverse mapping entries for each guest HPTE */
0264     struct revmap_entry *rev;
0265     /* Guest HPT size is 2**(order) bytes */
0266     u32 order;
0267     /* 1 if HPT allocated with CMA (contiguous memory allocator), 0 otherwise */
0268     int cma;
0269 };
0270 
0271 struct kvm_resize_hpt;
0272 
0273 /* Flag values for kvm_arch.secure_guest */
0274 #define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
0275 #define KVMPPC_SECURE_INIT_DONE  0x2 /* H_SVM_INIT_DONE completed */
0276 #define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
0277 
0278 struct kvm_arch { /* per-VM architecture-specific state */
0279     unsigned int lpid; /* logical partition ID assigned to this guest */
0280     unsigned int smt_mode;      /* # vcpus per virtual core */
0281     unsigned int emul_smt_mode; /* emulated SMT mode, on P9 */
0282 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0283     unsigned int tlb_sets;
0284     struct kvm_hpt_info hpt; /* guest hashed page table (see struct above) */
0285     atomic64_t mmio_update;
0286     unsigned int host_lpid;
0287     unsigned long host_lpcr;
0288     unsigned long sdr1;
0289     unsigned long host_sdr1;
0290     unsigned long lpcr;
0291     unsigned long vrma_slb_v;
0292     int mmu_ready;
0293     atomic_t vcpus_running;
0294     u32 online_vcores;
0295     atomic_t hpte_mod_interest;
0296     cpumask_t need_tlb_flush;
0297     u8 radix; /* nonzero if guest uses the radix MMU (name-based; confirm in HV code) */
0298     u8 fwnmi_enabled;
0299     u8 secure_guest; /* KVMPPC_SECURE_INIT_* flag values, defined above */
0300     u8 svm_enabled;
0301     bool nested_enable;
0302     bool dawr1_enabled;
0303     pgd_t *pgtable;
0304     u64 process_table;
0305     struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
0306 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
0307 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
0308     struct mutex hpt_mutex;
0309 #endif
0310 #ifdef CONFIG_PPC_BOOK3S_64
0311     struct list_head spapr_tce_tables; /* list of kvmppc_spapr_tce_table windows */
0312     struct list_head rtas_tokens;
0313     struct mutex rtas_token_lock; /* protects rtas_tokens */
0314     DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
0315 #endif
0316 #ifdef CONFIG_KVM_MPIC
0317     struct openpic *mpic;
0318 #endif
0319 #ifdef CONFIG_KVM_XICS
0320     struct kvmppc_xics *xics;
0321     struct kvmppc_xics *xics_device;
0322     struct kvmppc_xive *xive;    /* Current XIVE device in use */
0323     struct {
0324         struct kvmppc_xive *native;
0325         struct kvmppc_xive *xics_on_xive;
0326     } xive_devices;
0327     struct kvmppc_passthru_irqmap *pimap;
0328 #endif
0329     struct kvmppc_ops *kvm_ops; /* HV or PR backend ops for this VM */
0330 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0331     struct mutex uvmem_lock;
0332     struct list_head uvmem_pfns;
0333     struct mutex mmu_setup_lock;    /* nests inside vcpu mutexes */
0334     u64 l1_ptcr;
0335     struct idr kvm_nested_guest_idr;
0336     /* This array can grow quite large, keep it at the end */
0337     struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
0338 #endif
0339 };
0340 
0341 #define VCORE_ENTRY_MAP(vc) ((vc)->entry_exit_map & 0xff)  /* low 8 bits: threads that have entered */
0342 #define VCORE_EXIT_MAP(vc)  ((vc)->entry_exit_map >> 8)    /* upper bits: threads that have exited */
0343 #define VCORE_IS_EXITING(vc)    (VCORE_EXIT_MAP(vc) != 0)
0344 
0345 /* This bit is used when a vcore exit is triggered from outside the vcore */
0346 #define VCORE_EXIT_REQ      0x10000
0347 
0348 /*
0349  * Values for vcore_state.
0350  * Note that these are arranged such that lower values
0351  * (< VCORE_SLEEPING) don't require stolen time accounting
0352  * on load/unload, and higher values do.
0353  */
0354 #define VCORE_INACTIVE  0
0355 #define VCORE_PREEMPT   1
0356 #define VCORE_PIGGYBACK 2
0357 #define VCORE_SLEEPING  3
0358 #define VCORE_RUNNING   4
0359 #define VCORE_EXITING   5
0360 #define VCORE_POLLING   6
0361 
0362 /*
0363  * Struct used to manage memory for a virtual processor area
0364  * registered by a PAPR guest.  There are three types of area
0365  * that a guest can register.
0366  */
0367 struct kvmppc_vpa {
0368     unsigned long gpa;  /* Current guest physical address of the area */
0369     void *pinned_addr;  /* Address in kernel linear mapping */
0370     void *pinned_end;   /* End of region */
0371     unsigned long next_gpa; /* Guest phys addr for next update */
0372     unsigned long len;  /* Number of bytes required */
0373     u8 update_pending;  /* 1 => update pinned_addr from next_gpa */
0374     bool dirty;     /* true => area has been modified by kernel */
0375 };
0376 
0377 struct kvmppc_pte { /* result of an address translation (see kvmppc_mmu.xlate) */
0378     ulong eaddr; /* effective (guest virtual) address */
0379     u64 vpage; /* virtual page number */
0380     ulong raddr; /* real (guest physical) address */
0381     bool may_read       : 1;
0382     bool may_write      : 1;
0383     bool may_execute    : 1;
0384     unsigned long wimg; /* WIMG storage-attribute bits */
0385     unsigned long rc; /* referenced/changed bits (name-based; confirm) */
0386     u8 page_size;       /* MMU_PAGE_xxx */
0387     u8 page_shift;
0388 };
0389 
0390 struct kvmppc_mmu { /* per-MMU-model hooks for SLB/SR manipulation and address translation */
0391     /* book3s_64 only */
0392     void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
0393     u64  (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
0394     u64  (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
0395     int  (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
0396     void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
0397     void (*slbia)(struct kvm_vcpu *vcpu);
0398     /* book3s */
0399     void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
0400     u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
0401     int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
0402               struct kvmppc_pte *pte, bool data, bool iswrite);
0403     void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
0404     int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
0405     u64  (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
0406     bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
0407 };
0408 
0409 struct kvmppc_slb { /* shadow copy of one guest SLB entry */
0410     u64 esid; /* effective segment ID */
0411     u64 vsid; /* virtual segment ID */
0412     u64 orige; /* NOTE(review): presumably the raw SLBE/SLBV dwords as written by the guest — confirm */
0413     u64 origv;
0414     bool valid  : 1;
0415     bool Ks     : 1;
0416     bool Kp     : 1;
0417     bool nx     : 1;
0418     bool large  : 1;    /* PTEs are 16MB */
0419     bool tb     : 1;    /* 1TB segment */
0420     bool class  : 1;
0421     u8 base_page_size;  /* MMU_PAGE_xxx */
0422 };
0423 
0424 /* Struct used to accumulate timing information in HV real mode code */
0425 struct kvmhv_tb_accumulator {
0426     u64 seqcount;   /* used to synchronize access, also count * 2 */
0427     u64 tb_total;   /* total time in timebase ticks */
0428     u64 tb_min;     /* minimum interval seen, in timebase ticks */
0429     u64 tb_max;     /* maximum interval seen, in timebase ticks */
0430 };
0431 
0432 #ifdef CONFIG_PPC_BOOK3S_64
0433 struct kvmppc_irq_map { /* one host-irq <-> guest-irq mapping for passthrough */
0434     u32 r_hwirq; /* real (host) hw irq number — name-based; confirm against book3s_hv */
0435     u32 v_hwirq; /* virtual (guest-visible) hw irq number — name-based; confirm */
0436     struct irq_desc *desc; /* host irq descriptor */
0437 };
0438 
0439 #define KVMPPC_PIRQ_MAPPED  1024 /* max passthrough irq mappings per VM */
0440 struct kvmppc_passthru_irqmap {
0441     int n_mapped; /* number of valid entries in mapped[] */
0442     struct kvmppc_irq_map mapped[KVMPPC_PIRQ_MAPPED];
0443 };
0444 #endif
0445 
0446 # ifdef CONFIG_PPC_FSL_BOOK3E
0447 #define KVMPPC_BOOKE_IAC_NUM    2
0448 #define KVMPPC_BOOKE_DAC_NUM    2
0449 # else
0450 #define KVMPPC_BOOKE_IAC_NUM    4
0451 #define KVMPPC_BOOKE_DAC_NUM    2
0452 # endif
0453 #define KVMPPC_BOOKE_MAX_IAC    4
0454 #define KVMPPC_BOOKE_MAX_DAC    2
0455 
0456 /* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
0457 #define KVMPPC_EPR_NONE     0 /* EPR not supported */
0458 #define KVMPPC_EPR_USER     1 /* exit to userspace to fill EPR */
0459 #define KVMPPC_EPR_KERNEL   2 /* in-kernel irqchip */
0460 
0461 #define KVMPPC_IRQ_DEFAULT  0
0462 #define KVMPPC_IRQ_MPIC     1
0463 #define KVMPPC_IRQ_XICS     2 /* Includes a XIVE option */
0464 #define KVMPPC_IRQ_XIVE     3 /* XIVE native exploitation mode */
0465 
0466 #define MMIO_HPTE_CACHE_SIZE    4
0467 
0468 struct mmio_hpte_cache_entry { /* cached translation for an emulated-MMIO HPTE */
0469     unsigned long hpte_v; /* HPTE first dword (name-based; confirm) */
0470     unsigned long hpte_r; /* HPTE second dword (name-based; confirm) */
0471     unsigned long rpte;
0472     unsigned long pte_index; /* index of the HPTE in the guest HPT */
0473     unsigned long eaddr; /* faulting effective address */
0474     unsigned long slb_v;
0475     long mmio_update; /* generation stamp; compared against kvm->arch.mmio_update */
0476     unsigned int slb_base_pshift;
0477 };
0478 
0479 struct mmio_hpte_cache { /* small per-vCPU cache of recent MMIO HPTEs */
0480     struct mmio_hpte_cache_entry entry[MMIO_HPTE_CACHE_SIZE];
0481     unsigned int index; /* next slot to fill — NOTE(review): presumably round-robin replacement; confirm */
0482 };
0483 
0484 #define KVMPPC_VSX_COPY_NONE        0
0485 #define KVMPPC_VSX_COPY_WORD        1
0486 #define KVMPPC_VSX_COPY_DWORD       2
0487 #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
0488 #define KVMPPC_VSX_COPY_WORD_LOAD_DUMP  4
0489 
0490 #define KVMPPC_VMX_COPY_BYTE        8
0491 #define KVMPPC_VMX_COPY_HWORD       9
0492 #define KVMPPC_VMX_COPY_WORD        10
0493 #define KVMPPC_VMX_COPY_DWORD       11
0494 
0495 struct openpic;
0496 
0497 /* W0 and W1 of a XIVE thread management context */
0498 union xive_tma_w01 { /* W0 and W1 of a XIVE thread management context (see comment above) */
0499     struct {
0500         u8  nsr;
0501         u8  cppr;
0502         u8  ipb;
0503         u8  lsmfb;
0504         u8  ack;
0505         u8  inc;
0506         u8  age;
0507         u8  pipr;
0508     };
0509     __be64 w01; /* both words as a single big-endian 64-bit value */
0510 };
0511 
0512 struct kvm_vcpu_arch { /* per-vCPU architecture-specific state */
0513     ulong host_stack; /* host kernel stack, saved across guest entry (name-based) */
0514     u32 host_pid;
0515 #ifdef CONFIG_PPC_BOOK3S
0516     struct kvmppc_slb slb[64];
0517     int slb_max;        /* 1 + index of last valid entry in slb[] */
0518     int slb_nr;     /* total number of entries in SLB */
0519     struct kvmppc_mmu mmu;
0520     struct kvmppc_vcpu_book3s *book3s;
0521 #endif
0522 #ifdef CONFIG_PPC_BOOK3S_32
0523     struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
0524 #endif
0525 
0526     /*
0527      * This is passed along to the HV via H_ENTER_NESTED. Align to
0528      * prevent it crossing a real 4K page.
0529      */
0530     struct pt_regs regs __aligned(512);
0531 
0532     struct thread_fp_state fp;
0533 
0534 #ifdef CONFIG_SPE
0535     ulong evr[32];
0536     ulong spefscr;
0537     ulong host_spefscr;
0538     u64 acc;
0539 #endif
0540 #ifdef CONFIG_ALTIVEC
0541     struct thread_vr_state vr;
0542 #endif
0543 
0544 #ifdef CONFIG_KVM_BOOKE_HV
0545     u32 host_mas4;
0546     u32 host_mas6;
0547     u32 shadow_epcr;
0548     u32 shadow_msrp;
0549     u32 eplc;
0550     u32 epsc;
0551     u32 oldpir;
0552 #endif
0553 
0554 #if defined(CONFIG_BOOKE)
0555 #if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
0556     u32 epcr;
0557 #endif
0558 #endif
0559 
0560 #ifdef CONFIG_PPC_BOOK3S
0561     /* For Gekko paired singles */
0562     u32 qpr[32];
0563 #endif
0564 
0565 #ifdef CONFIG_PPC_BOOK3S
0566     ulong tar;
0567 #endif
0568 
0569 #ifdef CONFIG_PPC_BOOK3S
0570     ulong hflags;
0571     ulong guest_owned_ext;
0572     ulong purr;
0573     ulong spurr;
0574     ulong ic;
0575     ulong dscr;
0576     ulong amr;
0577     ulong uamor;
0578     ulong iamr;
0579     u32 ctrl;
0580     u32 dabrx;
0581     ulong dabr;
0582     ulong dawr0;
0583     ulong dawrx0;
0584     ulong dawr1;
0585     ulong dawrx1;
0586     ulong ciabr;
0587     ulong cfar;
0588     ulong ppr;
0589     u32 pspb;
0590     u8 load_ebb;
0591 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0592     u8 load_tm;
0593 #endif
0594     ulong fscr;
0595     ulong shadow_fscr;
0596     ulong ebbhr;
0597     ulong ebbrr;
0598     ulong bescr;
0599     ulong csigr;
0600     ulong tacr;
0601     ulong tcscr;
0602     ulong acop;
0603     ulong wort;
0604     ulong tid;
0605     ulong psscr;
0606     ulong hfscr;
0607     ulong shadow_srr1;
0608 #endif
0609     u32 vrsave; /* also USPRG0 */
0610     u32 mmucr;
0611     /* shadow_msr is unused for BookE HV */
0612     ulong shadow_msr;
0613     ulong csrr0;
0614     ulong csrr1;
0615     ulong dsrr0;
0616     ulong dsrr1;
0617     ulong mcsrr0;
0618     ulong mcsrr1;
0619     ulong mcsr;
0620     ulong dec;
0621 #ifdef CONFIG_BOOKE
0622     u32 decar;
0623 #endif
0624     /* Time base value when we entered the guest */
0625     u64 entry_tb;
0626     u64 entry_vtb;
0627     u64 entry_ic;
0628     u32 tcr;
0629     ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
0630     u32 ivor[64];
0631     ulong ivpr;
0632     u32 pvr;
0633 
0634     u32 shadow_pid;
0635     u32 shadow_pid1;
0636     u32 pid;
0637     u32 swap_pid;
0638 
0639     u32 ccr0;
0640     u32 ccr1;
0641     u32 dbsr;
0642 
0643     u64 mmcr[4];    /* MMCR0, MMCR1, MMCR2, MMCR3 */
0644     u64 mmcra;
0645     u64 mmcrs;
0646     u32 pmc[8];
0647     u32 spmc[2];
0648     u64 siar;
0649     u64 sdar;
0650     u64 sier[3];
0651 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0652     u64 tfhar;
0653     u64 texasr;
0654     u64 tfiar;
0655     u64 orig_texasr;
0656 
0657     u32 cr_tm;
0658     u64 xer_tm;
0659     u64 lr_tm;
0660     u64 ctr_tm;
0661     u64 amr_tm;
0662     u64 ppr_tm;
0663     u64 dscr_tm;
0664     u64 tar_tm;
0665 
0666     ulong gpr_tm[32];
0667 
0668     struct thread_fp_state fp_tm;
0669 
0670     struct thread_vr_state vr_tm;
0671     u32 vrsave_tm; /* also USPRG0 */
0672 #endif
0673 
0674 #ifdef CONFIG_KVM_EXIT_TIMING
0675     struct mutex exit_timing_lock;
0676     struct kvmppc_exit_timing timing_exit;
0677     struct kvmppc_exit_timing timing_last_enter;
0678     u32 last_exit_type;
0679     u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
0680     u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
0681     u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
0682     u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
0683     u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
0684     u64 timing_last_exit;
0685 #endif
0686 
0687 #ifdef CONFIG_PPC_BOOK3S
0688     ulong fault_dar;
0689     u32 fault_dsisr;
0690     unsigned long intr_msr;
0691     /*
0692      * POWER9 and later: fault_gpa contains the guest real address of page
0693      * fault for a radix guest, or segment descriptor (equivalent to result
0694      * from slbmfev of SLB entry that translated the EA) for hash guests.
0695      */
0696     ulong fault_gpa;
0697 #endif
0698 
0699 #ifdef CONFIG_BOOKE
0700     ulong fault_dear;
0701     ulong fault_esr;
0702     ulong queued_dear;
0703     ulong queued_esr;
0704     spinlock_t wdt_lock;
0705     struct timer_list wdt_timer;
0706     u32 tlbcfg[4];
0707     u32 tlbps[4];
0708     u32 mmucfg;
0709     u32 eptcfg;
0710     u32 epr;
0711     u64 sprg9;
0712     u32 pwrmgtcr0;
0713     u32 crit_save;
0714     /* guest debug registers */
0715     struct debug_reg dbg_reg;
0716 #endif
0717     gpa_t paddr_accessed;
0718     gva_t vaddr_accessed;
0719     pgd_t *pgdir;
0720 
0721     u16 io_gpr; /* GPR used as IO source/target */
0722     u8 mmio_host_swabbed;
0723     u8 mmio_sign_extend;
0724     /* conversion between single and double precision */
0725     u8 mmio_sp64_extend;
0726     /*
0727      * Number of simulations for vsx.
0728      * If we use 2*8bytes to simulate 1*16bytes,
0729      * then the number should be 2 and
0730      * mmio_copy_type=KVMPPC_VSX_COPY_DWORD.
0731      * If we use 4*4bytes to simulate 1*16bytes,
0732      * the number should be 4 and
0733      * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
0734      */
0735     u8 mmio_vsx_copy_nums;
0736     u8 mmio_vsx_offset;
0737     u8 mmio_vmx_copy_nums;
0738     u8 mmio_vmx_offset;
0739     u8 mmio_copy_type;
0740     u8 osi_needed;
0741     u8 osi_enabled;
0742     u8 papr_enabled;
0743     u8 watchdog_enabled;
0744     u8 sane;
0745     u8 cpu_type;
0746     u8 hcall_needed;
0747     u8 epr_flags; /* KVMPPC_EPR_xxx */
0748     u8 epr_needed;
0749     u8 external_oneshot;    /* clear external irq after delivery */
0750 
0751     u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
0752 
0753     struct hrtimer dec_timer;
0754     u64 dec_jiffies;
0755     u64 dec_expires;    /* Relative to guest timebase. */
0756     unsigned long pending_exceptions;
0757     u8 ceded;
0758     u8 prodded;
0759     u8 doorbell_request;
0760     u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
0761     u32 last_inst;
0762 
0763     struct rcuwait wait;
0764     struct rcuwait *waitp;
0765     struct kvmppc_vcore *vcore;
0766     int ret;
0767     int trap;
0768     int state; /* KVMPPC_VCPU_* values, defined below */
0769     int ptid;
0770     int thread_cpu;
0771     int prev_cpu;
0772     bool timer_running;
0773     wait_queue_head_t cpu_run;
0774     struct machine_check_event mce_evt; /* Valid if trap == 0x200 */
0775 
0776     struct kvm_vcpu_arch_shared *shared;
0777 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
0778     bool shared_big_endian;
0779 #endif
0780     unsigned long magic_page_pa; /* phys addr to map the magic page to */
0781     unsigned long magic_page_ea; /* effect. addr to map the magic page to */
0782     bool disable_kernel_nx;
0783 
0784     int irq_type;       /* one of KVM_IRQ_* */
0785     int irq_cpu_id;
0786     struct openpic *mpic;   /* KVM_IRQ_MPIC */
0787 #ifdef CONFIG_KVM_XICS
0788     struct kvmppc_icp *icp; /* XICS presentation controller */
0789     struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
0790     __be32 xive_cam_word;    /* Cooked W2 in proper endian with valid bit */
0791     u8 xive_pushed;      /* Is the VP pushed on the physical CPU ? */
0792     u8 xive_esc_on;      /* Is the escalation irq enabled ? */
0793     union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
0794     u64 xive_esc_raddr;  /* Escalation interrupt ESB real addr */
0795     u64 xive_esc_vaddr;  /* Escalation interrupt ESB virt addr */
0796 #endif
0797 
0798 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0799     struct kvm_vcpu_arch_shared shregs;
0800 
0801     struct mmio_hpte_cache mmio_cache;
0802     unsigned long pgfault_addr;
0803     long pgfault_index;
0804     unsigned long pgfault_hpte[2];
0805     struct mmio_hpte_cache_entry *pgfault_cache;
0806 
0807     struct task_struct *run_task;
0808 
0809     spinlock_t vpa_update_lock;
0810     struct kvmppc_vpa vpa;
0811     struct kvmppc_vpa dtl;
0812     struct dtl_entry *dtl_ptr;
0813     unsigned long dtl_index;
0814     u64 stolen_logged;
0815     struct kvmppc_vpa slb_shadow;
0816 
0817     spinlock_t tbacct_lock;
0818     u64 busy_stolen;
0819     u64 busy_preempt;
0820 
0821     u32 emul_inst;
0822 
0823     u32 online;
0824 
0825     u64 hfscr_permitted;    /* A mask of permitted HFSCR facilities */
0826 
0827     /* For support of nested guests */
0828     struct kvm_nested_guest *nested;
0829     u64 nested_hfscr;   /* HFSCR that the L1 requested for the nested guest */
0830     u32 nested_vcpu_id;
0831     gpa_t nested_io_gpr;
0832 #endif
0833 
0834 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
0835     struct kvmhv_tb_accumulator *cur_activity;  /* What we're timing */
0836     u64 cur_tb_start;           /* when it started */
0837 #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
0838     struct kvmhv_tb_accumulator vcpu_entry;
0839     struct kvmhv_tb_accumulator vcpu_exit;
0840     struct kvmhv_tb_accumulator in_guest;
0841     struct kvmhv_tb_accumulator hcall;
0842     struct kvmhv_tb_accumulator pg_fault;
0843     struct kvmhv_tb_accumulator guest_entry;
0844     struct kvmhv_tb_accumulator guest_exit;
0845 #else
0846     struct kvmhv_tb_accumulator rm_entry;   /* real-mode entry code */
0847     struct kvmhv_tb_accumulator rm_intr;    /* real-mode intr handling */
0848     struct kvmhv_tb_accumulator rm_exit;    /* real-mode exit code */
0849     struct kvmhv_tb_accumulator guest_time; /* guest execution */
0850     struct kvmhv_tb_accumulator cede_time;  /* time napping inside guest */
0851 #endif
0852 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
0853 };
0854 
0855 #define VCPU_FPR(vcpu, i)   (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
0856 #define VCPU_VSX_FPR(vcpu, i, j)    ((vcpu)->arch.fp.fpr[i][j])
0857 #define VCPU_VSX_VR(vcpu, i)        ((vcpu)->arch.vr.vr[i])
0858 
0859 /* Values for vcpu->arch.state */
0860 #define KVMPPC_VCPU_NOTREADY        0
0861 #define KVMPPC_VCPU_RUNNABLE        1
0862 #define KVMPPC_VCPU_BUSY_IN_HOST    2
0863 
0864 /* Values for vcpu->arch.io_gpr */
0865 #define KVM_MMIO_REG_MASK   0x003f
0866 #define KVM_MMIO_REG_EXT_MASK   0xffc0
0867 #define KVM_MMIO_REG_GPR    0x0000
0868 #define KVM_MMIO_REG_FPR    0x0040
0869 #define KVM_MMIO_REG_QPR    0x0080
0870 #define KVM_MMIO_REG_FQPR   0x00c0
0871 #define KVM_MMIO_REG_VSX    0x0100
0872 #define KVM_MMIO_REG_VMX    0x0180
0873 #define KVM_MMIO_REG_NESTED_GPR 0xffc0
0874 
0875 
0876 #define __KVM_HAVE_ARCH_WQP
0877 #define __KVM_HAVE_CREATE_DEVICE
0878 
0879 static inline void kvm_arch_hardware_disable(void) {} /* empty stubs: no arch-specific work needed on powerpc */
0880 static inline void kvm_arch_hardware_unsetup(void) {}
0881 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
0882 static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
0883 static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
0884 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
0885 static inline void kvm_arch_exit(void) {}
0886 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
0887 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
0888 
0889 #endif /* __POWERPC_KVM_HOST_H__ */