0001
0002
0003
0004
0005
0006
0007
0008
0009 #ifndef __ASM_KVM_BOOK3S_H__
0010 #define __ASM_KVM_BOOK3S_H__
0011
0012 #include <linux/types.h>
0013 #include <linux/kvm_host.h>
0014 #include <asm/kvm_book3s_asm.h>
0015
/*
 * Shadow state for one guest Block Address Translation (BAT) register
 * pair (32-bit Book3S MMU emulation).
 */
struct kvmppc_bat {
	u64 raw;		/* combined upper/lower BAT value as the guest wrote it */
	u32 bepi;		/* block effective page index (from the upper BAT) */
	u32 bepi_mask;		/* EA match mask applied against bepi */
	u32 brpn;		/* block real page number (from the lower BAT) */
	u8 wimg;		/* storage attribute bits (W/I/M/G) */
	u8 pp;			/* page protection bits */
	bool vs : 1;		/* valid in supervisor state */
	bool vp : 1;		/* valid in problem (user) state */
};
0026
/*
 * One guest-segment -> host-VSID translation, stored in the sid_map
 * hash table of struct kvmppc_vcpu_book3s.
 */
struct kvmppc_sid_map {
	u64 guest_vsid;		/* VSID the guest expects for this segment */
	u64 guest_esid;		/* guest effective segment id (hash key) */
	u64 host_vsid;		/* VSID actually used in the host MMU */
	bool valid : 1;		/* entry contains a live mapping */
};
0033
/* Sizing of the kvmppc_sid_map hash table (SID_MAP_NUM entries). */
#define SID_MAP_BITS 9
#define SID_MAP_NUM (1 << SID_MAP_BITS)
#define SID_MAP_MASK (SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
/* 64-bit hosts need only a single MMU context for guest segments. */
#define SID_CONTEXTS 1
#else
/* 32-bit hosts draw guest VSIDs from a pool spread over many contexts. */
#define SID_CONTEXTS 128
#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
#endif
0044
/*
 * One cached guest HPTE that has been mapped into the host. The entry
 * is linked into several hash lists (see the hpte_hash_* tables in
 * struct kvmppc_vcpu_book3s) so it can be found for invalidation by
 * different keys.
 */
struct hpte_cache {
	struct hlist_node list_pte;		/* hashed by guest EA */
	struct hlist_node list_pte_long;	/* hashed by EA, coarser granularity */
	struct hlist_node list_vpte;		/* hashed by virtual page number */
	struct hlist_node list_vpte_long;	/* hashed by VPN, coarser granularity */
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;	/* hashed by VPN for 64k pages */
#endif
	struct rcu_head rcu_head;		/* entries are freed via RCU */
	u64 host_vpn;				/* virtual page number on the host side */
	u64 pfn;				/* host page frame backing the mapping */
	ulong slot;
	struct kvmppc_pte pte;			/* guest view of the translation */
	int pagesize;				/* page size of the host-side mapping */
};
0060
0061
0062
0063
0064
0065
0066
0067
/*
 * A virtual core: the set of vcpus that run together as SMT threads of
 * one physical core under HV KVM. Protected by ->lock unless noted.
 */
struct kvmppc_vcore {
	int n_runnable;		/* count of runnable threads in this vcore */
	int num_threads;
	int entry_exit_map;	/* bitmap of threads entering/exiting the guest */
	int napping_threads;	/* bitmap of threads currently napping */
	int first_vcpuid;	/* vcpu id of thread 0 of this vcore */
	u16 pcpu;		/* physical cpu the vcore is (to be) run on */
	u16 last_cpu;		/* last physical cpu this vcore ran on */
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct rcuwait wait;	/* where the runner sleeps while halted */
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;		/* timebase ticks stolen from the guest */
	u64 preempt_tb;
	struct kvm_vcpu *runner;	/* vcpu task driving this vcore's run loop */
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase minus host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in effect in HW */
	ulong lpcr;
	u32 arch_compat;	/* requested architecture compatibility level */
	ulong pcr;
	ulong dpdes;		/* doorbell state — presumably shadow of SPR DPDES */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;	/* adaptive halt-polling interval */
	atomic_t online_count;
};
0098
/* Book3S-specific per-vcpu shadow MMU and register state. */
struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];	/* guest->host segment hash */
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];		/* shadow of the guest SLB */
	u8 slb_shadow_max;		/* number of valid slb_shadow entries */
	struct kvmppc_bat ibat[8];	/* shadow instruction BATs */
	struct kvmppc_bat dbat[8];	/* shadow data BATs */
	u64 hid[6];			/* shadow HID special-purpose registers */
	u64 gqr[8];			/* graphics quantization registers */
	u64 sdr1;			/* guest hash-table base/size register */
	u64 hior;			/* guest interrupt vector offset */
	u64 msr_mask;			/* MSR bits the guest is allowed to set */
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];	/* preallocated host VSIDs */
	u32 vsid_next;			/* next pool index to hand out */
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];	/* host MMU context ids backing the VSIDs */

	bool hior_explicit;		/* HIOR set explicitly by userspace, not derived */

	/* hpte_cache lookup tables — one hlist per hash bucket, see struct hpte_cache */
	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;		/* total cached entries across the tables */
	spinlock_t mmu_lock;		/* protects the hpte_hash_* tables */
};
0136
/*
 * Pseudo-VSID values and flag bits used when shadow-mapping guest
 * segments. NOTE(review): bit meanings below inferred from the names —
 * DR/IR = real-mode data/instruction, PR = problem state; confirm
 * against the MMU mapping code before relying on them.
 */
#define VSID_REAL 0x07ffffffffc00000ULL
#define VSID_BAT 0x07ffffffffb00000ULL
#define VSID_64K 0x0800000000000000ULL
#define VSID_1T 0x1000000000000000ULL
#define VSID_REAL_DR 0x2000000000000000ULL
#define VSID_REAL_IR 0x4000000000000000ULL
#define VSID_PR 0x8000000000000000ULL
0144
/*
 * Shadow-MMU management: installing guest translations on the host and
 * flushing them again by effective address, virtual page or physical
 * address range.
 */
extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
/* Per-flavour MMU initializers (64-bit, 32-bit and HV). */
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
				       unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
				     unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store);

/* Lifecycle of the per-vcpu hpte_cache entries and their hash tables. */
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
0175
/*
 * Radix guest MMU support: page-fault handling, guest address-space
 * copies, page-table walking, TLB invalidation and dirty tracking.
 */
extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
					  unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
						     gva_t eaddr, void *to, void *from,
						     unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
				      void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, u64 root,
				      u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
					    struct kvmppc_pte *gpte, u64 table,
					    int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				  struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift, unsigned int lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
			     unsigned int shift,
			     const struct kvm_memory_slot *memslot,
			     unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
				    bool writing, unsigned long gpa,
				    unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
					  unsigned long gpa,
					  struct kvm_memory_slot *memslot,
					  bool writing, bool kvm_ro,
					  pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
				      unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
/* MMU-notifier style callbacks for radix guests (unmap/age/test-age). */
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			  unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			       unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
					  struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
				       const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
0224
0225
/*
 * Miscellaneous helpers: guest loads, interrupt queueing/injection,
 * BAT setup, FP/VMX giveup, HPT manipulation and dirty-log harvesting.
 */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
				   bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
				    unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
				    unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
				   unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			   unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
				   unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
				    unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			      long pte_index, unsigned long pteh, unsigned long ptel,
			      pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			       unsigned long pte_index, unsigned long avpn,
			       unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
					struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
				     struct kvm_memory_slot *memslot,
				     unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
					   unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			       unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
0266
/* Transactional-memory instruction emulation for HV guests. */
extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

/* Entry trampolines and PR-KVM hypercall / emulation helpers. */
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
0285
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Save/restore TM checkpointed state around PR-KVM guest entry/exit. */
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
/* Without TM support these hooks compile away to no-ops. */
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
0297
/* Nested-HV virtualization: partition-table plumbing and L1/L2 entry. */
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
			  u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Module parameter: allow IRQ bypass (passthrough) when non-zero. */
extern int kvm_irq_bypass;
0320
0321 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
0322 {
0323 return vcpu->arch.book3s;
0324 }
0325
0326
0327
0328 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
0329 #include <asm/kvm_book3s_32.h>
0330 #endif
0331 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
0332 #include <asm/kvm_book3s_64.h>
0333 #endif
0334
/*
 * Trivial accessors for the guest register file (GPRs, CR, XER, CTR,
 * LR and PC), all of which live in vcpu->arch.regs.
 */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.regs.ccr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.link;
}

/* The guest program counter is kept in regs.nip. */
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}
0394
/* Defined out of line of this spot — presumably in the per-subarch headers included above; see kvm_book3s_32/64.h. */
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);

/*
 * True when the guest's current endianness (MSR_LE) differs from the
 * host kernel's, i.e. data emulated on the guest's behalf must be
 * byte-swapped.
 */
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}
0400
/* Faulting data address (DAR) captured at the last guest exit. */
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}
0405
0406
/*
 * Convert the vcpu's decrementer expiry from guest timebase to host
 * timebase by undoing the vcore's timebase offset.
 */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
}
0411
0412 static inline bool is_kvmppc_resume_guest(int r)
0413 {
0414 return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
0415 }
0416
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
/*
 * Whether this guest can use the magic (shared hypercall) page.
 */
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only non-HV (PR) KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}
0423
/* H_LOGICAL_CI_LOAD / H_LOGICAL_CI_STORE hypercall implementations. */
extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic r3/r4 values identifying an OSI hypercall — presumably for Mac-on-Linux compatibility; confirm against the OSI handler. */
#define OSI_SC_MAGIC_R3 0x113724FA
#define OSI_SC_MAGIC_R4 0x77810F9B

/* Instruction encodings recognised/patched during emulation. */
#define INS_DCBZ 0x7c0007ec
/* TO = 31 encodes an unconditional trap */
#define INS_TW 0x7fe00008

/* Relocation mask/offset used by the PR-KVM split-address hack. */
#define SPLIT_HACK_MASK 0xff000000
#define SPLIT_HACK_OFFS 0xfb000000
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
/*
 * Pack a (possibly sparse) vcpu id into the dense range
 * [0, KVM_MAX_VCPUS) for use as an index.
 *
 * NOTE(review): the intent appears to be support for emulated SMT
 * modes narrower than the hardware's — ids above KVM_MAX_VCPUS select
 * a "block", and block_offsets[] interleaves those blocks into the
 * thread slots left unused by the emulated stride. Confirm against the
 * vcore assignment code.
 *
 * Returns 0 (after a one-time warning) if the id cannot be packed.
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	/* Offset added per block; order interleaves blocks across threads. */
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}
0485
0486 #endif