#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the canned debug instruction used to
 * implement software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);
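
/*
 * Example (illustrative sketch, not a kernel-mandated calling convention):
 * an exit handler that has decoded a 4-byte guest load into GPR "rt" from
 * an emulated MMIO address would typically complete it with:
 *
 *	return kvmppc_handle_load(vcpu, rt, 4, 1);
 *
 * A return value of EMULATE_DO_MMIO means the access has been packaged
 * into the vcpu's kvm_run area and must be completed by user space.
 */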

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);
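
/*
 * Example (sketch, assuming a populated vcpu): translating a guest
 * effective address for a data read and picking up the real address:
 *
 *	struct kvmppc_pte pte;
 *
 *	if (!kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte))
 *		gpa = pte.raddr;
 */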

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
				struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
				struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
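
/*
 * Example (sketch): an H_PUT_TCE-style handler would reject an out-of-range
 * I/O bus address before touching the TCE table:
 *
 *	if (kvmppc_ioba_validate(stt, ioba, 1) != H_SUCCESS)
 *		return H_PARAMETER;
 */
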
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				      unsigned long liobn, unsigned long ioba,
				      unsigned long tce_list,
				      unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};
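
/*
 * Note: this union backs the KVM_{GET,SET}_ONE_REG interface; which member
 * is live for a given access is implied by the size field encoded in the
 * register id (see one_reg_size() below).
 */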

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};
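
/*
 * Example (sketch): each VM is bound to either the HV or the PR backend,
 * and arch code dispatches through the per-VM ops pointer, e.g.:
 *
 *	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
 *
 * See kvmppc_fast_vcpu_kick() below for an in-tree instance of this
 * pattern.
 */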

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
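
/*
 * Example (sketch): a typical emulation path first fetches the faulting
 * instruction and only proceeds once the fetch itself succeeded:
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;
 *	(decode and emulate "inst" here)
 */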

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
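
/*
 * Example (sketch): bit positions use IBM (big-endian) numbering over the
 * full 64-bit value, so for a 32-bit instruction held in the low word the
 * primary opcode (instruction bits 0-5) is:
 *
 *	u32 op = kvmppc_get_field(inst, 32, 37);
 *
 * which is equivalent to (inst >> 26) & 0x3f.
 */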

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

#define one_reg_size(id) \
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
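
/*
 * Example (sketch): a get_one_reg backend can build its reply with these
 * helpers, letting the id's size field pick the union member:
 *
 *	case KVM_REG_PPC_DABR:
 *		*val = get_reg_val(id, vcpu->arch.dabr);
 *		break;
 *
 * (KVM_REG_PPC_DABR is just an illustrative register id here.)
 */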

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

/*
 * The guest-exit path stashes any pending host XIRR in the PACA; reading
 * the latch consumes it, clearing the saved value so a stale interrupt is
 * not replayed on a later read.
 */
static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * Order prior stores (e.g. IPI message data) before the store that
	 * sets the host_ipi flag; pairs with the barrier in
	 * kvmppc_clear_host_ipi().
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;

	/*
	 * Order the clearing of host_ipi before any subsequent reads of IPI
	 * messages; pairs with the barrier in kvmppc_set_host_ipi().
	 */
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)	{ return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Naming note: the first "xive" below is the "eXternal Interrupt
 * Virtualization Engine", i.e. the POWER9 interrupt controller, while the
 * second "xive" (in set_xive/get_xive) is the legacy "eXternal Interrupt
 * Vector Entry", the per-interrupt server/priority configuration inherited
 * from the XICS interface. Those two functions consume and produce that
 * legacy state on behalf of the new controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
	{ return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 unsigned long host_irq)
	{ return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 unsigned long host_irq)
	{ return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
	{ return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id,
				      u32 irq, int level, bool line_status)
	{ return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
	{ return true; }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu,
						  u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
	{ return -ENOENT; }

#endif

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real mode in
 * the guest, operating on the xics. Currently only VCPU wakeup is
 * supported.
 */
union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};
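
/*
 * Note (a sizing observation, not stated in the original): rm_state
 * (8 bytes) + rm_data (8 bytes) + 112 bytes of padding makes each per-core
 * entry 128 bytes, which keeps entries on separate cache lines so real-mode
 * updates from one core do not false-share with another (assuming the usual
 * 128-byte L1 line size on these CPUs).
 */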

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
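
/*
 * Example (sketch): SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) below
 * expands to kvmppc_get_srr0()/kvmppc_set_srr0(). On Book E HV these
 * access the guest SPR directly via mfspr/mtspr; elsewhere they access
 * the shared page with the endianness fixups applied.
 */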

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
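
/*
 * Example (sketch): for an indexed-form access such as lwzx rt, ra, rb the
 * effective address is (ra ? GPR[ra] : 0) + GPR[rb], as computed above.
 * When the guest MSR indicates 32-bit mode (MSR_SF on Book3S, MSR_CM on
 * Book3E clear), the result is truncated to 32 bits, matching the
 * architecture's EA calculation.
 */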

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */