/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kvm on s390
 */
#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu) \
	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))

extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;

#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
	debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
	  "%d: " d_string "\n", (d_kvm)->userspace_pid, \
	  d_args); \
} while (0)

#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)
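
/*
 * Usage sketch (illustrative only; "cpu_addr" is a placeholder variable):
 * emit a vcpu-scoped trace entry at debug level 3 into the per-VM s390dbf
 * area, printf-style:
 *
 *	VCPU_EVENT(vcpu, 3, "SIGP: sense for cpu %x", cpu_addr);
 */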

static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}

/*
 * The guest prefix area spans 8k (two pages); the SIE block keeps the
 * prefix address right-shifted by GUEST_PREFIX_SHIFT.
 */
#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
		   prefix);
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}

static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      u8 *ar_b1, u8 *ar_b2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}
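
/*
 * Usage sketch (illustrative only): decode the two register operands of an
 * intercepted RRE-format instruction before emulating it:
 *
 *	int reg1, reg2;
 *
 *	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 */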

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	/* The displacement is a 20bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
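
/*
 * Usage sketch (illustrative only): an instruction handler reporting
 * success to the guest by setting condition code 0:
 *
 *	kvm_s390_set_psw_cc(vcpu, 0);
 *	return 0;
 */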

static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac_mask) &&
		__test_facility(nr, kvm->arch.model.fac_list);
}

static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}
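
/*
 * Usage sketch (illustrative only; the facility number is a placeholder):
 * mirror a host facility into the guest's facility mask and list:
 *
 *	if (test_facility(134)) {
 *		set_kvm_facility(kvm->arch.model.fac_mask, 134);
 *		set_kvm_facility(kvm->arch.model.fac_list, 134);
 *	}
 */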

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
	WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
	return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
{
	if (kvm->arch.user_cpu_state_ctrl)
		return;

	VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control");
	kvm->arch.user_cpu_state_ctrl = 1;
}

/* get the end gfn of the last (highest gfn) memslot */
static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
{
	struct rb_node *node;
	struct kvm_memory_slot *ms;

	if (WARN_ON(kvm_memslots_empty(slots)))
		return 0;

	node = rb_last(&slots->gfn_tree);
	ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
	return ms->base_gfn + ms->npages;
}

static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
{
	u32 gd = (u32)(u64)kvm->arch.gisa_int.origin;

	if (gd && sclp.has_gisaf)
		gd |= GISA_FORMAT1;
	return gd;
}

/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc,
				u16 *rrc);
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc);

static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
	return kvm->arch.pv.handle;
}

static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv.handle;
}

static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);
	return !!kvm_s390_pv_get_handle(kvm);
}

static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
	lockdep_assert_held(&vcpu->mutex);
	return !!kvm_s390_pv_cpu_get_handle(vcpu);
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm = *pgm_info,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = code,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
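
/*
 * Usage sketch (illustrative only): instruction handlers typically report
 * an invalid operand by injecting a program interruption into the guest:
 *
 *	if (address & 0x07)
 *		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 */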
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
	u64 rc;

	preempt_disable();
	rc = get_tod_clock_fast() + kvm->arch.epoch;
	preempt_enable();
	return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * ret = write_guest(vcpu, addr, data, len);
 * if (ret)
 *	return kvm_s390_inject_prog_cond(vcpu, ret);
 *
 * A negative error code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
void kvm_s390_gisa_disable(struct kvm *kvm);
void kvm_s390_gisa_enable(struct kvm *kvm);
int kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

	return &sca->ipte_control;
}
static inline int kvm_s390_use_sca_entries(void)
{
	/*
	 * Without SIGP interpretation, only SRS interpretation (if available)
	 * might use the entries. By not setting the entries and keeping them
	 * invalid, hardware will not access them but intercept.
	 */
	return sclp.has_sigpif;
}
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info);

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the vcpus
 * are running as each vcpu will be removed from SIE before resetting the
 * crypto attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * kvm_s390_vcpu_pci_enable_interp
 *
 * Set the associated PCI attributes for each vcpu to allow for zPCI Load/Store
 * interpretation as well as adapter interruption forwarding.
 *
 * Note: kvm->lock must be held prior to calling this function
 */
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Set the maximum number of diag9c forwarding per second
 */
extern unsigned int diag9c_forwarding_hz;

#endif