// SPDX-License-Identifier: GPL-2.0
/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Extends the address range given by *start and *stop to include the address
 * range starting with estart and the length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
	u64 estop;

	if (len > 0)
		len--;
	else
		len = 0;

	estop = estart + len;

	/* 0-0 range represents "not set" */
	if ((*start == 0) && (*stop == 0)) {
		*start = estart;
		*stop = estop;
	} else if (*start <= *stop) {
		/* increase the existing range */
		if (estart < *start)
			*start = estart;
		if (estop > *stop)
			*stop = estop;
	} else {
		/* "overflowing" interval, whereby *start > *stop */
		if (estart <= *stop) {
			if (estop > *stop)
				*stop = estop;
		} else if (estop > *start) {
			if (estart < *start)
				*start = estart;
		}
		/* new range lies completely in the gap: minimize the growth */
		else if ((estop - *stop) < (*start - estart))
			*stop = estop;
		else
			*start = estart;
	}
}
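
/*
 * Worked example (illustrative values only): merging estart = 0x2000,
 * len = 0x10 into the wrapped range *start = 0xfffff000, *stop = 0xfff
 * hits the uncovered gap. Growing *stop to 0x200f costs 0x1010 addresses,
 * while pulling *start down to 0x2000 would cover almost the whole address
 * space, so the resulting range is 0xfffff000-0x200f.
 */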

/* the maximum length of an s390 instruction, in bytes */
#define MAX_INST_SIZE 6

static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
		return;

	/*
	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
		*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
	*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;

		/*
		 * The instruction in front of the desired bp has to
		 * report instruction-fetching events
		 */
		if (start < MAX_INST_SIZE) {
			len += start;
			start = 0;
		} else {
			start -= MAX_INST_SIZE;
			len += MAX_INST_SIZE;
		}
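		/*
		 * e.g. a breakpoint at 0x1000 with len 2 is reported as the
		 * range 0xffa-0x1001, so fetching the preceding (at most
		 * 6-byte) instruction also raises an event.
		 */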

		extend_address_range(cr10, cr11, start, len);
	}
}

static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
	    vcpu->arch.guestdbg.hw_wp_info == NULL)
		return;

	/*
	 * If the guest already monitors storage alteration within an address
	 * range, we cannot reuse cr10/cr11 for our watchpoints: monitor all
	 * of storage instead and filter unwanted events afterwards.
	 */
	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr10 = 0;
		*cr11 = -1UL;
	} else {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr9 |= PER_EVENT_STORE;

		for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
			start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
			len = vcpu->arch.guestdbg.hw_wp_info[i].len;

			extend_address_range(cr10, cr11, start, len);
		}
	}
}

void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}

void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: if guest psw has per enabled, otherwise 0s!
	 * This reduces the amount of reported events.
	 * Need to intercept all psw changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		/* disable timer (clock-comparator) interrupts */
		vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK;
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = -1UL;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}

#define MAX_WP_SIZE 100

static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret = 0;

	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL_ACCOUNT);
	if (!wp_info->old_data)
		return -ENOMEM;

	/* keep a copy of the current contents to detect changes later */
	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
			     wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}

#define MAX_BP_COUNT 50

int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;

	bp_data = memdup_user(dbg->arch.hw_bp,
			      sizeof(*bp_data) * dbg->arch.nr_hw_bp);
	if (IS_ERR(bp_data))
		return PTR_ERR(bp_data);

	/* first pass: count watchpoints and breakpoints separately */
	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		default:
			break;
		}
	}

	if (nr_wp > 0) {
		wp_info = kmalloc_array(nr_wp, sizeof(*wp_info),
					GFP_KERNEL_ACCOUNT);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	if (nr_bp > 0) {
		bp_info = kmalloc_array(nr_bp, sizeof(*bp_info),
					GFP_KERNEL_ACCOUNT);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}

	/* second pass: import the actual data */
	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}

	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	/* bp_data was only a temporary copy of the user-supplied array */
	kfree(bp_data);
	return 0;
error:
	kfree(bp_data);
	kfree(wp_info);
	kfree(bp_info);
	return ret;
}
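
/*
 * Typical call path (sketch): userspace issues KVM_SET_GUEST_DEBUG with
 * control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP and arch.hw_bp
 * pointing to an array of struct kvm_hw_breakpoint entries; the arch ioctl
 * handler then calls kvm_s390_import_bp_data() to copy them in.
 */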

void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}

static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval */
		return (addr >= a) || (addr <= b);
}
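
/*
 * e.g. for the wrapped interval a = 0xfffff000, b = 0xfff, address 0x500
 * is inside (0x500 <= b) while 0x10000 is outside (neither >= a nor <= b).
 */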

#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)

static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			goto found;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			goto found;

		bp_info++;
	}

	return NULL;
found:
	return bp_info;
}

static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL_ACCOUNT);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
				    wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}

void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}

/* the PER codes are the cr9 event masks, shifted down into a single byte */
#define PER_CODE_MASK		(PER_EVENT_MASK >> 24)
#define PER_CODE_BRANCH		(PER_EVENT_BRANCH >> 24)
#define PER_CODE_IFETCH		(PER_EVENT_IFETCH >> 24)
#define PER_CODE_STORE		(PER_EVENT_STORE >> 24)
#define PER_CODE_STORE_REAL	(PER_EVENT_STORE_REAL >> 24)

#define per_bp_event(code) \
		(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
		(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
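
/*
 * e.g. PER_EVENT_BRANCH (0x80000000, assuming the usual s390 definition)
 * shifted right by 24 yields 0x80, the successful-branching code reported
 * in the SIE block's perc field.
 */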

static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
			       unsigned long peraddr)
{
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;

	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* addr was not a bp, check the PER address instead */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;
exit_required:
	return 1;
}

static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
{
	u8 exec_ilen = 0;
	u16 opcode[3];
	int rc;

	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
		/* PER address references the fetched or the execute instr */
		*addr = vcpu->arch.sie_block->peraddr;
		/*
		 * Manually detect if we have an EXECUTE instruction. As
		 * instructions are always 2 byte aligned we can read the
		 * first two bytes unconditionally
		 */
		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
		if (rc)
			return rc;
		if (opcode[0] >> 8 == 0x44)
			exec_ilen = 4;
		if ((opcode[0] & 0xff0f) == 0xc600)
			exec_ilen = 6;
	} else {
		/* instr was suppressed, calculate the responsible instr */
		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
				     kvm_s390_get_ilen(vcpu));
		if (vcpu->arch.sie_block->icptstatus & 0x01) {
			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
			if (!exec_ilen)
				exec_ilen = 4;
		}
	}

	if (exec_ilen) {
		/* read the complete EXECUTE instr to detect the fetched addr */
		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
		if (rc)
			return rc;
		if (exec_ilen == 6) {
			/* EXECUTE RELATIVE LONG - RIL-b format */
			s32 rl = *((s32 *) (opcode + 1));

			/* rl is a _signed_ 32 bit value specifying halfwords */
			*addr += (u64)(s64) rl * 2;
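			/*
			 * e.g. rl = -2 targets the instruction four bytes
			 * before the EXRL itself
			 */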
		} else {
			/* EXECUTE - RX-a format: base + index + displacement */
			u32 base = (opcode[1] & 0xf000) >> 12;
			u32 disp = opcode[1] & 0x0fff;
			u32 index = opcode[0] & 0x000f;

			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
			*addr += disp;
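			/*
			 * e.g. a base register containing 0x2000, an index
			 * register containing 0x30 and disp = 0x100 yield
			 * the target address 0x2130
			 */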
		}
		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
	}
	return 0;
}

#define guest_per_enabled(vcpu) \
		(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)

int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
{
	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
	const u8 ilen = kvm_s390_get_ilen(vcpu);
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_PER,
		.per_code = PER_CODE_IFETCH,
		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
	};
	unsigned long fetched_addr;
	int rc;

	/*
	 * The PSW points to the next instruction, therefore the intercepted
	 * instruction generated a PER i-fetch event. The PER address
	 * therefore points at the previous PSW address (could be an
	 * EXECUTE function).
	 */
	if (!guestdbg_enabled(vcpu))
		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);

	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	if (!guest_per_enabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
		return 0;

	rc = per_fetched_addr(vcpu, &fetched_addr);
	if (rc < 0)
		return rc;
	if (rc)
		/* instruction-fetching exceptions */
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (in_addr_range(fetched_addr, cr10, cr11))
		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
	return 0;
}

static int filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	const u8 perc = vcpu->arch.sie_block->perc;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* filter all events, demanded by the guest */
	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
	unsigned long fetched_addr;
	int rc;

	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_CODE_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_CODE_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_CODE_IFETCH) {
		rc = per_fetched_addr(vcpu, &fetched_addr);
		if (rc < 0)
			return rc;
		/*
		 * If the fetched address cannot be resolved (an exception was
		 * raised while reading guest memory), drop the event instead
		 * of injecting one with a wrong address.
		 */
		if (rc || !in_addr_range(fetched_addr, cr10, cr11))
			guest_perc &= ~PER_CODE_IFETCH;
	}

	/*
	 * Forward the remaining, unfiltered events to the guest; without
	 * any remaining event, suppress the PER program interrupt.
	 */
	vcpu->arch.sie_block->perc = guest_perc;

	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
	return 0;
}

#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)

int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
	int rc, new_as;

	if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
				vcpu->arch.sie_block->peraddr))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	rc = filter_guest_per_event(vcpu);
	if (rc)
		return rc;

	/*
	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
	 * a space-switch event. PER events enforce space-switch events
	 * for these instructions. So take care of the filtering.
	 */
	if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
		vcpu->arch.sie_block->iprcc = 0;
		new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;

		/*
		 * If the AS changed from / to home, we had RP, SAC or SACF
		 * instruction. Check primary and home space-switch-event
		 * controls. (theoretically home -> home paired with an
		 * additional primary -> primary switch is not possible)
		 */
		if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) &&
		    (pssec(vcpu) || hssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;

		/*
		 * PT, PTI, PR, PC instructions operate on the primary AS
		 * only. Check if the primary-space-switch-event control was
		 * or got set.
		 */
		if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) &&
		    (pssec(vcpu) || old_ssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
	}
	return 0;
}