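/*
 * In-kernel emulation of the I/O APIC (IOAPIC) for KVM guests: guest MMIO
 * access to the indirect register window, redirection-table programming,
 * interrupt injection, and EOI handling.
 */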
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
			  bool line_status);

static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
				      struct kvm_ioapic *ioapic,
				      int trigger_mode,
				      int pin);

static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content = ~0ULL;

			if (redir_index < IOAPIC_NUM_PINS) {
				u32 index = array_index_nospec(
					redir_index, IOAPIC_NUM_PINS);

				redir_content = ioapic->redirtbl[index].bits;
			}

			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_IDS);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
				 e->fields.dest_id,
				 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, dest_map->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, dest_map->map);
		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, dest_map->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
		__rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
			int vector)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;

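	/*
	 * Clear this vCPU from the RTC destination map and drop the
	 * pending-EOI count, but only if it was waiting on this vector.
	 */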
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    (vector == dest_map->vectors[vcpu->vcpu_id]) &&
	    (test_and_clear_bit(vcpu->vcpu_id,
				ioapic->rtc_status.dest_map.map))) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true;

	return false;
}

static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];

	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
		if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
					 entry->fields.dest_id,
					 entry->fields.dest_mode) ||
		    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
			continue;

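		/*
		 * This vCPU is a destination and no longer has a pending
		 * EOI for the vector in its LAPIC: ack the RTC entry now.
		 */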
		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
		break;
	}
}

static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
			  int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

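	/*
	 * AMD SVM AVIC accelerates EOI writes only for edge-triggered
	 * interrupts, in which case the in-kernel IOAPIC never sees the
	 * EOI.  Do a lazy update of pending EOIs when setting an
	 * edge-triggered pin instead.
	 */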
	if (edge && kvm_apicv_activated(ioapic->kvm))
		ioapic_lazy_update_eoi(ioapic, irq);

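	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has been already ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */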
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge) {
		ioapic->irr_delivered &= ~mask;
		if (old_irr == ioapic->irr) {
			ret = 0;
			goto out;
		}
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}

void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

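	/* Make sure we see any missing RTC EOI */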
	if (test_bit(vcpu->vcpu_id, dest_map->map))
		__set_bit(dest_map->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);

			if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
						e->fields.dest_id, dm) ||
			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
				__set_bit(e->fields.vector,
					  ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (!ioapic_in_kernel(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;
	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
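		/* Writes to the version register are ignored. */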
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		if (index >= IOAPIC_NUM_PINS)
			return;
		index = array_index_nospec(index, IOAPIC_NUM_PINS);
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
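		/* Preserve read-only fields: Remote IRR and Delivery Status. */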
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		old_dest_id = e->fields.dest_id;
		old_dest_mode = e->fields.dest_mode;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;

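		/*
		 * Some OSes (Linux, Xen) assume that the Remote IRR bit is
		 * cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered.  This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */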
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;

		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		if (e->fields.delivery_mode == APIC_DM_FIXED) {
			struct kvm_lapic_irq irq;

			irq.vector = e->fields.vector;
			irq.delivery_mode = e->fields.delivery_mode << 8;
			irq.dest_mode =
				kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
			irq.level = false;
			irq.trig_mode = e->fields.trig_mode;
			irq.shorthand = APIC_DEST_NOSHORT;
			irq.dest_id = e->fields.dest_id;
			irq.msi_redir_hint = false;
			bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
						 vcpu_bitmap);
			if (old_dest_mode != e->fields.dest_mode ||
			    old_dest_id != e->fields.dest_id) {
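				/*
				 * Also scan the vCPUs matching the previous
				 * destination so their handled-vector maps
				 * stay in sync after the retarget.
				 */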
				irq.dest_id = old_dest_id;
				irq.dest_mode =
					kvm_lapic_irq_dest_mode(
						!!e->fields.dest_mode);
				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
							 vcpu_bitmap);
			}
			kvm_make_scan_ioapic_request_mask(ioapic->kvm,
							  vcpu_bitmap);
		} else {
			kvm_make_scan_ioapic_request(ioapic->kvm);
		}
		break;
	}
}

static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask ||
	    (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
	     entry->fields.remote_irr))
		return -1;

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = APIC_DEST_NOSHORT;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
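		/*
		 * pending_eoi can never be negative (see
		 * rtc_status_pending_eoi_check_valid), and this path is
		 * reached only when rtc_irq_check_coalesced() returned
		 * false, i.e. when pending_eoi is zero.
		 */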
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;

	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}

static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
				      struct kvm_ioapic *ioapic,
				      int trigger_mode,
				      int pin)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];

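	/*
	 * We are dropping the lock while calling the ack notifiers because
	 * ack notifier callbacks for assigned devices call into the IOAPIC
	 * recursively.  Since remote_irr is cleared only after the
	 * notifiers run, if the same vector is delivered while the lock is
	 * dropped it is put into irr and delivered after the ack notifier
	 * returns.
	 */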
	spin_unlock(&ioapic->lock);
	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
	spin_lock(&ioapic->lock);

	if (trigger_mode != IOAPIC_LEVEL_TRIG ||
	    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
		return;

	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
	ent->fields.remote_irr = 0;
	if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
		++ioapic->irq_eoi[pin];
		if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
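			/*
			 * Real hardware does not redeliver the interrupt
			 * immediately during an EOI broadcast; deferring the
			 * reinjection to delayed work lets a buggy guest that
			 * mishandles a level-triggered interrupt make slow
			 * progress instead of storming.  The threshold of
			 * IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT back-to-back EOIs
			 * for a still-asserted pin detects the storm.
			 */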
			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
			ioapic->irq_eoi[pin] = 0;
			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
		} else {
			ioapic_service(ioapic, pin, false);
		}
	} else {
		ioapic->irq_eoi[pin] = 0;
	}
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	int i;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	rtc_irq_eoi(ioapic, vcpu, vector);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;
		kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
	}
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ASSERT(!(addr & 0xf));

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			     gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ASSERT(!(addr & 0xf));

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8 *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF;
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read	= ioapic_mmio_read,
	.write	= ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}