/*
 * 8253/8254 interval timer emulation
 */
#define pr_fmt(fmt) "pit: " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>

#include "ioapic.h"
#include "irq.h"
#include "i8254.h"
#include "x86.h"

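/*
 * A 64-bit modulo is needed below, but 32-bit kernels have no native
 * 64-bit division/modulo, so derive the remainder from div64_u64()
 * there; 64-bit builds can use the '%' operator directly.
 */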
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

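/*
 * Per-channel read/write sequencing states: the guest programs each
 * counter for LSB-only, MSB-only, or LSB-then-MSB access; WORD0/WORD1
 * track which half of a two-byte transfer comes next.
 */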
#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm_pit *pit, int channel)
{
	return pit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm_pit *pit)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &pit->pit_state;

	if (!ps->period)
		return 0;

	/*
	 * The Counter does not stop when it reaches zero. In
	 * Modes 0, 1, 4, and 5 the Counter "wraps around" to
	 * the highest count, either FFFF hex for binary counting
	 * or 9999 for BCD counting, and continues counting.
	 * Modes 2 and 3 are periodic; the Counter reloads itself
	 * with the initial count and continues counting from there.
	 */
	remaining = hrtimer_get_remaining(&ps->timer);
	elapsed = ps->period - ktime_to_ns(remaining);

	return elapsed;
}

static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(pit);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

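/*
 * Convert the elapsed time into PIT ticks (the i8254 counts at
 * KVM_PIT_FREQ, nominally 1.193182 MHz) and derive the current
 * counter value from the initial count according to the channel mode.
 */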
static int pit_get_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	t = kpit_elapsed(pit, c, channel);
	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}

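/*
 * Compute the level of the channel's OUT pin, which depends on the
 * programmed mode: e.g. mode 0 raises OUT once the count expires,
 * while mode 3 (square wave) keeps OUT high for the first half of
 * each period.
 */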
static int pit_get_out(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
	s64 d, t;
	int out;

	t = kpit_elapsed(pit, c, channel);
	d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}

static void pit_latch_count(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	if (!c->count_latched) {
		c->latched_count = pit_get_count(pit, channel);
		c->count_latched = c->rw_mode;
	}
}

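/*
 * Latch the status byte for a Read-Back command: bit 7 is the OUT
 * level, bits 5:4 the access (rw) mode, bits 3:1 the counter mode and
 * bit 0 the BCD flag.
 */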
static void pit_latch_status(struct kvm_pit *pit, int channel)
{
	struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(pit, channel) << 7) |
			     (c->rw_mode << 4) |
			     (c->mode << 1) |
			     c->bcd);
		c->status_latched = 1;
	}
}

static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps)
{
	return container_of(ps, struct kvm_pit, pit_state);
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
	struct kvm_pit *pit = pit_state_to_pit(ps);

	atomic_set(&ps->irq_ack, 1);
	/* irq_ack should be set before pending is read.  Order accesses with
	 * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
	 */
	smp_mb();
	if (atomic_dec_if_positive(&ps->pending) > 0)
		kthread_queue_work(pit->worker, &pit->expired);
}

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	/* Somewhat arbitrarily make vcpu0 the owner of the PIT. */
	if (vcpu->vcpu_id || !pit)
		return;

	timer = &pit->pit_state.timer;
	mutex_lock(&pit->pit_state.lock);
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
	mutex_unlock(&pit->pit_state.lock);
}

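/*
 * Cancel the hrtimer and flush any queued expiry work so that
 * pit_do_work() cannot run against a timer that is being torn down.
 */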
static void destroy_pit_timer(struct kvm_pit *pit)
{
	hrtimer_cancel(&pit->pit_state.timer);
	kthread_flush_work(&pit->expired);
}

static void pit_do_work(struct kthread_work *work)
{
	struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
	struct kvm *kvm = pit->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	struct kvm_kpit_state *ps = &pit->pit_state;

	if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
		return;

	kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false);
	kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false);

	/*
	 * Provides NMI watchdog support via Virtual Wire mode.
	 * The route is: PIT -> LVT0 in NMI mode.
	 *
	 * Note: Our Virtual Wire implementation does not follow
	 * the MP specification.  We propagate a PIT interrupt to all
	 * VCPUs and only when LVT0 is in NMI mode.  The interrupt can
	 * also be simultaneously delivered through PIC and IOAPIC.
	 */
	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_apic_nmi_wd_deliver(vcpu);
}

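/*
 * hrtimer callback, called in hardirq context: account the tick if
 * reinjection is enabled, defer the actual IRQ injection to the
 * kthread worker, and re-arm the timer when the channel is periodic.
 */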
static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
	struct kvm_pit *pt = pit_state_to_pit(ps);

	if (atomic_read(&ps->reinject))
		atomic_inc(&ps->pending);

	kthread_queue_work(pt->worker, &pt->expired);

	if (ps->is_periodic) {
		hrtimer_add_expires_ns(&ps->timer, ps->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
{
	atomic_set(&pit->pit_state.pending, 0);
	atomic_set(&pit->pit_state.irq_ack, 1);
}

void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;

	if (atomic_read(&ps->reinject) == reinject)
		return;

	/*
	 * AMD SVM AVIC accelerates EOI write and does not trap,
	 * which breaks in-kernel PIT re-inject mode: it checks
	 * ps->irq_ack before kvm_set_irq() and relies on the ack
	 * notifier to timely queue the pt->worker work item and
	 * reinject the missed tick, so deactivate APICv while the
	 * PIT is in reinject mode.
	 */
	if (reinject) {
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
		/* The initial state is disabled. */
		kvm_pit_reset_reinject(pit);
		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	} else {
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	}

	atomic_set(&ps->reinject, reinject);
}

static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
{
	struct kvm_kpit_state *ps = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	s64 interval;

	if (!ioapic_in_kernel(kvm) ||
	    ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
		return;

	interval = mul_u64_u32_div(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("create pit timer, interval is %llu nsec\n", interval);

	/* TODO: the new value only takes effect after the timer retriggers. */
	hrtimer_cancel(&ps->timer);
	kthread_flush_work(&pit->expired);
	ps->period = interval;
	ps->is_periodic = is_period;

	kvm_pit_reset_reinject(pit);

	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (ps->is_periodic) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (ps->period < min_period) {
			pr_info_ratelimited(
			    "kvm: requested %lld ns i8254 timer period limited to %lld ns\n",
			    ps->period, min_period);
			ps->period = min_period;
		}
	}

	hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}

static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &pit->pit_state;

	pr_debug("load_count val is %u, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/*
	 * Two types of timer: mode 1 is one-shot, mode 2 is periodic;
	 * otherwise delete the timer.
	 */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
		/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(pit, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(pit, val, 1);
		break;
	default:
		destroy_pit_timer(pit);
	}
}

void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
			int hpet_legacy_start)
{
	u8 saved_mode;

	WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));

	if (hpet_legacy_start) {
		/* Save existing mode for later reenablement. */
		WARN_ON(channel != 0);
		saved_mode = pit->pit_state.channels[0].mode;
		pit->pit_state.channels[0].mode = 0xff; /* disable timer */
		pit_load_count(pit, channel, val);
		pit->pit_state.channels[0].mode = saved_mode;
	} else {
		pit_load_count(pit, channel, val);
	}
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

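/*
 * The PIT occupies I/O ports 0x40-0x43: ports 0x40-0x42 access the
 * count registers of channels 0-2, and port 0x43 is the write-only
 * mode/command register.
 */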
static int pit_ioport_write(struct kvm_vcpu *vcpu,
			    struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(pit, channel);
					if (!(val & 0x10))
						pit_latch_status(pit, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(pit, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(pit, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(pit, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(pit, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int pit_ioport_read(struct kvm_vcpu *vcpu,
			   struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	int ret, count;
	struct kvm_kpit_channel_state *s;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	if (addr == 3)
		return 0;

	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(pit, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(pit, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}

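/*
 * Emulate the PC speaker port (0x61): bit 0 drives the channel 2
 * gate, bit 1 the speaker data enable, bit 4 reflects the refresh
 * clock and bit 5 the channel 2 OUT level.
 */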
static int speaker_ioport_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this,
				gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	u32 val = *(u32 *) data;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	mutex_lock(&pit_state->lock);
	if (val & (1 << 1))
		pit_state->flags |= KVM_PIT_FLAGS_SPEAKER_DATA_ON;
	else
		pit_state->flags &= ~KVM_PIT_FLAGS_SPEAKER_DATA_ON;
	pit_set_gate(pit, 2, val & 1);
	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_read(struct kvm_vcpu *vcpu,
			       struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	unsigned int refresh_clock;
	int ret;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us.  We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = (!!(pit_state->flags & KVM_PIT_FLAGS_SPEAKER_DATA_ON) << 1) |
		pit_get_gate(pit, 2) | (pit_get_out(pit, 2) << 5) |
		(refresh_clock << 4);
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}

static void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	pit->pit_state.flags = 0;
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit, i, 0);
	}

	kvm_pit_reset_reinject(pit);
}

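/*
 * IRQ0 mask notifier: when the interrupt is unmasked, drop any ticks
 * that accumulated while it was masked instead of reinjecting them
 * as a burst.
 */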
static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask)
		kvm_pit_reset_reinject(pit);
}

static const struct kvm_io_device_ops pit_dev_ops = {
	.read     = pit_ioport_read,
	.write    = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
	.read     = speaker_ioport_read,
	.write    = speaker_ioport_write,
};

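/*
 * Instantiate the in-kernel PIT: set up the per-VM worker and hrtimer,
 * enable reinject mode by default, and register the device on the PIO
 * bus (plus an optional dummy speaker port when requested).
 */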
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;
	struct pid *pid;
	pid_t pid_nr;
	int ret;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL_ACCOUNT);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0)
		goto fail_request;

	mutex_init(&pit->pit_state.lock);

	pid = get_pid(task_tgid(current));
	pid_nr = pid_vnr(pid);
	put_pid(pid);

	pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
	if (IS_ERR(pit->worker))
		goto fail_kthread;

	kthread_init_work(&pit->expired, pit_do_work);

	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->timer.function = pit_timer_fn;

	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	pit->mask_notifier.func = pit_mask_notifier;

	kvm_pit_reset(pit);

	kvm_pit_set_reinject(pit, true);

	mutex_lock(&kvm->slots_lock);
	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
				      KVM_PIT_MEM_LENGTH, &pit->dev);
	if (ret < 0)
		goto fail_register_pit;

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
					      KVM_SPEAKER_BASE_ADDRESS, 4,
					      &pit->speaker_dev);
		if (ret < 0)
			goto fail_register_speaker;
	}
	mutex_unlock(&kvm->slots_lock);

	return pit;

fail_register_speaker:
	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
fail_register_pit:
	mutex_unlock(&kvm->slots_lock);
	kvm_pit_set_reinject(pit, false);
	kthread_destroy_worker(pit->worker);
fail_kthread:
	kvm_free_irq_source_id(kvm, pit->irq_source_id);
fail_request:
	kfree(pit);
	return NULL;
}

void kvm_free_pit(struct kvm *kvm)
{
	struct kvm_pit *pit = kvm->arch.vpit;

	if (pit) {
		mutex_lock(&kvm->slots_lock);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
		mutex_unlock(&kvm->slots_lock);
		kvm_pit_set_reinject(pit, false);
		hrtimer_cancel(&pit->pit_state.timer);
		kthread_destroy_worker(pit->worker);
		kvm_free_irq_source_id(kvm, pit->irq_source_id);
		kfree(pit);
	}
}