// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

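/*
 * Default PPI numbers for the EL1 timers: INTID 30 is conventionally
 * the EL1 physical timer and INTID 27 the EL1 virtual timer, matching
 * the usual GIC PPI assignments for the architected timers.
 */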
static const struct kvm_irq_level default_ptimer_irq = {
    .irq    = 30,
    .level  = 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
    .irq    = 27,
    .level  = 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
                struct arch_timer_context *timer,
                enum kvm_arch_timer_regs treg,
                u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                  struct arch_timer_context *timer,
                  enum kvm_arch_timer_regs treg);

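/*
 * Accessors for the saved timer state. While a timer is not loaded on
 * the CPU, its CTL/CVAL (and, for the vtimer, CNTVOFF) live in the
 * vcpu's system register file, indexed by the timer context.
 */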
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
    struct kvm_vcpu *vcpu = ctxt->vcpu;

    switch (arch_timer_ctx_index(ctxt)) {
    case TIMER_VTIMER:
        return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
    case TIMER_PTIMER:
        return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
    default:
        WARN_ON(1);
        return 0;
    }
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
    struct kvm_vcpu *vcpu = ctxt->vcpu;

    switch (arch_timer_ctx_index(ctxt)) {
    case TIMER_VTIMER:
        return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
    case TIMER_PTIMER:
        return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
    default:
        WARN_ON(1);
        return 0;
    }
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
    struct kvm_vcpu *vcpu = ctxt->vcpu;

    switch (arch_timer_ctx_index(ctxt)) {
    case TIMER_VTIMER:
        return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
    default:
        return 0;
    }
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
    struct kvm_vcpu *vcpu = ctxt->vcpu;

    switch (arch_timer_ctx_index(ctxt)) {
    case TIMER_VTIMER:
        __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
        break;
    case TIMER_PTIMER:
        __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
        break;
    default:
        WARN_ON(1);
    }
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
    struct kvm_vcpu *vcpu = ctxt->vcpu;

    switch (arch_timer_ctx_index(ctxt)) {
    case TIMER_VTIMER:
        __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
        break;
    case TIMER_PTIMER:
        __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
        break;
    default:
        WARN_ON(1);
    }
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
    struct kvm_vcpu *vcpu = ctxt->vcpu;

    switch (arch_timer_ctx_index(ctxt)) {
    case TIMER_VTIMER:
        __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
        break;
    default:
        WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
    }
}

u64 kvm_phys_timer_read(void)
{
    return timecounter->cc->read(timecounter->cc);
}

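/*
 * On VHE, both EL1 timers are backed directly by hardware. On non-VHE,
 * only the virtual timer is; the EL1 physical timer is then fully
 * emulated using a hrtimer.
 */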
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
    if (has_vhe()) {
        map->direct_vtimer = vcpu_vtimer(vcpu);
        map->direct_ptimer = vcpu_ptimer(vcpu);
        map->emul_ptimer = NULL;
    } else {
        map->direct_vtimer = vcpu_vtimer(vcpu);
        map->direct_ptimer = NULL;
        map->emul_ptimer = vcpu_ptimer(vcpu);
    }

    trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
    return static_branch_unlikely(&userspace_irqchip_in_use) &&
        unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
    hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
              HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
    hrtimer_cancel(hrt);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
    struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
    struct arch_timer_context *ctx;
    struct timer_map map;

    /*
     * We may see a timer interrupt after vcpu_put() has been called which
     * sets the CPU's vcpu pointer to NULL, because even though the timer
     * has been disabled in timer_save_state(), the hardware interrupt
     * signal may not have been retired from the interrupt controller yet.
     */
    if (!vcpu)
        return IRQ_HANDLED;

    get_timer_map(vcpu, &map);

    if (irq == host_vtimer_irq)
        ctx = map.direct_vtimer;
    else
        ctx = map.direct_ptimer;

    if (kvm_timer_should_fire(ctx))
        kvm_timer_update_irq(vcpu, true, ctx);

    if (userspace_irqchip(vcpu->kvm) &&
        !static_branch_unlikely(&has_gic_active_state))
        disable_percpu_irq(host_vtimer_irq);

    return IRQ_HANDLED;
}

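/*
 * Compute how many host ns remain until the guest counter reaches
 * @val; returns 0 if that point has already passed.
 */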
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
                     u64 val)
{
    u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

    if (now < val) {
        u64 ns;

        ns = cyclecounter_cyc2ns(timecounter->cc,
                     val - now,
                     timecounter->mask,
                     &timecounter->frac);
        return ns;
    }

    return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
    return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
    WARN_ON(timer_ctx && timer_ctx->loaded);
    return timer_ctx &&
        ((timer_get_ctl(timer_ctx) &
          (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
    return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
        vcpu_get_flag(vcpu, IN_WFIT));
}

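/*
 * For a vcpu blocked in WFET, the wake-up deadline is the absolute
 * counter value held in the instruction's timeout register, measured
 * against the guest's virtual counter.
 */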
static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
    struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
    u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

    return kvm_counter_compute_delta(ctx, val);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
    u64 min_delta = ULLONG_MAX;
    int i;

    for (i = 0; i < NR_KVM_TIMERS; i++) {
        struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

        WARN(ctx->loaded, "timer %d loaded\n", i);
        if (kvm_timer_irq_can_fire(ctx))
            min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
    }

    if (vcpu_has_wfit_active(vcpu))
        min_delta = min(min_delta, wfit_delay_ns(vcpu));

    /* If none of the timers can fire, then return 0 */
    if (min_delta == ULLONG_MAX)
        return 0;

    return min_delta;
}

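/*
 * The background timer only runs while the vcpu is blocked; its sole
 * job is to wake the vcpu thread. The per-context hrtimers (see
 * kvm_hrtimer_expire() below) instead emulate a timer whose output
 * must be turned into a guest interrupt.
 */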
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
    struct arch_timer_cpu *timer;
    struct kvm_vcpu *vcpu;
    u64 ns;

    timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
    vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

    /*
     * Check that the timer has really expired from the guest's
     * PoV (NTP on the host may have forced it to expire
     * early). If we should have slept longer, restart it.
     */
    ns = kvm_timer_earliest_exp(vcpu);
    if (unlikely(ns)) {
        hrtimer_forward_now(hrt, ns_to_ktime(ns));
        return HRTIMER_RESTART;
    }

    kvm_vcpu_wake_up(vcpu);
    return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
    struct arch_timer_context *ctx;
    struct kvm_vcpu *vcpu;
    u64 ns;

    ctx = container_of(hrt, struct arch_timer_context, hrtimer);
    vcpu = ctx->vcpu;

    trace_kvm_timer_hrtimer_expire(ctx);

    /*
     * Check that the timer has really expired from the guest's
     * PoV (NTP on the host may have forced it to expire
     * early). If not ready, schedule for a later time.
     */
    ns = kvm_timer_compute_delta(ctx);
    if (unlikely(ns)) {
        hrtimer_forward_now(hrt, ns_to_ktime(ns));
        return HRTIMER_RESTART;
    }

    kvm_timer_update_irq(vcpu, true, ctx);
    return HRTIMER_NORESTART;
}

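/*
 * If the timer is loaded on the CPU, the authoritative state is the
 * hardware CTL register; otherwise the condition is computed from the
 * saved CVAL and counter offset.
 */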
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
    enum kvm_arch_timers index;
    u64 cval, now;

    if (!timer_ctx)
        return false;

    index = arch_timer_ctx_index(timer_ctx);

    if (timer_ctx->loaded) {
        u32 cnt_ctl = 0;

        switch (index) {
        case TIMER_VTIMER:
            cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
            break;
        case TIMER_PTIMER:
            cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
            break;
        case NR_KVM_TIMERS:
            /* GCC is braindead */
            cnt_ctl = 0;
            break;
        }

        return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
                (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
               !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
    }

    if (!kvm_timer_irq_can_fire(timer_ctx))
        return false;

    cval = timer_get_cval(timer_ctx);
    now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

    return cval <= now;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
    return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
    struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
    struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
    struct kvm_sync_regs *regs = &vcpu->run->s.regs;

    /* Populate the device bitmap with the timer states */
    regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
                    KVM_ARM_DEV_EL1_PTIMER);
    if (kvm_timer_should_fire(vtimer))
        regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
    if (kvm_timer_should_fire(ptimer))
        regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                 struct arch_timer_context *timer_ctx)
{
    int ret;

    timer_ctx->irq.level = new_level;
    trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
                   timer_ctx->irq.level);

    if (!userspace_irqchip(vcpu->kvm)) {
        ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                      timer_ctx->irq.irq,
                      timer_ctx->irq.level,
                      timer_ctx);
        WARN_ON(ret);
    }
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
    bool should_fire = kvm_timer_should_fire(ctx);

    trace_kvm_timer_emulate(ctx, should_fire);

    if (should_fire != ctx->irq.level) {
        kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
        return;
    }

    /*
     * If the timer can fire now, we don't need to have a soft timer
     * scheduled for the future.  If the timer cannot fire at all,
     * then we also don't need a soft timer.
     */
    if (!kvm_timer_irq_can_fire(ctx)) {
        soft_timer_cancel(&ctx->hrtimer);
        return;
    }

    soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

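/*
 * Copy the live hardware state of a loaded timer back into the vcpu's
 * register file and disable it, so that it can no longer fire while
 * the vcpu is not running.
 */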
static void timer_save_state(struct arch_timer_context *ctx)
{
    struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
    enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
    unsigned long flags;

    if (!timer->enabled)
        return;

    local_irq_save(flags);

    if (!ctx->loaded)
        goto out;

    switch (index) {
    case TIMER_VTIMER:
        timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
        timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

        /* Disable the timer */
        write_sysreg_el0(0, SYS_CNTV_CTL);
        isb();

        break;
    case TIMER_PTIMER:
        timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
        timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

        /* Disable the timer */
        write_sysreg_el0(0, SYS_CNTP_CTL);
        isb();

        break;
    case NR_KVM_TIMERS:
        BUG();
    }

    trace_kvm_timer_save_state(ctx);

    ctx->loaded = false;
out:
    local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);
    struct timer_map map;

    get_timer_map(vcpu, &map);

    /*
     * If no timers are capable of raising interrupts (disabled or
     * masked), then there's no more work for us to do.
     */
    if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
        !kvm_timer_irq_can_fire(map.direct_ptimer) &&
        !kvm_timer_irq_can_fire(map.emul_ptimer) &&
        !vcpu_has_wfit_active(vcpu))
        return;

    /*
     * At least one guest timer will expire. Schedule a background timer
     * set to the earliest expiration time among the guest timers.
     */
    soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);

    soft_timer_cancel(&timer->bg_timer);
}

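/*
 * Note the ordering on restore: CVAL is written before CTL, with an
 * ISB in between, presumably so that the timer is never (re)enabled
 * while a stale comparator value is still in place.
 */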
static void timer_restore_state(struct arch_timer_context *ctx)
{
    struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
    enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
    unsigned long flags;

    if (!timer->enabled)
        return;

    local_irq_save(flags);

    if (ctx->loaded)
        goto out;

    switch (index) {
    case TIMER_VTIMER:
        write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
        isb();
        write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
        break;
    case TIMER_PTIMER:
        write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
        isb();
        write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
        break;
    case NR_KVM_TIMERS:
        BUG();
    }

    trace_kvm_timer_restore_state(ctx);

    ctx->loaded = true;
out:
    local_irq_restore(flags);
}

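/*
 * CNTVOFF_EL2 is an EL2 register, so the update goes through
 * kvm_call_hyp(), which covers both the VHE and nVHE cases.
 */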
static void set_cntvoff(u64 cntvoff)
{
    kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
    int r;
    r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
    WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
    struct kvm_vcpu *vcpu = ctx->vcpu;
    bool phys_active = false;

    /*
     * Update the timer output so that it is likely to match the
     * state we're about to restore. If the timer expires between
     * this point and the register restoration, we'll take the
     * interrupt anyway.
     */
    kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

    if (irqchip_in_kernel(vcpu->kvm))
        phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

    phys_active |= ctx->irq.level;

    set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
    struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

    /*
     * Update the timer output so that it is likely to match the
     * state we're about to restore. If the timer expires between
     * this point and the register restoration, we'll take the
     * interrupt anyway.
     */
    kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

    /*
     * When using a userspace irqchip with the architected timers and a
     * host interrupt controller that doesn't support an active state, we
     * must still prevent continuously exiting from the guest, and
     * therefore mask the physical interrupt by disabling it on the host
     * interrupt controller when the virtual level is high, such that the
     * guest can make forward progress.  Once we detect the output level
     * being de-asserted, we unmask the interrupt again so that we exit
     * from the guest when the timer fires.
     */
    if (vtimer->irq.level)
        disable_percpu_irq(host_vtimer_irq);
    else
        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);
    struct timer_map map;

    if (unlikely(!timer->enabled))
        return;

    get_timer_map(vcpu, &map);

    if (static_branch_likely(&has_gic_active_state)) {
        kvm_timer_vcpu_load_gic(map.direct_vtimer);
        if (map.direct_ptimer)
            kvm_timer_vcpu_load_gic(map.direct_ptimer);
    } else {
        kvm_timer_vcpu_load_nogic(vcpu);
    }

    set_cntvoff(timer_get_offset(map.direct_vtimer));

    kvm_timer_unblocking(vcpu);

    timer_restore_state(map.direct_vtimer);
    if (map.direct_ptimer)
        timer_restore_state(map.direct_ptimer);

    if (map.emul_ptimer)
        timer_emulate(map.emul_ptimer);
}

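/*
 * With a userspace irqchip, tell the caller whether the timer output
 * levels recorded in the sync regs have become stale and need to be
 * reported to userspace.
 */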
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
    struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
    struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
    struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
    bool vlevel, plevel;

    if (likely(irqchip_in_kernel(vcpu->kvm)))
        return false;

    vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
    plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

    return kvm_timer_should_fire(vtimer) != vlevel ||
           kvm_timer_should_fire(ptimer) != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);
    struct timer_map map;

    if (unlikely(!timer->enabled))
        return;

    get_timer_map(vcpu, &map);

    timer_save_state(map.direct_vtimer);
    if (map.direct_ptimer)
        timer_save_state(map.direct_ptimer);

    /*
     * Cancel soft timer emulation, because the only case where we
     * need it after a vcpu_put is in the context of a sleeping VCPU, and
     * in that case we already factor in the deadline for the physical
     * timer when scheduling the bg_timer.
     *
     * In any case, we re-schedule the hrtimer for the physical timer when
     * coming back to the VCPU thread in kvm_timer_vcpu_load().
     */
    if (map.emul_ptimer)
        soft_timer_cancel(&map.emul_ptimer->hrtimer);

    if (kvm_vcpu_is_blocking(vcpu))
        kvm_timer_blocking(vcpu);

    /*
     * The kernel may decide to run userspace after calling vcpu_put, so
     * we reset cntvoff to 0 to ensure a consistent read between user
     * accesses to the virtual counter and kernel accesses to the physical
     * counter in the non-VHE case. For VHE, the virtual counter uses a
     * fixed virtual offset of zero, so there is no need to zero the
     * CNTVOFF_EL2 register.
     */
    set_cntvoff(0);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
    struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

    if (!kvm_timer_should_fire(vtimer)) {
        kvm_timer_update_irq(vcpu, false, vtimer);
        if (static_branch_likely(&has_gic_active_state))
            set_timer_irq_phys_active(vtimer, false);
        else
            enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
    }
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);

    if (unlikely(!timer->enabled))
        return;

    if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
        unmask_vtimer_irq_user(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);
    struct timer_map map;

    get_timer_map(vcpu, &map);

    /*
     * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
     * and to 0 for ARMv7.  We provide an implementation that always
     * resets the timer to be disabled and unmasked and is compliant with
     * the ARMv7 architecture.
     */
    timer_set_ctl(vcpu_vtimer(vcpu), 0);
    timer_set_ctl(vcpu_ptimer(vcpu), 0);

    if (timer->enabled) {
        kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
        kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

        if (irqchip_in_kernel(vcpu->kvm)) {
            kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
            if (map.direct_ptimer)
                kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
        }
    }

    if (map.emul_ptimer)
        soft_timer_cancel(&map.emul_ptimer->hrtimer);

    return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
    unsigned long i;
    struct kvm *kvm = vcpu->kvm;
    struct kvm_vcpu *tmp;

    mutex_lock(&kvm->lock);
    kvm_for_each_vcpu(i, tmp, kvm)
        timer_set_offset(vcpu_vtimer(tmp), cntvoff);

    /*
     * When called from the vcpu create path, the CPU being created is not
     * included in the loop above, so we just set it here as well.
     */
    timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
    mutex_unlock(&kvm->lock);
}

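/*
 * One-time per-vcpu timer setup: wire the contexts to the vcpu,
 * synchronize CNTVOFF with the other vcpus, and initialize the
 * background and emulation hrtimers.
 */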
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);
    struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
    struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

    vtimer->vcpu = vcpu;
    ptimer->vcpu = vcpu;

    /* Synchronize cntvoff across all vtimers of a VM. */
    update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
    timer_set_offset(ptimer, 0);

    hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
    timer->bg_timer.function = kvm_bg_timer_expire;

    hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
    hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
    vtimer->hrtimer.function = kvm_hrtimer_expire;
    ptimer->hrtimer.function = kvm_hrtimer_expire;

    vtimer->irq.irq = default_vtimer_irq.irq;
    ptimer->irq.irq = default_ptimer_irq.irq;

    vtimer->host_timer_irq = host_vtimer_irq;
    ptimer->host_timer_irq = host_ptimer_irq;

    vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
    ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
}

static void kvm_timer_init_interrupt(void *info)
{
    enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
    enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

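/*
 * Userspace register accessors: KVM_REG_ARM_TIMER_* map onto the
 * vtimer and KVM_REG_ARM_PTIMER_* onto the ptimer. Writing the
 * virtual counter value adjusts CNTVOFF for the whole VM.
 */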
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
    struct arch_timer_context *timer;

    switch (regid) {
    case KVM_REG_ARM_TIMER_CTL:
        timer = vcpu_vtimer(vcpu);
        kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
        break;
    case KVM_REG_ARM_TIMER_CNT:
        timer = vcpu_vtimer(vcpu);
        update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
        break;
    case KVM_REG_ARM_TIMER_CVAL:
        timer = vcpu_vtimer(vcpu);
        kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
        break;
    case KVM_REG_ARM_PTIMER_CTL:
        timer = vcpu_ptimer(vcpu);
        kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
        break;
    case KVM_REG_ARM_PTIMER_CVAL:
        timer = vcpu_ptimer(vcpu);
        kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
        break;

    default:
        return -1;
    }

    return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
    /*
     * Set the ISTATUS bit if the timer has expired.
     * Note that according to ARMv8 ARM Issue A.k, the ISTATUS bit is
     * UNKNOWN when the ENABLE bit is 0, so we choose to set the ISTATUS
     * bit regardless of the ENABLE bit, for implementation convenience.
     */
    u32 ctl = timer_get_ctl(timer);

    if (!kvm_timer_compute_delta(timer))
        ctl |= ARCH_TIMER_CTRL_IT_STAT;

    return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
    switch (regid) {
    case KVM_REG_ARM_TIMER_CTL:
        return kvm_arm_timer_read(vcpu,
                      vcpu_vtimer(vcpu), TIMER_REG_CTL);
    case KVM_REG_ARM_TIMER_CNT:
        return kvm_arm_timer_read(vcpu,
                      vcpu_vtimer(vcpu), TIMER_REG_CNT);
    case KVM_REG_ARM_TIMER_CVAL:
        return kvm_arm_timer_read(vcpu,
                      vcpu_vtimer(vcpu), TIMER_REG_CVAL);
    case KVM_REG_ARM_PTIMER_CTL:
        return kvm_arm_timer_read(vcpu,
                      vcpu_ptimer(vcpu), TIMER_REG_CTL);
    case KVM_REG_ARM_PTIMER_CNT:
        return kvm_arm_timer_read(vcpu,
                      vcpu_ptimer(vcpu), TIMER_REG_CNT);
    case KVM_REG_ARM_PTIMER_CVAL:
        return kvm_arm_timer_read(vcpu,
                      vcpu_ptimer(vcpu), TIMER_REG_CVAL);
    }
    return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                  struct arch_timer_context *timer,
                  enum kvm_arch_timer_regs treg)
{
    u64 val;

    switch (treg) {
    case TIMER_REG_TVAL:
        val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
        val = lower_32_bits(val);
        break;

    case TIMER_REG_CTL:
        val = read_timer_ctl(timer);
        break;

    case TIMER_REG_CVAL:
        val = timer_get_cval(timer);
        break;

    case TIMER_REG_CNT:
        val = kvm_phys_timer_read() - timer_get_offset(timer);
        break;

    default:
        BUG();
    }

    return val;
}

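/*
 * Guest sysreg accesses land here while the timer may be live on the
 * CPU; the put/load pair around the access keeps the in-memory state
 * coherent with the hardware.
 */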
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
                  enum kvm_arch_timers tmr,
                  enum kvm_arch_timer_regs treg)
{
    u64 val;

    preempt_disable();
    kvm_timer_vcpu_put(vcpu);

    val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

    kvm_timer_vcpu_load(vcpu);
    preempt_enable();

    return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
                struct arch_timer_context *timer,
                enum kvm_arch_timer_regs treg,
                u64 val)
{
    switch (treg) {
    case TIMER_REG_TVAL:
        timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
        break;

    case TIMER_REG_CTL:
        timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
        break;

    case TIMER_REG_CVAL:
        timer_set_cval(timer, val);
        break;

    default:
        BUG();
    }
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
                enum kvm_arch_timers tmr,
                enum kvm_arch_timer_regs treg,
                u64 val)
{
    preempt_disable();
    kvm_timer_vcpu_put(vcpu);

    kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);

    kvm_timer_vcpu_load(vcpu);
    preempt_enable();
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
    kvm_timer_init_interrupt(NULL);
    return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
    disable_percpu_irq(host_vtimer_irq);
    return 0;
}

static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
    if (vcpu)
        irqd_set_forwarded_to_vcpu(d);
    else
        irqd_clr_forwarded_to_vcpu(d);

    return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
                       enum irqchip_irq_state which, bool val)
{
    if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
        return irq_chip_set_parent_state(d, which, val);

    if (val)
        irq_chip_mask_parent(d);
    else
        irq_chip_unmask_parent(d);

    return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
    if (!irqd_is_forwarded_to_vcpu(d))
        irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
    d = d->parent_data;
    if (d->chip->irq_ack)
        d->chip->irq_ack(d);
}

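/*
 * irq_chip interposed between the timer PPIs and the GIC when the GIC
 * cannot deactivate interrupts on the guest's behalf: while an
 * interrupt is forwarded to a vcpu, masking the parent stands in for
 * its active state.
 */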
static struct irq_chip timer_chip = {
    .name           = "KVM",
    .irq_ack        = timer_irq_ack,
    .irq_mask       = irq_chip_mask_parent,
    .irq_unmask     = irq_chip_unmask_parent,
    .irq_eoi        = timer_irq_eoi,
    .irq_set_type       = irq_chip_set_type_parent,
    .irq_set_vcpu_affinity  = timer_irq_set_vcpu_affinity,
    .irq_set_irqchip_state  = timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                  unsigned int nr_irqs, void *arg)
{
    irq_hw_number_t hwirq = (uintptr_t)arg;

    return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
                         &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
    .alloc  = timer_irq_domain_alloc,
    .free   = timer_irq_domain_free,
};

static struct irq_ops arch_timer_irq_ops = {
    .get_input_level = kvm_arch_timer_get_input_level,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
    *flags = irq_get_trigger_type(virq);
    if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
        kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
            virq);
        *flags = IRQF_TRIGGER_LOW;
    }
}

static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
    struct irq_domain *domain = NULL;

    if (info->virtual_irq <= 0) {
        kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
            info->virtual_irq);
        return -ENODEV;
    }

    host_vtimer_irq = info->virtual_irq;
    kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

    if (kvm_vgic_global_state.no_hw_deactivation) {
        struct fwnode_handle *fwnode;
        struct irq_data *data;

        fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
        if (!fwnode)
            return -ENOMEM;

        /* Assume both vtimer and ptimer are in the same parent domain */
        data = irq_get_irq_data(host_vtimer_irq);
        domain = irq_domain_create_hierarchy(data->domain, 0,
                             NR_KVM_TIMERS, fwnode,
                             &timer_domain_ops, NULL);
        if (!domain) {
            irq_domain_free_fwnode(fwnode);
            return -ENOMEM;
        }

        arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
        WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
                        (void *)TIMER_VTIMER));
    }

    if (info->physical_irq > 0) {
        host_ptimer_irq = info->physical_irq;
        kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

        if (domain)
            WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
                            (void *)TIMER_PTIMER));
    }

    return 0;
}

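/*
 * Global one-time timer setup: fetch the timecounter and host IRQ
 * numbers from the arch timer driver, request the per-cpu IRQs, and
 * register the CPU hotplug callbacks.
 */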
int kvm_timer_hyp_init(bool has_gic)
{
    struct arch_timer_kvm_info *info;
    int err;

    info = arch_timer_get_kvm_info();
    timecounter = &info->timecounter;

    if (!timecounter->cc) {
        kvm_err("kvm_arch_timer: uninitialized timecounter\n");
        return -ENODEV;
    }

    err = kvm_irq_init(info);
    if (err)
        return err;

    /* First, do the virtual EL1 timer irq */

    err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                 "kvm guest vtimer", kvm_get_running_vcpus());
    if (err) {
        kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
            host_vtimer_irq, err);
        return err;
    }

    if (has_gic) {
        err = irq_set_vcpu_affinity(host_vtimer_irq,
                        kvm_get_running_vcpus());
        if (err) {
            kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
            goto out_free_irq;
        }

        static_branch_enable(&has_gic_active_state);
    }

    kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

    /* Now let's do the physical EL1 timer irq */

    if (info->physical_irq > 0) {
        err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
                     "kvm guest ptimer", kvm_get_running_vcpus());
        if (err) {
            kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
                host_ptimer_irq, err);
            return err;
        }

        if (has_gic) {
            err = irq_set_vcpu_affinity(host_ptimer_irq,
                            kvm_get_running_vcpus());
            if (err) {
                kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
                goto out_free_irq;
            }
        }

        kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
    } else if (has_vhe()) {
        kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
            info->physical_irq);
        err = -ENODEV;
        goto out_free_irq;
    }

    cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
              "kvm/arm/timer:starting", kvm_timer_starting_cpu,
              kvm_timer_dying_cpu);
    return 0;
out_free_irq:
    free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
    return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);

    soft_timer_cancel(&timer->bg_timer);
}

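/*
 * Claim ownership of the timer PPIs on behalf of the timer contexts
 * and check that every vcpu agrees on the same interrupt numbers.
 */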
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
    int vtimer_irq, ptimer_irq, ret;
    unsigned long i;

    vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
    ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
    if (ret)
        return false;

    ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
    ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
    if (ret)
        return false;

    kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
        if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
            vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
            return false;
    }

    return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
    struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
    struct arch_timer_context *timer;

    if (WARN(!vcpu, "No vcpu context!\n"))
        return false;

    if (vintid == vcpu_vtimer(vcpu)->irq.irq)
        timer = vcpu_vtimer(vcpu);
    else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
        timer = vcpu_ptimer(vcpu);
    else
        BUG();

    return kvm_timer_should_fire(timer);
}

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
    struct arch_timer_cpu *timer = vcpu_timer(vcpu);
    struct timer_map map;
    int ret;

    if (timer->enabled)
        return 0;

    /* Without a VGIC we do not map virtual IRQs to physical IRQs */
    if (!irqchip_in_kernel(vcpu->kvm))
        goto no_vgic;

    /*
     * At this stage, we have the guarantee that the vgic is both
     * available and initialized.
     */
    if (!timer_irqs_are_valid(vcpu)) {
        kvm_debug("incorrectly configured timer irqs\n");
        return -EINVAL;
    }

    get_timer_map(vcpu, &map);

    ret = kvm_vgic_map_phys_irq(vcpu,
                    map.direct_vtimer->host_timer_irq,
                    map.direct_vtimer->irq.irq,
                    &arch_timer_irq_ops);
    if (ret)
        return ret;

    if (map.direct_ptimer) {
        ret = kvm_vgic_map_phys_irq(vcpu,
                        map.direct_ptimer->host_timer_irq,
                        map.direct_ptimer->irq.irq,
                        &arch_timer_irq_ops);
    }

    if (ret)
        return ret;

no_vgic:
    timer->enabled = 1;
    return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, which makes these
 * bits have no effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
    /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
    u32 cnthctl_shift = 10;
    u64 val;

    /*
     * VHE systems allow the guest direct access to the EL1 physical
     * timer/counter.
     */
    val = read_sysreg(cnthctl_el2);
    val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
    val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
    write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
    struct kvm_vcpu *vcpu;
    unsigned long i;

    kvm_for_each_vcpu(i, vcpu, kvm) {
        vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
        vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
    }
}

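/*
 * KVM_ARM_VCPU_TIMER_CTRL device attribute accessors: these let
 * userspace choose which PPI each timer is wired to, VM-wide, before
 * the timers are enabled.
 */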
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
    int __user *uaddr = (int __user *)(long)attr->addr;
    struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
    struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
    int irq;

    if (!irqchip_in_kernel(vcpu->kvm))
        return -EINVAL;

    if (get_user(irq, uaddr))
        return -EFAULT;

    if (!irq_is_ppi(irq))
        return -EINVAL;

    if (vcpu->arch.timer_cpu.enabled)
        return -EBUSY;

    switch (attr->attr) {
    case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
        set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
        break;
    case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
        set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
        break;
    default:
        return -ENXIO;
    }

    return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
    int __user *uaddr = (int __user *)(long)attr->addr;
    struct arch_timer_context *timer;
    int irq;

    switch (attr->attr) {
    case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
        timer = vcpu_vtimer(vcpu);
        break;
    case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
        timer = vcpu_ptimer(vcpu);
        break;
    default:
        return -ENXIO;
    }

    irq = timer->irq.irq;
    return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
    switch (attr->attr) {
    case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
    case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
        return 0;
    }

    return -ENXIO;
}