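/* SPDX-License-Identifier: GPL-2.0-only */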
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/ftrace.h>
#include <linux/hashtable.h>
#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_IDS
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif

/*
 * Bits 16..31 of kvm_userspace_memory_region::flags are reserved for
 * internal use in KVM; only the remaining bits are visible to userspace.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag";
 * it is set while the memslots are being updated so that consumers of
 * cached generation numbers (e.g. cached MMIO info) can detect a concurrent
 * update and re-check/retry as needed.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
 * and mask bit 63 to indicate the noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * error pfns indicate that the gfn is in slot but failed to
 * translate it to pfn on the host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be
 * translated to pfn - it is not in slot or failed to
 * translate it to pfn.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}

/*
 * Architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

#define KVM_REQUEST_MASK		GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP		BIT(8)
#define KVM_REQUEST_WAIT		BIT(9)
#define KVM_REQUEST_NO_ACTION		BIT(10)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH		(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VM_DEAD			(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK			2
#define KVM_REQ_UNHALT			3
#define KVM_REQUEST_ARCH_BASE		8

/*
 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
 * OUTSIDE_GUEST_MODE.  Unlike a plain kick, which only guarantees that the
 * vCPU is on its way out of guest mode, this request ensures the vCPU has
 * actually reached OUTSIDE_GUEST_MODE before the sender continues.  No
 * request bit is recorded, so the vCPU takes no action (hence NO_ACTION).
 */
#define KVM_REQ_OUTSIDE_GUEST_MODE	(KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
        BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)
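
/*
 * Illustrative sketch (hypothetical request names, not defined here): an
 * architecture allocates its own requests on top of KVM_REQUEST_ARCH_BASE:
 *
 *	#define KVM_REQ_EXAMPLE       KVM_ARCH_REQ(0)
 *	#define KVM_REQ_EXAMPLE_SYNC  KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
 */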

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                                 unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
                                      struct kvm_vcpu *except);

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_FAST_MMIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);
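
/*
 * Illustrative sketch (hypothetical device and callbacks, not part of this
 * header): an in-kernel device is attached to a bus under slots_lock:
 *
 *	static const struct kvm_io_device_ops my_dev_ops = {
 *		.read  = my_dev_read,
 *		.write = my_dev_write,
 *	};
 *
 *	kvm_iodevice_init(&dev->iodev, &my_dev_ops);
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, &dev->iodev);
 *	mutex_unlock(&kvm->slots_lock);
 */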

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gpa_t cr2_or_gpa;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool wakeup_all;
        bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
        struct kvm_memory_slot *slot;
        gfn_t start;
        gfn_t end;
        pte_t pte;
        bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
        /*
         * Only valid if the 'pfn' is managed by the host kernel (i.e. there
         * is a 'struct page' for it).  If 'pfn' is not managed by the host
         * kernel, this field is initialized to KVM_UNMAPPED_PAGE.
         */
        struct page *page;
        void *hva;
        kvm_pfn_t pfn;
        kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not.  Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
        return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
        return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id; /* id given by userspace at creation */
        int vcpu_idx; /* index into kvm->vcpu_array */
        int ____srcu_idx; /* Don't use this directly.  You've been warned. */
#ifdef CONFIG_PROVE_RCU
        int srcu_depth;
#endif
        int mode;
        u64 requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

#ifndef __KVM_HAVE_ARCH_WQP
        struct rcuwait wait;
#endif
        struct pid __rcu *pid;
        int sigset_active;
        sigset_t sigset;
        unsigned int halt_poll_ns;
        bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * Cpu relax intercept or pause loop exit optimization
         * in_spin_loop: set when a vcpu does a pause loop exit
         *  or cpu relax intercepted.
         * dy_eligible: indicates whether vcpu is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        bool ready;
        struct kvm_vcpu_arch arch;
        struct kvm_vcpu_stat stat;
        char stats_id[KVM_STATS_NAME_SIZE];
        struct kvm_dirty_ring dirty_ring;

        /*
         * The most recently used memslot by this vCPU and the slots generation
         * for which it is valid.
         * No wraparound protection is needed since generations won't overflow
         * in thousands of years, even if usermode creates and deletes memslots
         * every second.
         */
        struct kvm_memory_slot *last_used_slot;
        u64 last_used_slot_gen;
};

/*
 * Start accounting time towards a guest.
 * Must be called before entering guest context.
 */
static __always_inline void guest_timing_enter_irqoff(void)
{
        /*
         * This is running in ioctl context so its safe to assume that it's the
         * stime pending cputime to flush.
         */
        instrumentation_begin();
        vtime_account_guest_enter();
        instrumentation_end();
}

/*
 * Enter guest context and enter an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep.  All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_enter_irqoff(void)
{
        /*
         * KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode.  In fact switching to a guest mode
         * is very similar to exiting to userspace from rcu point of view.  In
         * addition CPU may stay in a guest mode for quite a long time (up to
         * one time slice).  Lets treat guest mode as quiescent state, just
         * like we do with user-mode execution.
         */
        if (!context_tracking_guest_enter()) {
                instrumentation_begin();
                rcu_virt_note_context_switch(smp_processor_id());
                instrumentation_end();
        }
}

/*
 * Deprecated.  Architectures should move to guest_timing_enter_irqoff() and
 * guest_state_enter_irqoff().
 */
static __always_inline void guest_enter_irqoff(void)
{
        guest_timing_enter_irqoff();
        guest_context_enter_irqoff();
}

/**
 * guest_state_enter_irqoff - Fixup state when entering a guest
 *
 * Entry to a guest will enable interrupts, but the kernel state is
 * interrupts disabled when this is invoked.  Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code before entering a guest.
 * Must be called with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_enter_irqoff() before this.
 *
 * Note: this is analogous to exit_to_user_mode().
 */
static __always_inline void guest_state_enter_irqoff(void)
{
        instrumentation_begin();
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare();
        instrumentation_end();

        guest_context_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);
}

/*
 * Exit guest context and exit an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep.  All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_exit_irqoff(void)
{
        context_tracking_guest_exit();
}

/*
 * Stop accounting time towards a guest.
 * Must be called after exiting guest context.
 */
static __always_inline void guest_timing_exit_irqoff(void)
{
        instrumentation_begin();
        /* Flush the guest cputime we spent on the guest */
        vtime_account_guest_exit();
        instrumentation_end();
}

/*
 * Deprecated.  Architectures should move to guest_state_exit_irqoff() and
 * guest_timing_exit_irqoff().
 */
static __always_inline void guest_exit_irqoff(void)
{
        guest_context_exit_irqoff();
        guest_timing_exit_irqoff();
}

static inline void guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit_irqoff();
        local_irq_restore(flags);
}

/**
 * guest_state_exit_irqoff - Establish state when returning from guest mode
 *
 * Entry from a guest disables interrupts, but guest mode is traced as
 * interrupts enabled.  Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific code after exiting a guest.
 * Must be invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_exit_irqoff() after this.
 *
 * Note: this is analogous to enter_from_user_mode().
 */
static __always_inline void guest_state_exit_irqoff(void)
{
        lockdep_hardirqs_off(CALLER_ADDR0);
        guest_context_exit_irqoff();

        instrumentation_begin();
        trace_hardirqs_off_finish();
        instrumentation_end();
}
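
/*
 * Illustrative ordering sketch for an arch vcpu_run() path using the helpers
 * above ("enter_guest_lowlevel" is a hypothetical arch entry routine):
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *	enter_guest_lowlevel(vcpu);
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 *	local_irq_enable();
 */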

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        /*
         * The memory barrier ensures a previous write to vcpu->requests cannot
         * be reordered with the read of vcpu->mode.  It pairs with the general
         * memory barrier following the write of vcpu->mode in VCPU RUN.
         */
        smp_mb__before_atomic();
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

/*
 * Since at idle each memslot belongs to two memslot sets it has to contain
 * two embedded nodes for each data structure that it forms a part of.
 *
 * Two memslot sets (one active and one inactive) are necessary so the VM
 * continues to run on one memslot set while the other is being modified.
 *
 * These two memslot sets normally point to the same set of memslots.
 * They can, however, be desynchronized when performing a memslot management
 * operation by replacing the memslot to be modified by its copy.
 * After the operation is complete, both memslot sets once again point to
 * the same, common set of memslot data.
 */
struct kvm_memory_slot {
        struct hlist_node id_node[2];
        struct interval_tree_node hva_node[2];
        struct rb_node gfn_node[2];
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
        u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
        return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long len = kvm_dirty_bitmap_bytes(memslot);

        return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
                                   KVM_DIRTY_LOG_INITIALLY_SET)
#endif

struct kvm_s390_adapter_int {
        u64 ind_addr;
        u64 summary_addr;
        u64 ind_offset;
        u32 summary_offset;
        u32 adapter_id;
};

struct kvm_hv_sint {
        u32 vcpu;
        u32 sint;
};

struct kvm_xen_evtchn {
        u32 port;
        u32 vcpu_id;
        int vcpu_idx;
        u32 priority;
};

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct {
                        u32 address_lo;
                        u32 address_hi;
                        u32 data;
                        u32 flags;
                        u32 devid;
                } msi;
                struct kvm_s390_adapter_int adapter;
                struct kvm_hv_sint hv_sint;
                struct kvm_xen_evtchn xen_evtchn;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi.  Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[];
};
#endif

#ifndef KVM_INTERNAL_MEM_SLOTS
#define KVM_INTERNAL_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

struct kvm_memslots {
        u64 generation;
        atomic_long_t last_used_slot;
        struct rb_root_cached hva_tree;
        struct rb_root gfn_tree;
        /*
         * The mapping table from slot id to memslot.
         *
         * 7-bit bucket count matches the size of the old id-to-index array for
         * 512 slots, while giving good performance with this slot count.
         * Higher bucket counts bring only small performance improvements but
         * always result in higher memory usage (even for lower memslot counts).
         */
        DECLARE_HASHTABLE(id_hash, 7);
        int node_idx;
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
        rwlock_t mmu_lock;
#else
        spinlock_t mmu_lock;
#endif

        struct mutex slots_lock;

        /*
         * Protects the arch-specific fields of struct kvm_memory_slots in
         * use by the current VM.
         */
        struct mutex slots_arch_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        unsigned long nr_memslot_pages;
        /* The two memslot sets - active and inactive (per address space) */
        struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
        /* The current active memslot set for each address space */
        struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
        struct xarray vcpu_array;

        /* Used to wait for completion of MMU notifiers.  */
        spinlock_t mn_invalidate_lock;
        unsigned long mn_active_invalidate_count;
        struct rcuwait mn_memslots_update_rcuwait;

        /* For management / invalidation of gfn_to_pfn_caches */
        spinlock_t gpc_lock;
        struct list_head gpc_list;

        /*
         * created_vcpus is protected by kvm->lock, and is incremented at the
         * beginning of KVM_CREATE_VCPU.  online_vcpus is only incremented
         * once the vCPU is fully created, and is accessed atomically.
         */
        atomic_t online_vcpus;
        int max_vcpus;
        int created_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_invalidate_seq;
        long mmu_invalidate_in_progress;
        unsigned long mmu_invalidate_range_start;
        unsigned long mmu_invalidate_range_end;
#endif
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
        struct kvm_stat_data **debugfs_stat_data;
        struct srcu_struct srcu;
        struct srcu_struct irq_srcu;
        pid_t userspace_pid;
        unsigned int max_halt_poll_ns;
        u32 dirty_ring_size;
        bool vm_bugged;
        bool vm_dead;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
        struct notifier_block pm_notifier;
#endif
        char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
        pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
                             ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
                      (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
        kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
        kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
                              ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
        kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_dead(struct kvm *kvm)
{
        kvm->vm_dead = true;
        kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
}

static inline void kvm_vm_bugged(struct kvm *kvm)
{
        kvm->vm_bugged = true;
        kvm_vm_dead(kvm);
}


#define KVM_BUG(cond, kvm, fmt...)                              \
({                                                              \
        int __ret = (cond);                                     \
                                                                \
        if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))         \
                kvm_vm_bugged(kvm);                             \
        unlikely(__ret);                                        \
})

#define KVM_BUG_ON(cond, kvm)                                   \
({                                                              \
        int __ret = (cond);                                     \
                                                                \
        if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))           \
                kvm_vm_bugged(kvm);                             \
        unlikely(__ret);                                        \
})
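
/*
 * Usage sketch: KVM_BUG_ON() both warns (once) and marks the VM dead, so
 * callers typically bail out, e.g. with a hypothetical invariant:
 *
 *	if (KVM_BUG_ON(vcpu->mode != OUTSIDE_GUEST_MODE, kvm))
 *		return -EIO;
 */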

static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PROVE_RCU
        WARN_ONCE(vcpu->srcu_depth++,
                  "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
#endif
        vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}

static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
{
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);

#ifdef CONFIG_PROVE_RCU
        WARN_ONCE(--vcpu->srcu_depth,
                  "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
#endif
}

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
        return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        int num_vcpus = atomic_read(&kvm->online_vcpus);
        i = array_index_nospec(i, num_vcpus);

        /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return xa_load(&kvm->vcpu_array, i);
}

#define kvm_for_each_vcpu(idx, vcpup, kvm)                 \
        xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
                          (atomic_read(&kvm->online_vcpus) - 1))

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu = NULL;
        unsigned long i;

        if (id < 0)
                return NULL;
        if (id < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, id);
        if (vcpu && vcpu->vcpu_id == id)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}
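
/*
 * Illustrative sketch: walking all online vCPUs ("my_vcpu_hook" is a
 * hypothetical helper):
 *
 *	unsigned long i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		my_vcpu_hook(vcpu);
 */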

void kvm_destroy_vcpus(struct kvm *kvm);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
        as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
        int as_id = kvm_arch_vcpu_memslots_id(vcpu);

        return __kvm_memslots(vcpu->kvm, as_id);
}

static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
{
        return RB_EMPTY_ROOT(&slots->gfn_tree);
}

#define kvm_for_each_memslot(memslot, bkt, slots)                             \
        hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
                if (WARN_ON_ONCE(!memslot->npages)) {                         \
                } else

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
        struct kvm_memory_slot *slot;
        int idx = slots->node_idx;

        hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
                if (slot->id == id)
                        return slot;
        }

        return NULL;
}

/* Iterator used for walking memslots that overlap a gfn range. */
struct kvm_memslot_iter {
        struct kvm_memslots *slots;
        struct rb_node *node;
        struct kvm_memory_slot *slot;
};

static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
{
        iter->node = rb_next(iter->node);
        if (!iter->node)
                return;

        iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
}

static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
                                          struct kvm_memslots *slots,
                                          gfn_t start)
{
        int idx = slots->node_idx;
        struct rb_node *tmp;
        struct kvm_memory_slot *slot;

        iter->slots = slots;

        /*
         * Find the so called "upper bound" of a key - the first node that has
         * its key strictly greater than the searched one (the start gfn in
         * our case).
         */
        iter->node = NULL;
        for (tmp = slots->gfn_tree.rb_node; tmp; ) {
                slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
                if (start < slot->base_gfn) {
                        iter->node = tmp;
                        tmp = tmp->rb_left;
                } else {
                        tmp = tmp->rb_right;
                }
        }

        /*
         * Find the slot with the lowest gfn that can possibly intersect with
         * the range, so we'll ideally have slot start <= range start.
         */
        if (iter->node) {
                /*
                 * A NULL previous node means that the very first slot
                 * already has a higher start gfn.
                 * In this case slot start > range start.
                 */
                tmp = rb_prev(iter->node);
                if (tmp)
                        iter->node = tmp;
        } else {
                /* a NULL node below means no slots */
                iter->node = rb_last(&slots->gfn_tree);
        }

        if (iter->node) {
                iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);

                /*
                 * It is possible in the slot start < range start case that the
                 * found slot ends before or at range start (slot end <= range
                 * start) and so it does not overlap the searched range but
                 * simply precedes it.  In this case it's later slots (if any)
                 * that will possibly overlap it.
                 */
                if (iter->slot->base_gfn + iter->slot->npages <= start)
                        kvm_memslot_iter_next(iter);
        }
}

static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
{
        if (!iter->node)
                return false;

        /*
         * If this slot starts beyond or at the end of the range so does
         * every next one.
         */
        return iter->slot->base_gfn < end;
}

/* Iterate over each memslot at least partially intersecting the [start, end) range */
#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end)      \
        for (kvm_memslot_iter_start(iter, slots, start);                \
             kvm_memslot_iter_is_valid(iter, end);                      \
             kvm_memslot_iter_next(iter))
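
/*
 * Illustrative sketch ("handle_slot" is a hypothetical handler): visiting
 * every memslot that overlaps the gfn range [start, end):
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end)
 *		handle_slot(iter.slot);
 */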

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   const struct kvm_memory_slot *old,
                                   struct kvm_memory_slot *new,
                                   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                            struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
                                      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                          bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
                               bool atomic, bool *async, bool write_fault,
                               bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                 void *data, unsigned int offset,
                                 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                  void *data, unsigned int offset,
                                  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v)                            \
({                                                                      \
        unsigned long __addr = gfn_to_hva(kvm, gfn);                    \
        typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
        int __ret = -EFAULT;                                            \
                                                                        \
        if (!kvm_is_error_hva(__addr))                                  \
                __ret = get_user(v, __uaddr);                           \
        __ret;                                                          \
})

#define kvm_get_guest(kvm, gpa, v)                                      \
({                                                                      \
        gpa_t __gpa = gpa;                                              \
        struct kvm *__kvm = kvm;                                        \
                                                                        \
        __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,                     \
                        offset_in_page(__gpa), v);                      \
})

#define __kvm_put_guest(kvm, gfn, offset, v)                            \
({                                                                      \
        unsigned long __addr = gfn_to_hva(kvm, gfn);                    \
        typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
        int __ret = -EFAULT;                                            \
                                                                        \
        if (!kvm_is_error_hva(__addr))                                  \
                __ret = put_user(v, __uaddr);                           \
        if (!__ret)                                                     \
                mark_page_dirty(kvm, gfn);                              \
        __ret;                                                          \
})

#define kvm_put_guest(kvm, gpa, v)                                      \
({                                                                      \
        gpa_t __gpa = gpa;                                              \
        struct kvm *__kvm = kvm;                                        \
                                                                        \
        __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,                     \
                        offset_in_page(__gpa), v);                      \
})
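
/*
 * Usage sketch: kvm_get_guest()/kvm_put_guest() access a single naturally
 * aligned value and return 0 or -EFAULT ("MY_FLAG" is hypothetical):
 *
 *	u32 val;
 *
 *	if (kvm_get_guest(kvm, gpa, val))
 *		return -EFAULT;
 *	val |= MY_FLAG;
 *	if (kvm_put_guest(kvm, gpa, val))
 *		return -EFAULT;
 */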

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
                             int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                               unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                        unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
                              int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
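
/*
 * Illustrative sketch: temporarily mapping a guest page for kernel access
 * (assumes the access fits within one page):
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 *		return -EFAULT;
 *	memcpy(map.hva + offset_in_page(gpa), src, len);
 *	kvm_vcpu_unmap(vcpu, &map, true);	// 'true' marks the page dirty
 */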

/**
 * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
 *                             given guest physical address.
 *
 * Primes a gfn_to_pfn_cache and links it into the @kvm's list so that it is
 * invalidated when the underlying memslot or host mapping changes.  Returns
 * 0 on success, -EINVAL if @gpa/@len would cross a page boundary, or -EFAULT
 * for an untranslatable guest physical address.  Callers must use
 * kvm_gfn_to_pfn_cache_check() to verify the cache before accessing the page.
 */
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                              struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
                              gpa_t gpa, unsigned long len);

/**
 * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
 *
 * Returns %true if the cache is still valid for the given @gpa and @len.
 * The caller must hold gpc->lock for read across the check and the access
 * it protects; on failure, the mapping must be refreshed with
 * kvm_gfn_to_pfn_cache_refresh() (after dropping the lock).
 */
bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                gpa_t gpa, unsigned long len);

/**
 * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
 *
 * Re-establishes the cached translation after a failed check, e.g. because
 * the memslot layout changed or the page was invalidated.  Returns 0 on
 * success or a negative errno as for kvm_gfn_to_pfn_cache_init().
 */
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                 gpa_t gpa, unsigned long len);

/**
 * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
 *
 * Unmaps the cached page while leaving the cache itself initialized; a
 * subsequent refresh will re-establish the mapping.
 */
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);

/**
 * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
 *
 * Unmaps the page and removes the cache from the @kvm's invalidation list.
 */
void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
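
/*
 * Illustrative lifecycle sketch for a gfn_to_pfn_cache (error handling and
 * some locking elided; see the comments above for the exact rules):
 *
 *	kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu, KVM_HOST_USES_PFN, gpa, len);
 *
 *	read_lock(&gpc->lock);
 *	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, len)) {
 *		read_unlock(&gpc->lock);
 *		kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
 *		read_lock(&gpc->lock);
 *	}
 *	... access the page via gpc->khva ...
 *	read_unlock(&gpc->lock);
 *
 *	kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
 */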

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
                              unsigned long end);
void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
                            unsigned long end);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset,
                                             unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *memslot);
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
                      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                            struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
#endif

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#else
static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
}
#endif

static inline void __kvm_arch_free_vm(struct kvm *kvm)
{
        kvfree(kvm);
}

#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        __kvm_arch_free_vm(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
        return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
        return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
        return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.waitp;
#else
        return &vcpu->wait;
#endif
}

/*
 * Wake a vCPU if necessary, but don't do any stats/metadata updates.  Returns
 * true if the vCPU was blocking and was awakened, false otherwise.
 */
static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
        return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
}

static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
{
        return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ.  On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
        return true;
}
#endif

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);

void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
void kvm_unregister_perf_callbacks(void);
#else
static inline void kvm_register_perf_callbacks(void *ign) {}
static inline void kvm_unregister_perf_callbacks(void) {}
#endif /* CONFIG_GUEST_PERF_EVENTS */

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
bool kvm_is_zone_device_page(struct page *page);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id,
                              int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * Returns a pointer to the memslot if it contains gfn.
 * Otherwise returns NULL.
 */
static inline struct kvm_memory_slot *
try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        if (!slot)
                return NULL;

        if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
                return slot;
        else
                return NULL;
}

/*
 * Returns a pointer to the memslot that contains gfn.  Otherwise returns NULL.
 *
 * With "approx" set returns the memslot also when the address falls
 * in a hole.  In that case one of the memslots bordering the hole is
 * returned.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
        struct kvm_memory_slot *slot;
        struct rb_node *node;
        int idx = slots->node_idx;

        slot = NULL;
        for (node = slots->gfn_tree.rb_node; node; ) {
                slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
                if (gfn >= slot->base_gfn) {
                        if (gfn < slot->base_gfn + slot->npages)
                                return slot;
                        node = node->rb_right;
                } else
                        node = node->rb_left;
        }

        return approx ? slot : NULL;
}

static inline struct kvm_memory_slot *
____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
        struct kvm_memory_slot *slot;

        slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
        slot = try_get_memslot(slot, gfn);
        if (slot)
                return slot;

        slot = search_memslots(slots, gfn, approx);
        if (slot) {
                atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
                return slot;
        }

        return NULL;
}

/*
 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
 * the lookups in hot paths.  gfn_to_memslot() itself isn't here as an inline
 * because that would bloat other code too much.
 */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return ____gfn_to_memslot(slots, gfn, false);
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
        /*
         * The index was checked originally in search_memslots.  To avoid
         * that a malicious guest builds a Spectre gadget out of e.g. page
         * table walks, do not let the processor speculate loads outside
         * the guest's registered memslots.
         */
        unsigned long offset = gfn - slot->base_gfn;
        offset = array_index_nospec(offset, slot->npages);
        return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
        unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

        return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stat_data {
        struct kvm *kvm;
        const struct _kvm_stats_desc *desc;
        enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
        struct kvm_stats_desc desc;
        char name[KVM_STATS_NAME_SIZE];
};

#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz)                      \
        .flags = type | unit | base |                                          \
                 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) |              \
                 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) |              \
                 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK),               \
        .exponent = exp,                                                       \
        .size = sz,                                                            \
        .bucket_size = bsz

#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)            \
        {                                                                      \
                {                                                              \
                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
                        .offset = offsetof(struct kvm_vm_stat, generic.stat)   \
                },                                                             \
                .name = #stat,                                                 \
        }
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)          \
        {                                                                      \
                {                                                              \
                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
                        .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
                },                                                             \
                .name = #stat,                                                 \
        }
#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz)                    \
        {                                                                      \
                {                                                              \
                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
                        .offset = offsetof(struct kvm_vm_stat, stat)           \
                },                                                             \
                .name = #stat,                                                 \
        }
#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz)                  \
        {                                                                      \
                {                                                              \
                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
                        .offset = offsetof(struct kvm_vcpu_stat, stat)         \
                },                                                             \
                .name = #stat,                                                 \
        }

#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz)                \
        SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)

#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent)               \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE,                     \
                   unit, base, exponent, 1, 0)
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent)                  \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT,                        \
                   unit, base, exponent, 1, 0)
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent)                     \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK,                           \
                   unit, base, exponent, 1, 0)
#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz)     \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST,                    \
                   unit, base, exponent, sz, bsz)
#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz)             \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST,                       \
                   unit, base, exponent, sz, 0)

/* Cumulative counter, read/write */
#define STATS_DESC_COUNTER(SCOPE, name)                                        \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE,                \
                              KVM_STATS_BASE_POW10, 0)
/* Instantaneous counter, read only */
#define STATS_DESC_ICOUNTER(SCOPE, name)                                       \
        STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE,                   \
                           KVM_STATS_BASE_POW10, 0)
/* Peak counter, read/write */
#define STATS_DESC_PCOUNTER(SCOPE, name)                                       \
        STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,                      \
                        KVM_STATS_BASE_POW10, 0)

/* Instantaneous boolean value, read only */
#define STATS_DESC_IBOOLEAN(SCOPE, name)                                       \
        STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,                \
                           KVM_STATS_BASE_POW10, 0)
/* Peak (sticky) boolean value, read/write */
#define STATS_DESC_PBOOLEAN(SCOPE, name)                                       \
        STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,                   \
                        KVM_STATS_BASE_POW10, 0)

/* Cumulative time in nanosecond */
#define STATS_DESC_TIME_NSEC(SCOPE, name)                                      \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS,             \
                              KVM_STATS_BASE_POW10, -9)
/* Linear histogram for time in nanosecond */
#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz)                     \
        STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,            \
                               KVM_STATS_BASE_POW10, -9, sz, bsz)
/* Logarithmic histogram for time in nanosecond */
#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz)                          \
        STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,               \
                            KVM_STATS_BASE_POW10, -9, sz)

#define KVM_GENERIC_VM_STATS()                                                 \
        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush),                      \
        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)

#define KVM_GENERIC_VCPU_STATS()                                               \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll),                \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll),                 \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid),                   \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup),                         \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns),              \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns),                 \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns),                      \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist,     \
                        HALT_POLL_HIST_COUNT),                                 \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist,        \
                        HALT_POLL_HIST_COUNT),                                 \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,             \
                        HALT_POLL_HIST_COUNT),                                 \
        STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)

extern struct dentry *kvm_debugfs_dir;

ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
                       const struct _kvm_stats_desc *desc,
                       void *stats, size_t size_stats,
                       char __user *user_buffer, size_t size, loff_t *offset);

/**
 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets of the stats data
 * @value: the new value used to update the linear histogram's bucket
 * @bucket_size: the size (width) of a bucket
 */
static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
                                                u64 value, size_t bucket_size)
{
        size_t index = div64_u64(value, bucket_size);

        index = min(index, size - 1);
        ++data[index];
}

/**
 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets of the stats data
 * @value: the new value used to update the logarithmic histogram's bucket
 */
static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{
        size_t index = fls64(value);

        index = min(index, size - 1);
        ++data[index];
}

#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize)                      \
        kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
#define KVM_STATS_LOG_HIST_UPDATE(array, value)                                \
        kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
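
/*
 * Usage sketch: histogram stats are updated through the wrappers above, e.g.
 * for the generic halt_wait_hist log-histogram:
 *
 *	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist, ns);
 */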

extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_invalidate_in_progress))
                return 1;
        /*
         * Ensure the read of mmu_invalidate_in_progress happens before
         * the read of mmu_invalidate_seq.  This interacts with the
         * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
         * that the caller either sees the old (non-zero) value of
         * mmu_invalidate_in_progress or the new value of
         * mmu_invalidate_seq.
         *
         * PowerPC Book3s HV KVM calls this under a per-page lock rather
         * than under kvm->mmu_lock, for scalability, so can't rely on
         * kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_invalidate_seq != mmu_seq)
                return 1;
        return 0;
}

static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
                                           unsigned long mmu_seq,
                                           unsigned long hva)
{
        lockdep_assert_held(&kvm->mmu_lock);
        /*
         * If mmu_invalidate_in_progress is non-zero, then the range maintained
         * by kvm_mmu_notifier_invalidate_range_start contains all addresses
         * that might be being invalidated.  Note that it may include some
         * false positives, due to shortcuts when handling concurrent
         * invalidations.
         */
        if (unlikely(kvm->mmu_invalidate_in_progress) &&
            hva >= kvm->mmu_invalidate_range_start &&
            hva < kvm->mmu_invalidate_range_end)
                return 1;
        if (kvm->mmu_invalidate_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
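
/*
 * Illustrative sketch of the mmu_invalidate_seq pattern used by page fault
 * handlers (arch details elided): snapshot the sequence count, resolve the
 * pfn without holding mmu_lock, then retry if an invalidation ran in between:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = ...fault the page in...;
 *	write_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva))
 *		goto retry;
 *	...install the mapping...
 */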

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Ensure the rest of the request is published to kvm_check_request's
         * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
         */
        smp_wmb();
        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Requests that don't require vCPU action should never be logged in
         * vcpu->requests.  The vCPU won't clear the request, so it will stay
         * logged indefinitely and prevent the vCPU from entering the guest.
         */
        BUILD_BUG_ON(!__builtin_constant_p(req) ||
                     (req & KVM_REQUEST_NO_ACTION));

        __kvm_make_request(req, vcpu);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
        return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
        return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
        clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (kvm_test_request(req, vcpu)) {
                kvm_clear_request(req, vcpu);

                /*
                 * Ensure the rest of the request is visible to
                 * kvm_check_request's caller.  Paired with the smp_wmb in
                 * __kvm_make_request.
                 */
                smp_mb__after_atomic();
                return true;
        } else {
                return false;
        }
}
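
/*
 * Usage sketch: a requester pairs kvm_make_request() with a kick, and the
 * vCPU consumes the request from its run loop:
 *
 *	// requester
 *	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// vCPU run loop
 *	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
 *		...handle it...
 */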

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
        const struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;

        /*
         * create is called holding kvm->lock and any operations not suitable
         * to do while holding the lock should be deferred to init (see
         * below).
         */
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * init is called after create if create is successful and is called
         * outside of holding kvm->lock.
         */
        void (*init)(struct kvm_device *dev);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        /*
         * Release is an alternative method to free the device.  It is
         * called when the device file descriptor is closed.  Once
         * release is called, the destroy method will not be called
         * anymore as the device is removed from the device list of
         * the VM.  kvm->lock is held.
         */
        void (*release)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
        int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
        return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
                !(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
                                     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
                                      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
                                  struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
        return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
                                             unsigned int ioctl,
                                             unsigned long arg)
{
        return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                            unsigned long start, unsigned long end);

void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
                                uintptr_t data, const char *name,
                                struct task_struct **thread_ptr);

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
        vcpu->run->exit_reason = KVM_EXIT_INTR;
        vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vcpu to the userspace to avoid dirty ring full.  This
 * value can be tuned to higher if e.g. PML is enabled on the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES  64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES  65536

#endif