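/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */
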
#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>
#include <linux/workqueue.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 1024

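/*
 * In x86, the vCPU ID corresponds to the APIC ID, and APIC IDs
 * might be larger than the actual number of vCPUs because the
 * APIC ID encodes CPU topology information.
 *
 * In the worst case, we'll need less than one extra bit for the
 * Core ID, and less than one extra bit for the Package (Die) ID,
 * so a ratio of 4 should be enough.
 */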
#define KVM_VCPU_ID_RATIO 4
#define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)

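/* memory slots that are not exposed to userspace */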
#define KVM_INTERNAL_MEM_SLOTS 3

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				   KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \
					   KVM_BUS_LOCK_DETECTION_EXIT)

#define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
					  KVM_X86_NOTIFY_VMEXIT_USER)

#define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_MMU_PGD KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
#define KVM_REQ_NMI KVM_ARCH_REQ(9)
#define KVM_REQ_PMU KVM_ARCH_REQ(10)
#define KVM_REQ_PMI KVM_ARCH_REQ(11)
#define KVM_REQ_SMI KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23)
#define KVM_REQ_GET_NESTED_STATE_PAGES KVM_ARCH_REQ(24)
#define KVM_REQ_APICV_UPDATE \
	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
#define KVM_REQ_TLB_FLUSH_GUEST \
	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
	KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
	KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define INVALID_GPA (~(gpa_t)0)

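/* KVM Hugepage definitions for X86 */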
#define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
#define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

#define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 256
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = __VCPU_REGS_R8,
	VCPU_REGS_R9 = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS,

	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR0,
	VCPU_EXREG_CR3,
	VCPU_EXREG_CR4,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
	VCPU_EXREG_EXIT_INFO_1,
	VCPU_EXREG_EXIT_INFO_2,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_REENTER_GUEST,
	EXIT_FASTPATH_EXIT_HANDLED,
};
typedef enum exit_fastpath_completion fastpath_t;

struct x86_emulate_ctxt;
struct x86_exception;
enum x86_intercept;
enum x86_intercept_stage;

#define KVM_NR_DB_REGS 4

#define DR6_BUS_LOCK (1 << 11)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
#define DR6_RTM (1 << 16)

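/*
 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
 * they will never be 0 for now, but when they are defined
 * in the future it will require no code change.
 *
 * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
 */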
#define DR6_ACTIVE_LOW 0xffff0ff0
#define DR6_VOLATILE 0x0001e80f
#define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE)

#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE (1 << 9)
#define DR7_GD (1 << 13)
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff2bff

#define KVM_GUESTDBG_VALID_MASK \
	(KVM_GUESTDBG_ENABLE | \
	KVM_GUESTDBG_SINGLESTEP | \
	KVM_GUESTDBG_USE_HW_BP | \
	KVM_GUESTDBG_USE_SW_BP | \
	KVM_GUESTDBG_INJECT_BP | \
	KVM_GUESTDBG_INJECT_DB | \
	KVM_GUESTDBG_BLOCKIRQ)

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS (1ULL << PFERR_IMPLICIT_ACCESS_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
				 PFERR_WRITE_MASK | \
				 PFERR_PRESENT_MASK)

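/* apic attention bits */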
#define KVM_APIC_CHECK_VAPIC 0
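/* The following bit is set with PV-EOI, unset on EOI. */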
#define KVM_APIC_PV_EOI_PENDING 1

struct kvm_kernel_irq_routing_entry;

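/*
 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
 * also includes TDP pages) to determine whether or not a page can be used in
 * the given MMU context.  This is a subset of the overall kvm_cpu_role to
 * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. it allows
 * allocating 2 bytes per gfn instead of 4 bytes per gfn.
 */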
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned has_4_byte_gpte:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned efer_nx:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned passthrough:1;
		unsigned :5;

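		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */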
		unsigned smm:8;
	};
};

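/*
 * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
 * relevant to the current MMU configuration.
 */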
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int efer_lma:1;
	};
};

union kvm_cpu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t pgd;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

#define KVM_HAVE_MMU_RWLOCK

struct kvm_mmu_page;
struct kvm_page_fault;

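/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */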
struct kvm_mmu {
	unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gpa_t gva_or_gpa, u64 access,
			    struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	struct kvm_mmu_root_info root;
	union kvm_cpu_role cpu_role;
	union kvm_mmu_page_role root_role;

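	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */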
	u32 pkru_mask;

	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

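	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */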
	u8 permissions[16];

	u64 *pae_root;
	u64 *pml4_root;
	u64 *pml5_root;

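	/*
	 * check zero bits on shadow page table entries, these
	 * bits include not only hardware reserved bits but also
	 * the bits spte never used.
	 */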
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	u64 pdptrs[4];
};

struct kvm_tlb_range {
	u64 start_gfn;
	u64 pages;
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;

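	/*
	 * eventsel value for general purpose counters,
	 * ctrl value for fixed counters.
	 */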
	u64 current_config;
	bool is_paused;
	bool intr;
};

#define KVM_PMC_MAX_FIXED 3
struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 fixed_ctr_ctrl_mask;
	u64 global_ctrl;
	u64 global_status;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u64 raw_event_mask;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
	struct irq_work irq_work;
	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);

	u64 ds_area;
	u64 pebs_enable;
	u64 pebs_enable_mask;
	u64 pebs_data_cfg;
	u64 pebs_data_cfg_mask;

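	/*
	 * If a guest counter is cross-mapped to a host counter with a
	 * different index, its PEBS capability will be temporarily disabled.
	 *
	 * The user should make sure that this mask is updated
	 * after disabling interrupts and before perf_guest_get_msrs();
	 */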
	u64 host_cross_mapped_mask;

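	/*
	 * The gate to release perf_events not marked in
	 * pmc_in_use only once in a vcpu time slice.
	 */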
	bool need_cleanup;

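	/*
	 * The total number of programmed perf_events and it helps to avoid
	 * redundant check before cleanup if guest don't use vPMU at all.
	 */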
	u8 event_count;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

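/* Hyper-V SynIC timer */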
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

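/* Hyper-V synthetic interrupt controller (SynIC) */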
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};

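/* Hyper-V per vcpu emulation context */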
struct kvm_vcpu_hv {
	struct kvm_vcpu *vcpu;
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	bool enforce_cpuid;
	struct {
		u32 features_eax;
		u32 features_ebx;
		u32 features_edx;
		u32 enlightenments_eax;
		u32 enlightenments_ebx;
		u32 syndbg_cap_eax;
	} cpuid_cache;
};

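/* Xen HVM per vcpu emulation context */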
struct kvm_vcpu_xen {
	u64 hypercall_rip;
	u32 current_runstate;
	u8 upcall_vector;
	struct gfn_to_pfn_cache vcpu_info_cache;
	struct gfn_to_pfn_cache vcpu_time_info_cache;
	struct gfn_to_pfn_cache runstate_cache;
	u64 last_steal;
	u64 runstate_entry_time;
	u64 runstate_times[4];
	unsigned long evtchn_pending_sel;
	u32 vcpu_id;
	u32 timer_virq;
	u64 timer_expires;
	atomic_t timer_pending;
	struct hrtimer timer;
	int poll_evtchn;
	struct timer_list poll_timer;
};

struct kvm_vcpu_arch {
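	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */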
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr4_guest_rsvd_bits;
	unsigned long cr8;
	u32 host_pkru;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool at_instruction_boundary;
	bool tpr_access_reporting;
	bool xsaves_enabled;
	bool xfd_no_write_intercept;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;
	u64 perf_capabilities;

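	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */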
	struct kvm_mmu *mmu;

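	/* Non-nested MMU for L1 */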
	struct kvm_mmu root_mmu;

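	/* L1 MMU when running nested */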
	struct kvm_mmu guest_mmu;

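	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */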
	struct kvm_mmu nested_mmu;

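	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */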
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_shadow_page_cache;
	struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

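	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpstate" state here contains the guest FPU context, with the
	 * host PRKU bits.
	 */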
	struct fpu_guest guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;

	struct kvm_pio_request pio;
	void *pio_data;
	void *sev_pio_data;
	unsigned sev_pio_count;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool injected;
		bool has_error_code;
		u8 nr;
		u32 error_code;
		unsigned long payload;
		bool has_payload;
		u8 nested_apf;
	} exception;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request;

	int cpuid_nent;
	struct kvm_cpuid_entry2 *cpuid_entries;
	u32 kvm_cpuid_base;

	u64 reserved_gpa_bits;
	int maxphyaddr;

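	/* emulate context */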
	struct x86_emulate_ctxt *emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_pfn_cache pv_time;

	bool pvclock_set_guest_stopped_request;

	struct {
		u8 preempted;
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache cache;
	} st;

	u64 l1_tsc_offset;
	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 l1_tsc_scaling_ratio;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;
	unsigned nmi_pending;
	bool nmi_injected;
	bool smi_pending;
	u8 handling_intr_from_guest;

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;
	u64 *mci_ctl2_banks;

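	/* Cache MMIO info */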
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

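	/* used for guest single stepping over the given code position */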
	unsigned long singlestep_rip;

	bool hyperv_enabled;
	struct kvm_vcpu_hv *hyperv;
	struct kvm_vcpu_xen xen;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[ASYNC_PF_PER_VCPU];
		struct gfn_to_hva_cache data;
		u64 msr_en_val;
		u64 msr_int_val;
		u16 vec;
		u32 id;
		bool send_user_only;
		u32 host_apf_flags;
		unsigned long nested_apf_token;
		bool delivery_as_pf_vmexit;
		bool pageready_pending;
	} apf;

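	/* OSVW MSRs (AMD only) */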
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

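	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.  KVM exits
	 * to userspace if emulation of the faulting instruction fails and this
	 * flag is set, as KVM cannot make forward progress.
	 *
	 * If emulation fails for a write to guest page tables, KVM unprotects
	 * (zaps) the shadow page for the target gfn and resumes the guest to
	 * retry the non-emulatable instruction (on hardware).  Unprotecting the
	 * gfn doesn't allow forward progress for a self-changing access because
	 * doing so also zaps the translation for the gfn, i.e. retrying the
	 * instruction will hit a !PRESENT fault, which results in a new shadow
	 * page and sends KVM back to square one.
	 */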
	bool write_fault_to_shadow_pgtable;

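	/* set at EPT violation at this point */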
	unsigned long exit_qualification;

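	/* pv related host specific info */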
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

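	/* be preempted when it's in kernel-mode(cpl=0) */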
	bool preempted_in_kernel;

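	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */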
	bool l1tf_flush_l1d;

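	/* Host CPU on which VM-entry was most recently attempted */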
	int last_vmentry_cpu;

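	/* AMD MSRC001_0015 Hardware Configuration */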
	u64 msr_hwcr;

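	/* pv related cpuid info */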
	struct {
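		/*
		 * value of the eax register in the KVM_CPUID_FEATURES CPUID
		 * leaf.
		 */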
		u32 features;

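		/*
		 * indicates whether pv emulation should be disabled if features
		 * are not present in the guest's cpuid
		 */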
		bool enforce;
	} pv_cpuid;

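	/* Protected Guests */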
	bool guest_state_protected;

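	/*
	 * Set when PDPTS were loaded directly by the userspace without
	 * reading the guest memory
	 */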
	bool pdptrs_from_userspace;

#if IS_ENABLED(CONFIG_HYPERV)
	hpa_t hv_root_tdp;
#endif
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

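/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it is very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */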
#define KVM_APIC_MODE_XAPIC_CLUSTER 4
#define KVM_APIC_MODE_XAPIC_FLAT 8
#define KVM_APIC_MODE_X2APIC 16

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};

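/* Hyper-V synthetic debugger (SynDbg) */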
struct kvm_hv_syndbg {
	struct {
		u64 control;
		u64 status;
		u64 send_page;
		u64 recv_page;
		u64 pending_page;
	} control;
	u64 options;
};

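/* Current state of Hyper-V TSC page clocksource */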
enum hv_tsc_page_status {
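	/* TSC page was not set up or disabled */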
	HV_TSC_PAGE_UNSET = 0,
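	/* TSC page MSR was written by the guest, update pending */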
	HV_TSC_PAGE_GUEST_CHANGED,
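	/* TSC page update was triggered from the host side */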
	HV_TSC_PAGE_HOST_CHANGED,
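	/* TSC page was properly set up and is currently active */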
	HV_TSC_PAGE_SET,
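	/* TSC page was set up with an inaccessible GPA */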
	HV_TSC_PAGE_BROKEN,
};

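/* Hyper-V emulation context */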
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;
	enum hv_tsc_page_status hv_tsc_page_status;

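	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */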
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	struct ms_hyperv_tsc_page tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;

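	/* How many vCPUs have VP index != vCPU index */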
	atomic_t num_mismatched_vp_indexes;

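	/*
	 * How many SynICs use 'AutoEOI' feature
	 * (protected by arch.apicv_update_lock)
	 */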
	unsigned int synic_auto_eoi_used;

	struct hv_partition_assist_pg *hv_pa_pg;
	struct kvm_hv_syndbg hv_syndbg;
};

struct msr_bitmap_range {
	u32 flags;
	u32 nmsrs;
	u32 base;
	unsigned long *bitmap;
};

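/* Xen emulation context */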
struct kvm_xen {
	u32 xen_version;
	bool long_mode;
	u8 upcall_vector;
	struct gfn_to_pfn_cache shinfo_cache;
	struct idr evtchn_ports;
	unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,
	KVM_IRQCHIP_SPLIT,
};

struct kvm_x86_msr_filter {
	u8 count;
	bool default_allow:1;
	struct msr_bitmap_range ranges[16];
};

enum kvm_apicv_inhibit {
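	/********************************************************************/
	/* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC  */
	/********************************************************************/

	/*
	 * APIC acceleration is disabled by a module parameter
	 * and/or not supported in hardware.
	 */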
	APICV_INHIBIT_REASON_DISABLE,

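	/*
	 * APIC acceleration is inhibited because AutoEOI feature is
	 * being used by a HyperV guest.
	 */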
	APICV_INHIBIT_REASON_HYPERV,

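	/*
	 * APIC acceleration is inhibited because the userspace didn't yet
	 * enable the kernel/split irqchip.
	 */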
	APICV_INHIBIT_REASON_ABSENT,

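	/* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
	 * (out of band, debug measure of blocking all interrupts on this vCPU)
	 * was enabled, to avoid AVIC/APICv bypassing it.
	 */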
	APICV_INHIBIT_REASON_BLOCKIRQ,

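	/*
	 * For simplicity, the APIC acceleration is inhibited the
	 * first time either APIC ID or APIC base are changed by the guest
	 * from their reset values.
	 */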
	APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
	APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,

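	/******************************************************/
	/* INHIBITs that are relevant only to the AMD's AVIC. */
	/******************************************************/

	/*
	 * AVIC is inhibited on a vCPU because it runs a nested guest.
	 *
	 * This is needed because unlike APICv, the peers of this vCPU
	 * cannot use the doorbell mechanism to signal interrupts via AVIC when
	 * a vCPU runs nested.
	 */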
	APICV_INHIBIT_REASON_NESTED,

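	/*
	 * On SVM, wait for the IRQ window is implemented with pending virtual
	 * interrupt, which can't be injected when the AVIC is enabled.
	 */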
	APICV_INHIBIT_REASON_IRQWIN,

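	/*
	 * PIT (i8254) 're-inject' mode, relies on EOI intercept,
	 * which AVIC doesn't support for edge triggered interrupts.
	 */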
	APICV_INHIBIT_REASON_PIT_REINJ,

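	/*
	 * AVIC is disabled because SEV doesn't support it.
	 */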
	APICV_INHIBIT_REASON_SEV,
};

struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct list_head lpage_disallowed_mmu_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

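	/*
	 * Protects marking pages unsync during page faults, as TDP MMU page
	 * faults only take mmu_lock for read.  For simplicity, the unsync
	 * pages lock is always taken when marking pages unsync regardless of
	 * whether mmu_lock is held for read or write.
	 */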
	spinlock_t mmu_unsync_pages_lock;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map __rcu *apic_map;
	atomic_t apic_map_dirty;

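	/* Protects apic_access_memslot_enabled and apicv_inhibit_reasons */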
	struct rw_semaphore apicv_update_lock;

	bool apic_access_memslot_enabled;
	unsigned long apicv_inhibit_reasons;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;

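	/*
	 * This also protects nr_vcpus_matched_tsc which is read from a
	 * preemption-disabled region, so it must be a raw spinlock.
	 */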
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 last_tsc_offset;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	u32 default_tsc_khz;

	seqcount_raw_spinlock_t pvclock_sc;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

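	/* reads protected by irq_srcu, writes by irq_lock */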
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;
	struct kvm_xen xen;

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;
	int cpu_dirty_logging_count;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	bool triple_fault_event;

	bool bus_lock_detection_enabled;
	bool enable_pmu;

	u32 notify_window;
	u32 notify_vmexit_flags;

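	/*
	 * If exit_on_emulation_error is set, and the in-kernel instruction
	 * emulator fails to emulate an instruction, allow userspace
	 * the opportunity to look at it.
	 */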
	bool exit_on_emulation_error;

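	/* Deflect RDMSR/WRMSR to user space when they trigger a #GP */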
	u32 user_space_msr_mask;
	struct kvm_x86_msr_filter __rcu *msr_filter;

	u32 hypercall_exit_enabled;

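	/* Guest can access the SGX PROVISIONKEY. */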
	bool sgx_provisioning_allowed;

	struct kvm_pmu_event_filter __rcu *pmu_event_filter;
	struct task_struct *nx_lpage_recovery_thread;

#ifdef CONFIG_X86_64
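	/*
	 * Whether the TDP MMU is enabled for this VM. This contains a
	 * snapshot of the TDP MMU module parameter from when the VM was
	 * created and remains unchanged for the life of the VM. If this is
	 * true, TDP MMU handler functions will run for various MMU
	 * operations.
	 */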
	bool tdp_mmu_enabled;

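	/*
	 * List of struct kvm_mmu_pages being used as roots.
	 * All struct kvm_mmu_pages in the list should have
	 * tdp_mmu_page set.
	 *
	 * For reads, this list is protected by:
	 *	the MMU lock in read mode + RCU or
	 *	the MMU lock in write mode
	 *
	 * For writes, this list is protected by:
	 *	the MMU lock in read mode + the tdp_mmu_pages_lock or
	 *	the MMU lock in write mode
	 *
	 * Roots will remain in the list until their tdp_mmu_root_count
	 * drops to zero, at which point the thread that decremented the
	 * count to zero should remove the root from the list and clean
	 * it up, freeing the root after an RCU grace period.
	 */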
	struct list_head tdp_mmu_roots;

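	/*
	 * List of struct kvm_mmu_pages not being used as roots.
	 * All struct kvm_mmu_pages in the list should have
	 * tdp_mmu_page set and a tdp_mmu_root_count of 0.
	 */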
	struct list_head tdp_mmu_pages;

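	/*
	 * Protects accesses to the following fields when the MMU lock
	 * is held in read mode:
	 *  - tdp_mmu_roots (above)
	 *  - tdp_mmu_pages (above)
	 *  - the link field of struct kvm_mmu_pages used by the TDP MMU
	 *  - lpage_disallowed_mmu_pages
	 *  - the lpage_disallowed_link field of struct kvm_mmu_pages used
	 *    by the TDP MMU
	 * It is acceptable, but not necessary, to acquire this lock when
	 * the thread holds the MMU lock in write mode.
	 */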
	spinlock_t tdp_mmu_pages_lock;
	struct workqueue_struct *tdp_mmu_zap_wq;
#endif /* CONFIG_X86_64 */

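	/*
	 * If set, at least one shadow root has been allocated. This flag
	 * is used as one input when determining whether certain memslot
	 * related allocations are necessary.
	 */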
	bool shadow_root_allocated;

#if IS_ENABLED(CONFIG_HYPERV)
	hpa_t hv_root_tdp;
	spinlock_t hv_root_tdp_lock;
#endif

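	/*
	 * VM-scope maximum vCPU ID. Used to determine the size of structures
	 * that increase along with the maximum vCPU ID, in which case, using
	 * the global KVM_MAX_VCPU_IDS may lead to significant memory waste.
	 */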
	u32 max_vcpu_ids;

	bool disable_nx_huge_pages;

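	/*
	 * Memory caches used to allocate shadow pages when performing eager
	 * page splitting. No need for a shadowed_info_cache since eager page
	 * splitting only allocates direct shadow pages.
	 *
	 * Protected by kvm->slots_lock.
	 */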
	struct kvm_mmu_memory_cache split_shadow_page_cache;
	struct kvm_mmu_memory_cache split_page_header_cache;

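	/*
	 * Memory cache used to allocate pte_list_desc structs while splitting
	 * huge pages. In the worst case, to split one huge page, 512
	 * pte_list_desc structs are needed to add each lower level leaf sptep
	 * to the rmap plus 1 to extend the parent_ptes rmap of the lower level
	 * page table.
	 *
	 * Protected by kvm->slots_lock.
	 */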
#define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
	struct kvm_mmu_memory_cache split_desc_cache;
};

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
	u64 mmu_shadow_zapped;
	u64 mmu_pte_write;
	u64 mmu_pde_zapped;
	u64 mmu_flooded;
	u64 mmu_recycled;
	u64 mmu_cache_miss;
	u64 mmu_unsync;
	union {
		struct {
			atomic64_t pages_4k;
			atomic64_t pages_2m;
			atomic64_t pages_1g;
		};
		atomic64_t pages[KVM_NR_PAGE_SIZES];
	};
	u64 nx_lpage_splits;
	u64 max_mmu_page_hash_collisions;
	u64 max_mmu_rmap_size;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 pf_taken;
	u64 pf_fixed;
	u64 pf_emulate;
	u64 pf_spurious;
	u64 pf_fast;
	u64 pf_mmio_spte_created;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
	u64 nested_run;
	u64 directed_yield_attempted;
	u64 directed_yield_successful;
	u64 preemption_reported;
	u64 preemption_other;
	u64 guest_mode;
	u64 notify_window_exits;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
{
	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}

struct kvm_x86_ops {
	const char *name;

	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	void (*hardware_unsetup)(void);
	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);

	unsigned int vm_size;
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

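	/* Create, but do not attach this VCPU */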
	int (*vcpu_precreate)(struct kvm *kvm);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_switch_to_guest)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	bool (*get_if_flag)(struct kvm_vcpu *vcpu);

	void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
	void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
	int (*tlb_remote_flush)(struct kvm *kvm);
	int (*tlb_remote_flush_with_range)(struct kvm *kvm,
					   struct kvm_tlb_range *range);

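	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */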
	void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr);

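	/*
	 * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
	 * does not need to flush GPA->HPA mappings.
	 */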
	void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);

	int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
	enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu,
			   enum exit_fastpath_completion exit_fastpath);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected);
	void (*inject_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
	void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
				  int trig_mode, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level);

	bool (*has_wbinvd_exit)(void);

	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);

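	/*
	 * Retrieve somewhat arbitrary exit information.  Intended to
	 * be used only from within tracepoints or error paths.
	 */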
	void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
			      u64 *info1, u64 *info2,
			      u32 *exit_int_info, u32 *exit_int_info_err_code);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);

	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);

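	/*
	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer.  A zero
	 * value indicates CPU dirty logging is unsupported or disabled.
	 */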
	int cpu_dirty_log_size;
	void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);

	const struct kvm_x86_nested_ops *nested_ops;

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*pi_start_assignment)(struct kvm *kvm);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

	int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
	int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
	void (*enable_smi_window)(struct kvm_vcpu *vcpu);

	int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
	int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
	void (*guest_memory_reclaimed)(struct kvm *kvm);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
					void *insn, int insn_len);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);

	void (*migrate_timers)(struct kvm_vcpu *vcpu);
	void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
	int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);

	void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);

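	/*
	 * Returns vCPU specific APICv inhibit reasons
	 */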
	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
};

struct kvm_x86_nested_ops {
	void (*leave_nested)(struct kvm_vcpu *vcpu);
	int (*check_events)(struct kvm_vcpu *vcpu);
	bool (*handle_page_fault_workaround)(struct kvm_vcpu *vcpu,
					     struct x86_exception *fault);
	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
	void (*triple_fault)(struct kvm_vcpu *vcpu);
	int (*get_state)(struct kvm_vcpu *vcpu,
			 struct kvm_nested_state __user *user_kvm_nested_state,
			 unsigned user_data_size);
	int (*set_state)(struct kvm_vcpu *vcpu,
			 struct kvm_nested_state __user *user_kvm_nested_state,
			 struct kvm_nested_state *kvm_state);
	bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

	int (*enable_evmcs)(struct kvm_vcpu *vcpu,
			    uint16_t *vmcs_version);
	uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
};

struct kvm_x86_init_ops {
	int (*cpu_has_kvm_support)(void);
	int (*disabled_by_bios)(void);
	int (*check_processor_compatibility)(void);
	int (*hardware_setup)(void);
	unsigned int (*handle_intel_pt_intr)(void);

	struct kvm_x86_ops *runtime_ops;
	struct kvm_pmu_ops *pmu_ops;
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr;
extern bool __read_mostly enable_apicv;
extern struct kvm_x86_ops kvm_x86_ops;

#define KVM_X86_OP(func) \
	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

#define __KVM_HAVE_ARCH_VM_FREE
void kvm_arch_free_vm(struct kvm *kvm);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops.tlb_remote_flush &&
	    !static_call(kvm_x86_tlb_remote_flush)(kvm))
		return 0;
	else
		return -ENOTSUPP;
}

#define kvm_arch_pmi_in_guest(vcpu) \
	((vcpu) && (vcpu)->arch.handling_intr_from_guest)

void __init kvm_mmu_x86_module_init(void);
int kvm_mmu_vendor_module_init(void);
void kvm_mmu_vendor_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);

void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      const struct kvm_memory_slot *memslot,
				      int start_level);
void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
				       const struct kvm_memory_slot *memslot,
				       int target_level);
void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  u64 start, u64 end,
				  int target_level);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

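/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops.skip_emulated_instruction() implementations if
 *		   EMULTYPE_COMPLETE_USER_EXIT is not set.
 *
 * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
 *			     retry native execution under certain conditions,
 *			     Can only be set in conjunction with EMULTYPE_PF.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses EmulateOnUD restriction despite emulating
 *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
 *			     Used to test the full emulator from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 *
 * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
 *		 case the CR2/GPA value pass on the stack is valid.
 *
 * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update
 *				 interruptibility state and inject single-step
 *				 #DBs after skipping an instruction (after
 *				 completing userspace I/O).
 */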
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
#define EMULTYPE_ALLOW_RETRY_PF (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED (1 << 4)
#define EMULTYPE_VMWARE_GP (1 << 5)
#define EMULTYPE_PF (1 << 6)
#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)

int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);
void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu,
					  u64 *data, u8 ndata);
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
int kvm_emulate_invd(struct kvm_vcpu *vcpu);
int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
int kvm_handle_invalid_op(struct kvm_vcpu *vcpu);
int kvm_emulate_monitor(struct kvm_vcpu *vcpu);

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu);
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
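	/* Logical OR for level trig interrupt */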
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}

#define KVM_MMU_ROOT_CURRENT BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i)
#define KVM_MMU_ROOTS_ALL (~0UL)

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

void kvm_update_dr7(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
			ulong roots_to_free);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

bool kvm_apicv_activated(struct kvm *kvm);
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu);
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set);
void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set);

static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
					 enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
}

static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
					   enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
}

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gva_t gva, hpa_t root_hpa);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);

void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level);

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK (1 << 0)
#define HF_NMI_MASK (1 << 3)
#define HF_IRET_MASK (1 << 4)
#define HF_GUEST_MASK (1 << 5)
#define HF_SMM_MASK (1 << 6)
#define HF_SMM_INSIDE_NMI_MASK (1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

#define KVM_ARCH_WANT_MMU_NOTIFIER

int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

int kvm_add_user_return_msr(u32 msr);
int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);

static inline bool kvm_is_supported_user_return_msr(u32 msr)
{
	return kvm_find_user_return_msr(msr) >= 0;
}

u64 kvm_scale_tsc(u64 tsc, u64 ratio);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap);

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
				     u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
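	/* We can only post Fixed and LowPrio IRQs */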
	return (irq->delivery_mode == APIC_DM_FIXED ||
		irq->delivery_mode == APIC_DM_LOWEST);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#define put_smstate(type, buf, offset, val) \
	*(type *)((buf) + (offset) - 0x7e00) = val

#define GET_SMSTATE(type, buf, offset) \
	(*(type *)((buf) + (offset) - 0x7e00))

int kvm_cpu_dirty_log_size(void);

int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);

#define KVM_CLOCK_VALID_FLAGS \
	(KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)

#define KVM_X86_VALID_QUIRKS \
	(KVM_X86_QUIRK_LINT0_REENABLED | \
	 KVM_X86_QUIRK_CD_NW_CLEARED | \
	 KVM_X86_QUIRK_LAPIC_MMIO_HOLE | \
	 KVM_X86_QUIRK_OUT_7E_INC_RIP | \
	 KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \
	 KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \
	 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)

#endif /* _ASM_X86_KVM_HOST_H */