/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	46
#define MSRPM_OFFSETS	32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;

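/*
 * AVIC operating modes: disabled, xAPIC-mode acceleration only, or
 * x2APIC-mode acceleration (x2AVIC).
 */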
enum avic_modes {
	AVIC_MODE_NONE = 0,
	AVIC_MODE_X1,
	AVIC_MODE_X2,
};

extern enum avic_modes avic_mode;

/*
 * VMCB clean bits.  If this enum changes, VMCB_ALL_CLEAN_MASK below
 * must be kept in sync.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	/* Not a hardware clean bit; reserved for software/hypervisor use. */
	VMCB_SW = 31,
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

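/*
 * Per-VM SEV/SEV-ES state: the firmware handle and ASID backing the guest,
 * locked-memory accounting, and the bookkeeping used by mirror VMs (shared
 * encryption context) and intra-host migration.
 */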
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

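/*
 * Bookkeeping for one VMCB: its kernel mapping, physical address, the CPU
 * it last ran on, and the ASID generation it was last run with.
 */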
struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

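/*
 * Cached copies of vmcb12 save/control fields.  KVM snapshots these on
 * nested VMRUN so that consistency checks and later consumers operate on
 * values the guest can no longer change behind KVM's back.
 */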
struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u8 reserved_sw[32];
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paired L2 exit if L1's MSR bitmap was changed in-between (otherwise,
	 * KVM may simply clear the flag).
	 */
	bool force_msr_bitmap_recalc;
};

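/*
 * Per-vCPU SEV-ES state: the encrypted VMSA, the shared GHCB mapping, and
 * the scratch-area bookkeeping used when emulating guest accesses via the
 * GHCB protocol.
 */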
struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;

	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;
	bool nmi_l1_to_l2;

	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;

	/* optional nested SVM features that are enabled for this guest */
	bool nrips_enabled		: 1;
	bool tsc_scaling_enabled	: 1;
	bool v_vmload_vmsave_enabled	: 1;
	bool lbrv_enabled		: 1;
	bool pause_filter_enabled	: 1;
	bool pause_threshold_enabled	: 1;
	bool vgif_enabled		: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;
};

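/*
 * Per-CPU SVM data: the ASID allocator state for this CPU, the host state
 * save area programmed into MSR_VM_HSAVE_PA, and (for SEV) a per-ASID
 * table of the VMCBs last run on this CPU so stale state can be flushed.
 */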
struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

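/*
 * The SEV/SEV-ES checks compile to constant false when CONFIG_KVM_AMD_SEV
 * is not set, so SEV-only paths are optimized out of non-SEV builds.
 */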
static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
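
/*
 * Illustrative (not from this file) usage of the clean-bit helpers: after
 * software modifies a VMCB field, the corresponding clean bit must be
 * cleared so hardware reloads that guest state on the next VMRUN, e.g.:
 *
 *	svm->vmcb->save.cr0 = cr0;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 *
 * vmcb_mark_all_clean() is called once the VMCB has been consumed by VMRUN.
 */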

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * registers are synchronized eagerly, as reading them out of the VMCB
 * is cheap.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

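/*
 * Intercepts are configured on vmcb01 (the L1 VMCB); recalc_intercepts()
 * then merges them with L1's requested intercepts into the currently
 * active VMCB when the vCPU is running in guest (L2) mode.
 */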
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

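/*
 * With hardware vGIF, the Global Interrupt Flag is tracked in a VMCB's
 * int_ctl field (vmcb01, or vmcb02 while L2 runs without nested vGIF
 * enabled); without vGIF, KVM tracks GIF in vcpu->arch.hflags.  The
 * helpers below hide that distinction.
 */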
static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

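/*
 * Each MSR takes two permission bits (read and write) in the MSR permission
 * bitmap, so one u32 of the bitmap covers 16 MSRs.  Within the first MSRPM
 * range (MSRs 0x0000 - 0x1fff) a u32 offset therefore maps back to MSR
 * "offset * 16", which is what is_x2apic_msrpm_offset() relies on to detect
 * the x2APIC MSR range (0x800 - 0x8ff).
 */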
static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}

/* svm.c */
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

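/*
 * Synthesize a nested #VMEXIT with the given exit code and zeroed exit
 * info fields, for exits that KVM injects on L1's behalf rather than in
 * direct response to a hardware #VMEXIT.
 */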
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

bool avic_hardware_setup(struct kvm_x86_ops *ops);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif