// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid.h>
#include <asm/cmdline.h>

#define DR7_RESET_VALUE        0x400

/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
#define AP_INIT_CS_LIMIT		0xffff
#define AP_INIT_DS_LIMIT		0xffff
#define AP_INIT_LDTR_LIMIT		0xffff
#define AP_INIT_GDTR_LIMIT		0xffff
#define AP_INIT_IDTR_LIMIT		0xffff
#define AP_INIT_TR_LIMIT		0xffff
#define AP_INIT_RFLAGS_DEFAULT		0x2
#define AP_INIT_DR6_DEFAULT		0xffff0ff0
#define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
#define AP_INIT_XCR0_DEFAULT		0x1
#define AP_INIT_X87_FTW_DEFAULT		0x5555
#define AP_INIT_X87_FCW_DEFAULT		0x0040
#define AP_INIT_CR0_DEFAULT		0x60000010
#define AP_INIT_MXCSR_DEFAULT		0x1f80

/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
static struct ghcb *boot_ghcb __section(".data");

/* Bitmap of SEV features supported by the hypervisor */
static u64 sev_hv_features __ro_after_init;

/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC handler it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};

struct ghcb_state {
	struct ghcb *ghcb;
};

static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);

static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);

struct sev_config {
	__u64 debug		: 1,
	      __reserved	: 63;
};

static struct sev_config sev_cfg __read_mostly;

static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}

/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can be also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If NMI happened while on the #VC IST stack, set the new IST value
	 * below regs->sp, so that the interrupted stack frame is not
	 * overwritten by subsequent #VC exceptions.
	 */
	if (on_vc_stack(regs))
		new_ist = regs->sp;

	/*
	 * Reserve additional 8 bytes and store old IST value so this
	 * adjustment can be unrolled in __sev_es_ist_exit().
	 */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active        = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}

static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
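
/*
 * Note on the two GHCB channels: besides the shared GHCB page, the GHCB
 * MSR also implements a lightweight request/response protocol where the
 * whole message is encoded in the MSR value itself. A sketch of a Page
 * State Change request over the MSR protocol, as done by
 * early_set_pages_state() further down (illustrative only):
 *
 *	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
 *	VMGEXIT();
 *	val = sev_es_rd_ghcb_msr();
 *	if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
 *		... the hypervisor did not understand the request ...
 */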

static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{
	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}

static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int insn_bytes;

	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
	if (insn_bytes == 0) {
		/* Nothing could be copied */
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	} else if (insn_bytes == -EINVAL) {
		/* Effective RIP could not be calculated */
		ctxt->fi.vector     = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		ctxt->fi.cr2        = 0;
		return ES_EXCEPTION;
	}

	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
		return ES_DECODE_FAILED;

	if (ctxt->insn.immediate.got)
		return ES_OK;
	else
		return ES_DECODE_FAILED;
}

static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int res, ret;

	res = vc_fetch_insn_kernel(ctxt, buffer);
	if (res) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_INSTR;
		ctxt->fi.cr2        = ctxt->regs->ip;
		return ES_EXCEPTION;
	}

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;
	else
		return ES_OK;
}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	if (user_mode(ctxt->regs))
		return __vc_decode_user_insn(ctxt);
	else
		return __vc_decode_kern_insn(ctxt);
}

static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;

	/*
	 * This function uses __put_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __put_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __put_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_to_user() here because
	 * vc_write_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whatever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *target = (u8 __user *)dst;

		memcpy(&d1, buf, 1);
		if (__put_user(d1, target))
			goto fault;
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *target = (u16 __user *)dst;

		memcpy(&d2, buf, 2);
		if (__put_user(d2, target))
			goto fault;
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *target = (u32 __user *)dst;

		memcpy(&d4, buf, 4);
		if (__put_user(d4, target))
			goto fault;
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *target = (u64 __user *)dst;

		memcpy(&d8, buf, 8);
		if (__put_user(d8, target))
			goto fault;
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)dst;

	return ES_EXCEPTION;
}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT;

	/*
	 * This function uses __get_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __get_user() does no
	 * sanity checks of the pointer being accessed. All that it does is
	 * to report when the access failed.
	 *
	 * Also, this function runs in atomic context, so __get_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_from_user() here because
	 * vc_read_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whatever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *s = (u8 __user *)src;

		if (__get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *s = (u16 __user *)src;

		if (__get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *s = (u32 __user *)src;

		if (__get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *s = (u64 __user *)src;

		if (__get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)src;

	return ES_EXCEPTION;
}

static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
					   unsigned long vaddr, phys_addr_t *paddr)
{
	unsigned long va = (unsigned long)vaddr;
	unsigned int level;
	phys_addr_t pa;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd = &pgd[pgd_index(va)];
	pte = lookup_address_in_pgd(pgd, va, &level);
	if (!pte) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.cr2        = vaddr;
		ctxt->fi.error_code = 0;

		if (user_mode(ctxt->regs))
			ctxt->fi.error_code |= X86_PF_USER;

		return ES_EXCEPTION;
	}

	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
		/* Emulated MMIO to/from encrypted memory not supported */
		return ES_UNSUPPORTED;

	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
	pa |= va & ~page_level_mask(level);

	*paddr = pa;

	return ES_OK;
}
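
/*
 * Worked example for the address composition above: for a 2M mapping,
 * ~page_level_mask(level) keeps the low 21 bits, so the translation
 * combines the pfn-derived bits with the in-page offset of the virtual
 * address, roughly:
 *
 *	pa = (pte_pfn(*pte) << PAGE_SHIFT) | (va & (PMD_SIZE - 1));
 *
 * For a 4K mapping only the low 12 bits of 'va' survive.
 */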

/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"

static noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so that stale fields are not mistaken
		 * for valid data on the next VMGEXIT.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}
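
/*
 * A minimal sketch of the canonical GHCB usage pattern, as followed by
 * __sev_es_nmi_complete() below and by most other VMGEXIT call sites in
 * this file (callers in preemptible context must disable IRQs first):
 *
 *	ghcb = __sev_get_ghcb(&state);
 *	vc_ghcb_invalidate(ghcb);
 *	... set exit code / exit info / register fields ...
 *	sev_es_wr_ghcb_msr(__pa(ghcb));
 *	VMGEXIT();
 *	... read results from ghcb->save ...
 *	__sev_put_ghcb(&state);
 */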

void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}

static u64 __init get_secrets_page(void)
{
	u64 pa_data = boot_params.cc_blob_address;
	struct cc_blob_sev_info info;
	void *map;

	/*
	 * The CC blob contains the address of the secrets page, check if the
	 * blob is present.
	 */
	if (!pa_data)
		return 0;

	map = early_memremap(pa_data, sizeof(info));
	if (!map) {
		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
		return 0;
	}
	memcpy(&info, map, sizeof(info));
	early_memunmap(map, sizeof(info));

	/* smoke-test the secrets page passed */
	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
		return 0;

	return info.secrets_phys;
}

static u64 __init get_snp_jump_table_addr(void)
{
	struct snp_secrets_page_layout *layout;
	void __iomem *mem;
	u64 pa, addr;

	pa = get_secrets_page();
	if (!pa)
		return 0;

	mem = ioremap_encrypted(pa, PAGE_SIZE);
	if (!mem) {
		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
		return 0;
	}

	layout = (__force struct snp_secrets_page_layout *)mem;

	addr = layout->os_area.ap_jump_table_pa;
	iounmap(mem);

	return addr;
}

static u64 __init get_jump_table_addr(void)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u64 ret = 0;

	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return get_snp_jump_table_addr();

	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
	    ghcb_sw_exit_info_2_is_valid(ghcb))
		ret = ghcb->save.sw_exit_info_2;

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	return ret;
}

static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool validate)
{
	unsigned long vaddr_end;
	int rc;

	vaddr = vaddr & PAGE_MASK;
	vaddr_end = vaddr + (npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
		if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);

		vaddr = vaddr + PAGE_SIZE;
	}
}
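
/*
 * Note on ordering (see the callers below): when converting memory to
 * private, the pages must first be assigned to the guest in the RMP
 * table (page state change) and only then PVALIDATEd; when converting
 * to shared, the PVALIDATE must be undone first. A sketch of the two
 * sequences, as implemented by early_snp_set_memory_private() and
 * early_snp_set_memory_shared():
 *
 *	private:  early_set_pages_state(paddr, n, SNP_PAGE_STATE_PRIVATE);
 *	          pvalidate_pages(vaddr, n, true);
 *
 *	shared:   pvalidate_pages(vaddr, n, false);
 *	          early_set_pages_state(paddr, n, SNP_PAGE_STATE_SHARED);
 */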

static void __init early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
{
	unsigned long paddr_end;
	u64 val;

	paddr = paddr & PAGE_MASK;
	paddr_end = paddr + (npages << PAGE_SHIFT);

	while (paddr < paddr_end) {
		/*
		 * Use the MSR protocol because this function can be called before
		 * the GHCB is established.
		 */
		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
		VMGEXIT();

		val = sev_es_rd_ghcb_msr();

		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
			 "Wrong PSC response code: 0x%x\n",
			 (unsigned int)GHCB_RESP_CODE(val)))
			goto e_term;

		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
			goto e_term;

		paddr = paddr + PAGE_SIZE;
	}

	return;

e_term:
	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned int npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/*
	 * Ask the hypervisor to mark the memory pages as private in the RMP
	 * table.
	 */
	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);

	/* Validate the memory pages after they've been added in the RMP table. */
	pvalidate_pages(vaddr, npages, true);
}

void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned int npages)
{
	/*
	 * This can be invoked in early boot while running identity mapped, so
	 * use an open coded check for SNP instead of using cc_platform_has().
	 * This eliminates worries about jump tables or checking boot_cpu_data
	 * in the cc_platform_has() function.
	 */
	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/* Invalidate the memory pages before they are marked shared in the RMP table. */
	pvalidate_pages(vaddr, npages, false);

	/* Ask hypervisor to mark the memory pages shared in the RMP table. */
	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
}

void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
{
	unsigned long vaddr, npages;

	vaddr = (unsigned long)__va(paddr);
	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (op == SNP_PAGE_STATE_PRIVATE)
		early_snp_set_memory_private(vaddr, paddr, npages);
	else if (op == SNP_PAGE_STATE_SHARED)
		early_snp_set_memory_shared(vaddr, paddr, npages);
	else
		WARN(1, "invalid memory op %d\n", op);
}

static int vmgexit_psc(struct snp_psc_desc *desc)
{
	int cur_entry, end_entry, ret = 0;
	struct snp_psc_desc *data;
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;

	/*
	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
	 * a per-CPU GHCB.
	 */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = 1;
		goto out_unlock;
	}

	/* Copy the input desc into GHCB shared buffer */
	data = (struct snp_psc_desc *)ghcb->shared_buffer;
	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));

	/*
	 * As per the GHCB specification, the hypervisor can resume the guest
	 * before processing all the entries. Check whether all the entries
	 * are processed. If not, then keep retrying. Note, the hypervisor
	 * will update the data memory directly to indicate the status, so
	 * reference the data->hdr everywhere.
	 *
	 * The strategy here is to wait for the hypervisor to change the page
	 * state in the RMP table before guest accesses the memory pages. If the
	 * page state change was not successful, then later memory access will
	 * result in a crash.
	 */
	cur_entry = data->hdr.cur_entry;
	end_entry = data->hdr.end_entry;

	while (data->hdr.cur_entry <= data->hdr.end_entry) {
		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));

		/* This will advance the shared buffer data points to */
		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);

		/*
		 * Page State Change VMGEXIT can pass error code through
		 * exit_info_2.
		 */
		if (WARN(ret || ghcb->save.sw_exit_info_2,
			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
			 ret, ghcb->save.sw_exit_info_2)) {
			ret = 1;
			goto out;
		}

		/* Verify that reserved bit is not set */
		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
			ret = 1;
			goto out;
		}

		/*
		 * Sanity check that entry processing is not going backwards.
		 * This will happen only if hypervisor is tricking us.
		 */
		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
			 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
			ret = 1;
			goto out;
		}
	}

out:
	__sev_put_ghcb(&state);

out_unlock:
	local_irq_restore(flags);

	return ret;
}

static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
			      unsigned long vaddr_end, int op)
{
	struct psc_hdr *hdr;
	struct psc_entry *e;
	unsigned long pfn;
	int i;

	hdr = &data->hdr;
	e = data->entries;

	memset(data, 0, sizeof(*data));
	i = 0;

	while (vaddr < vaddr_end) {
		if (is_vmalloc_addr((void *)vaddr))
			pfn = vmalloc_to_pfn((void *)vaddr);
		else
			pfn = __pa(vaddr) >> PAGE_SHIFT;

		e->gfn = pfn;
		e->operation = op;
		hdr->end_entry = i;

		/*
		 * Current SNP implementation doesn't keep track of the RMP page
		 * size so use 4K for simplicity.
		 */
		e->pagesize = RMP_PG_SIZE_4K;

		vaddr = vaddr + PAGE_SIZE;
		e++;
		i++;
	}

	if (vmgexit_psc(data))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}

static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
{
	unsigned long vaddr_end, next_vaddr;
	struct snp_psc_desc *desc;

	desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
	if (!desc)
		panic("SNP: failed to allocate memory for PSC descriptor\n");

	vaddr = vaddr & PAGE_MASK;
	vaddr_end = vaddr + (npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */
		next_vaddr = min_t(unsigned long, vaddr_end,
				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);

		__set_pages_state(desc, vaddr, next_vaddr, op);

		vaddr = next_vaddr;
	}

	kfree(desc);
}

void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	pvalidate_pages(vaddr, npages, false);

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
}

void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);

	pvalidate_pages(vaddr, npages, true);
}

static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
	 * using the RMPADJUST instruction. However, for the instruction to
	 * succeed it must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
	 * instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

#define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
#define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
#define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)

#define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
#define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)
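
/*
 * Worked example of the resulting attribute encodings, assuming the
 * SVM_SELECTOR_* bit layout from <asm/svm.h> (P = bit 7, S = bit 4,
 * CODE = bit 3, READ/WRITE = bit 1):
 *
 *	INIT_CS_ATTRIBS = 0x80 | 0x10 | 0x08 | 0x02 = 0x9a
 *	INIT_DS_ATTRIBS = 0x80 | 0x10 | 0x02        = 0x92
 *
 * which match the classic real-mode code/data descriptor access bytes.
 */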

static void *snp_alloc_vmsa_page(void)
{
	struct page *p;

	/*
	 * Allocate VMSA page to work around the SNP erratum where the CPU will
	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
	 * collides with the RMP entry of the VMSA page. The recommended
	 * workaround is to not use a large page.
	 *
	 * Allocate an 8k page which is also 8k-aligned.
	 */
	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
	if (!p)
		return NULL;

	split_page(p, 1);

	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
	__free_page(p);

	return page_address(p + 1);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
	int err;

	err = snp_set_vmsa(vmsa, false);
	if (err)
		pr_err("clear VMSA page failed (%u), leaking page\n", err);
	else
		free_page((unsigned long)vmsa);
}

static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
{
	struct sev_es_save_area *cur_vmsa, *vmsa;
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u8 sipi_vector;
	int cpu, ret;
	u64 cr4;

	/*
	 * The hypervisor SNP feature support check has happened earlier, just
	 * check the AP_CREATION one here.
	 */
	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
		return -EOPNOTSUPP;

	/*
	 * Verify the desired start IP against the known trampoline start IP
	 * to catch any future new APs that may be started from an unknown
	 * location.
	 */
	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
		      "Unsupported SNP start_ip: %lx\n", start_ip))
		return -EINVAL;

	/* Override start_ip with known protected guest start IP */
	start_ip = real_mode_header->sev_es_trampoline_start;

	/* Find the logical CPU for the APIC ID */
	for_each_present_cpu(cpu) {
		if (arch_match_cpu_phys_id(cpu, apic_id))
			break;
	}
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	cur_vmsa = per_cpu(sev_vmsa, cpu);

	/*
	 * A new VMSA is created each time because there is no guarantee that
	 * the current VMSA is the kernel's or that the vCPU is not running. If
	 * an attempt was done to use the current VMSA with a running vCPU, a
	 * #VMEXIT of that vCPU would wipe out all of the settings being done
	 * here.
	 */
	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
	if (!vmsa)
		return -ENOMEM;

	/* CR4 should maintain the MCE value */
	cr4 = native_read_cr4() & X86_CR4_MCE;

	/* Set the CS value based on the start_ip converted to a SIPI vector */
	sipi_vector		= (start_ip >> 12);
	vmsa->cs.base		= sipi_vector << 12;
	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
	vmsa->cs.selector	= sipi_vector << 8;

	/* Set the RIP value based on start_ip */
	vmsa->rip		= start_ip & 0xfff;

	/* Set AP INIT defaults as documented in the APM */
	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
	vmsa->es		= vmsa->ds;
	vmsa->fs		= vmsa->ds;
	vmsa->gs		= vmsa->ds;
	vmsa->ss		= vmsa->ds;

	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
	vmsa->tr.attrib		= INIT_TR_ATTRIBS;

	vmsa->cr4		= cr4;
	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
	vmsa->dr7		= DR7_RESET_VALUE;
	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;

	/* SVME must be set. */
	vmsa->efer		= EFER_SVME;

	/*
	 * Set the SNP-specific fields for this VMSA:
	 *   VMPL level
	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
	 */
	vmsa->vmpl		= 0;
	vmsa->sev_features	= sev_status >> 2;

	/* Switch the page over to a VMSA page now that it is initialized */
	ret = snp_set_vmsa(vmsa, true);
	if (ret) {
		pr_err("set VMSA page failed (%u)\n", ret);
		free_page((unsigned long)vmsa);

		return -EINVAL;
	}

	/* Issue VMGEXIT AP Creation NAE event */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_rax(ghcb, vmsa->sev_features);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
		pr_err("SNP AP Creation error\n");
		ret = -EINVAL;
	}

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	/* Perform cleanup if there was an error */
	if (ret) {
		snp_cleanup_vmsa(vmsa);
		vmsa = NULL;
	}

	/* Free up any previous VMSA page */
	if (cur_vmsa)
		snp_cleanup_vmsa(cur_vmsa);

	/* Record the current VMSA page */
	per_cpu(sev_vmsa, cpu) = vmsa;

	return ret;
}

void snp_set_wakeup_secondary_cpu(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/*
	 * Always set this override if SNP is enabled. This makes it the
	 * required method to start APs under SNP. If the hypervisor does
	 * not support AP creation, then no APs will be started.
	 */
	apic->wakeup_secondary_cpu = wakeup_cpu_via_vmgexit;
}

int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
	u16 startup_cs, startup_ip;
	phys_addr_t jump_table_pa;
	u64 jump_table_addr;
	u16 __iomem *jump_table;

	jump_table_addr = get_jump_table_addr();

	/* On UP guests there is no jump table so this is not a failure */
	if (!jump_table_addr)
		return 0;

	/* Check if AP Jump Table is page-aligned */
	if (jump_table_addr & ~PAGE_MASK)
		return -EINVAL;

	jump_table_pa = jump_table_addr & PAGE_MASK;

	startup_cs = (u16)(rmh->trampoline_start >> 4);
	startup_ip = (u16)(rmh->sev_es_trampoline_start -
			   rmh->trampoline_start);

	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
	if (!jump_table)
		return -EIO;

	writew(startup_ip, &jump_table[0]);
	writew(startup_cs, &jump_table[1]);

	iounmap(jump_table);

	return 0;
}

/*
 * This is needed by the OVMF UEFI runtime services. This UEFI
 * implementation switches to its own page table during runtime service
 * calls, so the per-CPU GHCB pages need to be mapped there as well.
 */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
	struct sev_es_runtime_data *data;
	unsigned long address, pflags;
	int cpu;
	u64 pfn;

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return 0;

	pflags = _PAGE_NX | _PAGE_RW;

	for_each_possible_cpu(cpu) {
		data = per_cpu(runtime_data, cpu);

		address = __pa(&data->ghcb_page);
		pfn = address >> PAGE_SHIFT;

		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
			return 1;
	}

	return 0;
}

static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	enum es_result ret;
	u64 exit_info_1;

	/* Is it a WRMSR? */
	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;

	ghcb_set_rcx(ghcb, regs->cx);
	if (exit_info_1) {
		ghcb_set_rax(ghcb, regs->ax);
		ghcb_set_rdx(ghcb, regs->dx);
	}

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);

	if ((ret == ES_OK) && (!exit_info_1)) {
		regs->ax = ghcb->save.rax;
		regs->dx = ghcb->save.rdx;
	}

	return ret;
}

static void snp_register_per_cpu_ghcb(void)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	snp_register_ghcb_early(__pa(ghcb));
}

void setup_ghcb(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;

	/* First make sure the hypervisor talks a supported protocol. */
	if (!sev_es_negotiate_protocol())
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

	/*
	 * Check whether the runtime #VC exception handler is active. It uses
	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
	 *
	 * If SNP is active, register the per-CPU GHCB page so that the runtime
	 * exception handler can use it.
	 */
	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
			snp_register_per_cpu_ghcb();

		return;
	}

	/*
	 * Clear the boot_ghcb. The first exception comes in before the bss
	 * section is cleared.
	 */
	memset(&boot_ghcb_page, 0, PAGE_SIZE);

	/* Alright - Make the boot-ghcb public */
	boot_ghcb = &boot_ghcb_page;

	/* SNP guest requires that GHCB GPA must be registered. */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		snp_register_ghcb_early(__pa(&boot_ghcb_page));
}

#ifdef CONFIG_HOTPLUG_CPU
static void sev_es_ap_hlt_loop(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	while (true) {
		vc_ghcb_invalidate(ghcb);
		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);

		sev_es_wr_ghcb_msr(__pa(ghcb));
		VMGEXIT();

		/* Wakeup signal? */
		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
		    ghcb->save.sw_exit_info_2)
			break;
	}

	__sev_put_ghcb(&state);
}

/*
 * Play_dead handler when running under SEV-ES. This is needed because
 * the hypervisor can't deliver an SIPI request to restart the AP.
 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
 * hypervisor wakes it up again.
 */
static void sev_es_play_dead(void)
{
	play_dead_common();

	/* IRQs now disabled */

	sev_es_ap_hlt_loop();

	/*
	 * If we get here, the VCPU was woken up again. Jump to CPU
	 * startup code to get it back online.
	 */
	start_cpu0();
}
#else  /* CONFIG_HOTPLUG_CPU */
#define sev_es_play_dead	native_play_dead
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_SMP
static void __init sev_es_setup_play_dead(void)
{
	smp_ops.play_dead = sev_es_play_dead;
}
#else
static inline void sev_es_setup_play_dead(void) { }
#endif

static void __init alloc_runtime_data(int cpu)
{
	struct sev_es_runtime_data *data;

	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
	if (!data)
		panic("Can't allocate SEV-ES runtime data");

	per_cpu(runtime_data, cpu) = data;
}

static void __init init_ghcb(int cpu)
{
	struct sev_es_runtime_data *data;
	int err;

	data = per_cpu(runtime_data, cpu);

	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
					 sizeof(data->ghcb_page));
	if (err)
		panic("Can't map GHCBs unencrypted");

	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));

	data->ghcb_active = false;
	data->backup_ghcb_active = false;
}

void __init sev_es_init_vc_handling(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return;

	if (!sev_es_check_cpu_features())
		panic("SEV-ES CPU Features missing");

	/*
	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
	 * features.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
		sev_hv_features = get_hv_features();

		if (!(sev_hv_features & GHCB_HV_FT_SNP))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
	}

	/* Enable SEV-ES special handling */
	static_branch_enable(&sev_es_enable_key);

	/* Initialize per-cpu GHCB pages */
	for_each_possible_cpu(cpu) {
		alloc_runtime_data(cpu);
		init_ghcb(cpu);
	}

	sev_es_setup_play_dead();

	/* Secondary CPUs use the runtime #VC handler */
	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}

static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{
	int trapnr = ctxt->fi.vector;

	if (trapnr == X86_TRAP_PF)
		native_write_cr2(ctxt->fi.cr2);

	ctxt->regs->orig_ax = ctxt->fi.error_code;
	do_early_exception(ctxt->regs, trapnr);
}

static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset	  = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}
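
/*
 * Illustrative example: for "mov %rax, %dr7", insn_get_modrm_rm_off()
 * resolves the ModRM r/m field to the pt_regs offset of RAX, so
 * vc_insn_get_rm() returns &ctxt->regs->ax and the DR7 handlers below
 * read or write the emulated debug-register value through that pointer.
 */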

static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 unsigned int bytes, bool read)
{
	u64 exit_code, exit_info_1, exit_info_2;
	unsigned long ghcb_pa = __pa(ghcb);
	enum es_result res;
	phys_addr_t paddr;
	void __user *ref;

	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
	if (ref == (void __user *)-1L)
		return ES_UNSUPPORTED;

	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;

	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
	if (res != ES_OK) {
		if (res == ES_EXCEPTION && !read)
			ctxt->fi.error_code |= X86_PF_WRITE;

		return res;
	}

	exit_info_1 = paddr;
	/* Can never be greater than 8 */
	exit_info_2 = bytes;

	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));

	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
}

/*
 * The MOVS instruction has two memory operands, which raises the
 * problem that it is not known whether the access to the source or the
 * destination caused the #VC exception (and hence whether an MMIO read
 * or write operation needs to be emulated).
 *
 * Instead of playing games with walking page-tables and trying to guess
 * whether the source or destination is an MMIO range, split the move
 * into two operations, a read and a write with only one memory operand.
 * This will cause a nested #VC exception on the MMIO address which can
 * then be handled.
 *
 * This implementation has the benefit that it also supports MOVS where
 * source _and_ destination are MMIO regions.
 *
 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
 * rare operation. If it turns out to be a performance problem the split
 * operations can be moved to memcpy_fromio() and memcpy_toio().
 */
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
					  unsigned int bytes)
{
	unsigned long ds_base, es_base;
	unsigned char *src, *dst;
	unsigned char buffer[8];
	enum es_result ret;
	bool rep;
	int off;

	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

	if (ds_base == -1L || es_base == -1L) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	src = ds_base + (unsigned char *)ctxt->regs->si;
	dst = es_base + (unsigned char *)ctxt->regs->di;

	ret = vc_read_mem(ctxt, src, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	ret = vc_write_mem(ctxt, dst, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	if (ctxt->regs->flags & X86_EFLAGS_DF)
		off = -bytes;
	else
		off = bytes;

	ctxt->regs->si += off;
	ctxt->regs->di += off;

	rep = insn_has_rep_prefix(&ctxt->insn);
	if (rep)
		ctxt->regs->cx -= 1;

	if (!rep || ctxt->regs->cx == 0)
		return ES_OK;
	else
		return ES_RETRY;
}

static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct insn *insn = &ctxt->insn;
	unsigned int bytes = 0;
	enum mmio_type mmio;
	enum es_result ret;
	u8 sign_byte;
	long *reg_data;

	mmio = insn_decode_mmio(insn, &bytes);
	if (mmio == MMIO_DECODE_FAILED)
		return ES_DECODE_FAILED;

	if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
		if (!reg_data)
			return ES_DECODE_FAILED;
	}

	switch (mmio) {
	case MMIO_WRITE:
		memcpy(ghcb->shared_buffer, reg_data, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case MMIO_WRITE_IMM:
		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
		break;
	case MMIO_READ:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero-extend for 32-bit operation */
		if (bytes == 4)
			*reg_data = 0;

		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_READ_ZERO_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		/* Zero extend based on operand size */
		memset(reg_data, 0, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_READ_SIGN_EXTEND:
		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
		if (ret)
			break;

		if (bytes == 1) {
			u8 *val = (u8 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x80) ? 0xff : 0x00;
		} else {
			u16 *val = (u16 *)ghcb->shared_buffer;

			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
		}

		/* Sign extend based on operand size */
		memset(reg_data, sign_byte, insn->opnd_bytes);
		memcpy(reg_data, ghcb->shared_buffer, bytes);
		break;
	case MMIO_MOVS:
		ret = vc_handle_mmio_movs(ctxt, bytes);
		break;
	default:
		ret = ES_UNSUPPORTED;
		break;
	}

	return ret;
}

static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long val, *reg = vc_insn_get_rm(ctxt);
	enum es_result ret;

	if (!reg)
		return ES_DECODE_FAILED;

	val = *reg;

	/* Upper 32 bits must be written as zeroes */
	if (val >> 32) {
		ctxt->fi.vector = X86_TRAP_GP;
		ctxt->fi.error_code = 0;
		return ES_EXCEPTION;
	}

	/* Clear out other reserved bits and set bit 10 */
	val = (val & 0xffff23ffL) | BIT(10);

	/* Early non-zero writes to DR7 are not supported */
	if (!data && (val & ~DR7_RESET_VALUE))
		return ES_UNSUPPORTED;

	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
	ghcb_set_rax(ghcb, val);
	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (data)
		data->dr7 = val;

	return ES_OK;
}

static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
					 struct es_em_ctxt *ctxt)
{
	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
	long *reg = vc_insn_get_rm(ctxt);

	if (!reg)
		return ES_DECODE_FAILED;

	if (data)
		*reg = data->dr7;
	else
		*reg = DR7_RESET_VALUE;

	return ES_OK;
}

static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
				       struct es_em_ctxt *ctxt)
{
	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
}

static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rcx(ghcb, ctxt->regs->cx);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;

	return ES_OK;
}

static enum es_result vc_handle_monitor(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Treat it as a NOP and do not leak a physical address to the
	 * hypervisor.
	 */
	return ES_OK;
}

static enum es_result vc_handle_mwait(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	/* Treat the same as MONITOR/MONITORX */
	return ES_OK;
}

static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	enum es_result ret;

	ghcb_set_rax(ghcb, ctxt->regs->ax);
	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);

	if (x86_platform.hyper.sev_es_hcall_prepare)
		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!ghcb_rax_is_valid(ghcb))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;

	/*
	 * Call sev_es_hcall_finish() after regs->ax is already set.
	 * This allows the hypervisor handler to overwrite it again if
	 * necessary.
	 */
	if (x86_platform.hyper.sev_es_hcall_finish &&
	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
		return ES_VMM_ERROR;

	return ES_OK;
}

static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{
	/*
	 * Calling exc_alignment_check() directly does not work, because it
	 * enables IRQs and the GHCB is active. Forward the exception and call
	 * it later from vc_forward_exception().
	 */
	ctxt->fi.vector = X86_TRAP_AC;
	ctxt->fi.error_code = 0;
	return ES_EXCEPTION;
}

static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
					 struct ghcb *ghcb,
					 unsigned long exit_code)
{
	enum es_result result;

	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		result = vc_handle_dr7_read(ghcb, ctxt);
		break;
	case SVM_EXIT_WRITE_DR7:
		result = vc_handle_dr7_write(ghcb, ctxt);
		break;
	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
		result = vc_handle_trap_ac(ghcb, ctxt);
		break;
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
		break;
	case SVM_EXIT_RDPMC:
		result = vc_handle_rdpmc(ghcb, ctxt);
		break;
	case SVM_EXIT_INVD:
		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
		result = ES_UNSUPPORTED;
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(ghcb, ctxt);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(ghcb, ctxt);
		break;
	case SVM_EXIT_MSR:
		result = vc_handle_msr(ghcb, ctxt);
		break;
	case SVM_EXIT_VMMCALL:
		result = vc_handle_vmmcall(ghcb, ctxt);
		break;
	case SVM_EXIT_WBINVD:
		result = vc_handle_wbinvd(ghcb, ctxt);
		break;
	case SVM_EXIT_MONITOR:
		result = vc_handle_monitor(ghcb, ctxt);
		break;
	case SVM_EXIT_MWAIT:
		result = vc_handle_mwait(ghcb, ctxt);
		break;
	case SVM_EXIT_NPF:
		result = vc_handle_mmio(ghcb, ctxt);
		break;
	default:
		/*
		 * Unexpected #VC exception
		 */
		result = ES_UNSUPPORTED;
	}

	return result;
}

static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{
	long error_code = ctxt->fi.error_code;
	int trapnr = ctxt->fi.vector;

	ctxt->regs->orig_ax = ctxt->fi.error_code;

	switch (trapnr) {
	case X86_TRAP_GP:
		exc_general_protection(ctxt->regs, error_code);
		break;
	case X86_TRAP_UD:
		exc_invalid_op(ctxt->regs);
		break;
	case X86_TRAP_PF:
		write_cr2(ctxt->fi.cr2);
		exc_page_fault(ctxt->regs, error_code);
		break;
	case X86_TRAP_AC:
		exc_alignment_check(ctxt->regs, error_code);
		break;
	default:
		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
		BUG();
	}
}

static __always_inline bool is_vc2_stack(unsigned long sp)
{
	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}

static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
{
	unsigned long sp, prev_sp;

	sp      = (unsigned long)regs;
	prev_sp = regs->sp;

	/*
	 * If the code was already executing on the VC2 stack when the #VC
	 * happened, let it proceed to the normal handling routine. This way the
	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
	 */
	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
}

static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	enum es_result result;
	struct ghcb *ghcb;
	bool ret = true;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, error_code);

	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, ghcb, error_code);

	__sev_put_ghcb(&state);

	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_VMM_ERROR:
		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_DECODE_FAILED:
		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
				   error_code, regs->ip);
		ret = false;
		break;
	case ES_EXCEPTION:
		vc_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		pr_emerg("Unknown result in %s():%d\n", __func__, result);
		/*
		 * Emulating the instruction which caused the #VC exception
		 * failed - can't continue so print debug information
		 */
		BUG();
	}

	return ret;
}

static __always_inline bool vc_is_db(unsigned long error_code)
{
	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
}

/*
 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
 * and will panic when an error happens.
 */
DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
{
	irqentry_state_t irq_state;

	/*
	 * #VC exceptions only happen at known places, like intercepted
	 * instructions or accesses to MMIO areas/IO ports, so handling them
	 * is normally safe. The exception is a #VC raised while already
	 * running on the VC fall-back stack (VC2) from some other context -
	 * there is no way to handle that safely, so the only option left is
	 * to panic.
	 */
	if (unlikely(vc_from_invalid_context(regs))) {
		instrumentation_begin();
		panic("Can't handle #VC exception from unsupported context\n");
		instrumentation_end();
	}

	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (vc_is_db(error_code)) {
		exc_debug(regs);
		return;
	}

	irq_state = irqentry_nmi_enter(regs);

	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/* Show some debug info */
		show_regs(regs);

		/* Ask hypervisor to sev_es_terminate */
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

		/* If that fails and we get here - just panic */
		panic("Returned from Terminate-Request to Hypervisor\n");
	}

	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);
}

/*
 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
 * and will kill the offending task when an error happens.
 */
DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
	/*
	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
	 */
	if (vc_is_db(error_code)) {
		noist_exc_debug(regs);
		return;
	}

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/*
		 * Do not kill the machine if user-space triggered the
		 * exception. Send SIGBUS instead and let user-space deal with
		 * it.
		 */
		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
	}

	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
{
	unsigned long exit_code = regs->orig_ax;
	struct es_em_ctxt ctxt;
	enum es_result result;

	vc_ghcb_invalidate(boot_ghcb);

	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result == ES_OK)
		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);

	/* Done - now check the result */
	switch (result) {
	case ES_OK:
		vc_finish_insn(&ctxt);
		break;
	case ES_UNSUPPORTED:
		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
			     exit_code, regs->ip);
		goto fail;
	case ES_VMM_ERROR:
		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
			     exit_code, regs->ip);
		goto fail;
	case ES_DECODE_FAILED:
		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
			     exit_code, regs->ip);
		goto fail;
	case ES_EXCEPTION:
		vc_early_forward_exception(&ctxt);
		break;
	case ES_RETRY:
		/* Nothing to do */
		break;
	default:
		BUG();
	}

	return true;

fail:
	show_regs(regs);

	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	/* Boot kernel would have passed the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If kernel was booted directly, without the use of the
	 * boot/decompression kernel, the CC blob may have been passed via
	 * setup_data instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();

	return cc_info;
}

bool __init snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	setup_cpuid_table(cc_info);

	/*
	 * The CC blob will be used later to access the secrets page. Cache
	 * it here like the boot kernel does.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}

void __init __noreturn snp_abort(void)
{
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}

static void dump_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	int i = 0;

	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);

	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
	}
}

/*
 * It is useful from an auditing/testing perspective to provide an easy way
 * for the guest owner to know that the CPUID table has been initialized as
 * expected, but that initialization happens too early in boot to print any
 * sort of indicator, and there's not really any ergonomic way to do it
 * earlier than this initcall, so report it here.
 */
static int __init report_cpuid_table(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (!cpuid_table->count)
		return 0;

	pr_info("Using SNP CPUID table, %d entries present.\n",
		cpuid_table->count);

	if (sev_cfg.debug)
		dump_cpuid_table();

	return 0;
}
arch_initcall(report_cpuid_table);

static int __init init_sev_config(char *str)
{
	char *s;

	while ((s = strsep(&str, ","))) {
		if (!strcmp(s, "debug")) {
			sev_cfg.debug = true;
			continue;
		}

		pr_info("SEV command-line option '%s' was not recognized\n", s);
	}

	return 1;
}
__setup("sev=", init_sev_config);
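
/*
 * Example: booting with "sev=debug" on the kernel command line sets
 * sev_cfg.debug, which makes report_cpuid_table() above dump the full
 * SNP CPUID table during boot. Unrecognized options are only reported,
 * not treated as errors.
 */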

int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err)
{
	struct ghcb_state state;
	struct es_em_ctxt ctxt;
	unsigned long flags;
	struct ghcb *ghcb;
	int ret;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	if (!fw_err)
		return -EINVAL;

	/*
	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
	 * a per-CPU GHCB.
	 */
	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);
	if (!ghcb) {
		ret = -EIO;
		goto e_restore_irq;
	}

	vc_ghcb_invalidate(ghcb);

	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
		ghcb_set_rax(ghcb, input->data_gpa);
		ghcb_set_rbx(ghcb, input->data_npages);
	}

	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
	if (ret)
		goto e_put;

	if (ghcb->save.sw_exit_info_2) {
		/* Number of expected pages are returned in RBX */
		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
			input->data_npages = ghcb_get_rbx(ghcb);

		*fw_err = ghcb->save.sw_exit_info_2;

		ret = -EIO;
	}

e_put:
	__sev_put_ghcb(&state);
e_restore_irq:
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(snp_issue_guest_request);
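
/*
 * A hedged sketch of how a caller such as the sev-guest driver might use
 * snp_issue_guest_request(); the req_buf/resp_buf names are illustrative
 * only and not part of this file:
 *
 *	struct snp_req_data input = {
 *		.req_gpa  = __pa(req_buf),	// encrypted guest message
 *		.resp_gpa = __pa(resp_buf),	// hypervisor response
 *	};
 *	unsigned long fw_err;
 *
 *	if (snp_issue_guest_request(SVM_VMGEXIT_GUEST_REQUEST, &input, &fw_err))
 *		pr_err("guest request failed, fw_err=%lx\n", fw_err);
 */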

static struct platform_device sev_guest_device = {
	.name		= "sev-guest",
	.id		= -1,
};

static int __init snp_init_platform_device(void)
{
	struct sev_guest_platform_data data;
	u64 gpa;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return -ENODEV;

	gpa = get_secrets_page();
	if (!gpa)
		return -ENODEV;

	data.secrets_gpa = gpa;
	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
		return -ENODEV;

	if (platform_device_register(&sev_guest_device))
		return -ENODEV;

	pr_info("SNP guest platform device initialized.\n");
	return 0;
}
device_initcall(snp_init_platform_device);