// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */
#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>
#include <linux/virtio_anchor.h>
#include <linux/cc_platform.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/mem_encrypt.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>
#include <asm/sev.h>

#include "mm_internal.h"

/*
 * SME/SEV related variables are set early in the boot process, so they must
 * reside in the .data section in order not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);

/* Buffer used for early in-place encryption by the BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * SNP-specific memcpy() which additionally changes the page state from
 * private to shared before copying the data from the source to the
 * destination, and restores the page state afterwards.
 */
static inline void __init snp_memcpy(void *dst, void *src, size_t sz,
				     unsigned long paddr, bool decrypt)
{
	unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (decrypt) {
		/*
		 * @paddr needs to be accessed decrypted, mark the page
		 * shared in the RMP table before copying it.
		 */
		early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);

		memcpy(dst, src, sz);

		/* Restore the page state after the memcpy. */
		early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
	} else {
		/*
		 * @paddr needs to be accessed encrypted, no page state
		 * change is required.
		 */
		memcpy(dst, src, sz);
	}
}

/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
			snp_memcpy(sme_early_buffer, src, len, paddr, enc);
			snp_memcpy(dst, sme_early_buffer, len, paddr, !enc);
		} else {
			memcpy(sme_early_buffer, src, len);
			memcpy(dst, sme_early_buffer, len);
		}

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}
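/*
 * Thin wrappers around __sme_early_enc_dec(): en-/decrypt a range of
 * physical memory in place during early boot.
 */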
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}
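/*
 * Create or remove early page table mappings for @vaddr/@size in PMD-sized
 * chunks. The mappings use early_pmd_flags with the encryption mask cleared
 * so that the (unencrypted) boot data can be accessed through them.
 */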
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}
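/*
 * Tear down the unencrypted mappings of the boot_params structure and the
 * kernel command line that were established by sme_map_bootdata(). Used
 * only when host memory encryption (SME) is active.
 */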
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}
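/*
 * With SME active, the early page tables carry the encryption mask, but the
 * boot loader wrote boot_params and the command line to memory unencrypted.
 * Map both with the encryption mask cleared so they can be read correctly.
 */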
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}
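/*
 * SEV guest setup: size the SWIOTLB bounce buffer used for DMA through
 * shared/unencrypted memory and restrict virtio to shared memory access.
 */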
void __init sev_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV, all DMA has to occur via shared/unencrypted pages.
	 * SEV uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation, especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB for SEV guests using a
	 * percentage of guest memory for SWIOTLB buffers. As the
	 * SWIOTLB bounce buffer memory is allocated from low memory,
	 * clamp the adjusted size to the limits of low available memory.
	 *
	 * The 6% of guest memory used here approximates the static
	 * adjustments that would otherwise be needed: 64MB for <1G,
	 * and ~128M to 256M for 1G-to-4G.
	 */
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);

	/* Set restricted memory access for virtio. */
	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
}
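/*
 * Return the PFN, and optionally the protection bits, of the mapping backing
 * @kpte at the given page table @level. Returns 0 for an unrecognized level.
 */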
static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
	unsigned long pfn = 0;
	pgprot_t prot;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		WARN_ONCE(1, "Invalid level for kpte\n");
		return 0;
	}

	if (ret_prot)
		*ret_prot = prot;

	return pfn;
}
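/*
 * Callbacks for the x86_platform.guest encryption status change machinery:
 * a TLB flush is always required around a C-bit change, while a cache flush
 * is only required when the hardware is not SME-coherent.
 */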
static bool amd_enc_tlb_flush_required(bool enc)
{
	return true;
}

static bool amd_enc_cache_flush_required(void)
{
	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
}
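/*
 * Walk the range one mapping at a time and notify the hypervisor of the
 * changed encryption status of the underlying pages.
 */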
static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
#ifdef CONFIG_PARAVIRT
	unsigned long sz = npages << PAGE_SHIFT;
	unsigned long vaddr_end = vaddr + sz;

	while (vaddr < vaddr_end) {
		int psize, pmask, level;
		unsigned long pfn;
		pte_t *kpte;

		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			WARN_ONCE(1, "kpte lookup for vaddr\n");
			return;
		}

		pfn = pg_level_to_pfn(level, kpte, NULL);
		if (!pfn)
			continue;

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);

		vaddr = (vaddr & pmask) + psize;
	}
#endif
}

static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * To maintain the security guarantees of SEV-SNP guests, make sure
	 * to invalidate the memory before the encryption attribute is
	 * cleared.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
		snp_set_memory_shared(vaddr, npages);
}

/* Return true unconditionally: the return value does not matter for the SEV side. */
static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
{
	/*
	 * After memory is mapped encrypted in the page table, validate it
	 * so that it is consistent with the page table updates.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
		snp_set_memory_private(vaddr, npages);

	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		enc_dec_hypercall(vaddr, npages, enc);

	return true;
}
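/*
 * Set or clear the encryption attribute in a single page table entry,
 * en-/decrypting the underlying memory contents in place and, on SNP,
 * keeping the RMP table consistent with the page table update.
 */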
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	pfn = pg_level_to_pfn(level, kpte, &old_prot);
	if (!pfn)
		return;

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If the protection bits are unchanged, there is nothing to do. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * In-place en-/decryption changes the physical page attribute from
	 * C=1 to C=0 or vice versa. Flush the caches to ensure that data
	 * gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc) {
		sme_early_encrypt(pa, size);
	} else {
		sme_early_decrypt(pa, size);

		/*
		 * On SNP, the page state change in the RMP table must happen
		 * before the page table updates.
		 */
		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
	}

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);

	/*
	 * If the page is set encrypted in the page table, then update the
	 * RMP table to add this page as private.
	 */
	if (enc)
		early_snp_set_memory_private((unsigned long)__va(pa), pa, 1);
}
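/*
 * Early replacement for set_memory_{en,de}crypted(): walk the page tables
 * covering [vaddr, vaddr + size), changing the encryption attribute a whole
 * mapping at a time and splitting large pages only when the range covers
 * just part of one.
 */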
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next, start;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	start = vaddr;
	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned or
		 * the number of pages to change the encryption bit on is
		 * smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, create the
		 * next level page table mapping (4K or 2M). If it is part of
		 * a 2M page then we request a split of the large page into
		 * 4K chunks. A 1GB large page is split into 2M pages, resp.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs,
		 * so a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
out:
	__flush_tlb_all();
	return ret;
}
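/* Early-boot interfaces around early_set_memory_enc_dec(). */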
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
{
	enc_dec_hypercall(vaddr, npages, enc);
}
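/*
 * Early SME initialization: fold the encryption mask into the early PMD
 * flags and the supported PTE mask, update the protection map and register
 * the AMD-specific encryption status change callbacks.
 */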
void __init sme_early_init(void)
{
	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with the memory encryption mask */
	add_encrypt_protection_map();

	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = amd_enc_status_change_finish;
	x86_platform.guest.enc_tlb_flush_required    = amd_enc_tlb_flush_required;
	x86_platform.guest.enc_cache_flush_required  = amd_enc_cache_flush_required;
}
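/*
 * Free the unused part of the .bss..decrypted section. The pages were
 * mapped decrypted, so switch them back to encrypted before handing them
 * to the allocator when memory encryption is active.
 */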
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}