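/*
 * x86-64 EFI runtime support: EFI page table setup, 1:1 and high-VA
 * mappings of runtime regions, and mixed-mode (32-bit firmware on a
 * 64-bit kernel) thunking of the EFI runtime services.
 */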
#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/cc_platform.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/sev.h>

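/*
 * Runtime services regions are mapped top-down: efi_va starts at
 * EFI_VA_START and grows down towards EFI_VA_END as regions are mapped.
 */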
static u64 efi_va = EFI_VA_START;
static struct mm_struct *efi_prev_mm;

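/*
 * The EFI mm gets its own copy of the top-level page tables so that the
 * EFI region mappings (EFI_VA_END to EFI_VA_START) never end up in the
 * regular kernel page tables; everything else is shared, see
 * efi_sync_low_kernel_mappings().
 */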
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
	if (!efi_pgd)
		goto fail;

	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d)
		goto free_pgd;

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud)
		goto free_p4d;

	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);

	return 0;

free_p4d:
	if (pgtable_l5_enabled())
		free_page((unsigned long)pgd_page_vaddr(*pgd));
free_pgd:
	free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
fail:
	return -ENOMEM;
}

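/*
 * Copy the kernel's low mappings into the EFI page table so that
 * arguments passed to EFI runtime services remain addressable while
 * running on efi_mm.
 */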
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;
	pgd_t *efi_pgd = efi_mm.pgd;

	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

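	/*
	 * Share all PUD entries apart from those that map the EFI regions.
	 * The BUILD_BUG_ON()s make sure the EFI VA window is PUD-aligned so
	 * we can copy around it.
	 */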
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

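/*
 * Translate a virtual address to a physical one, returning 0 for NULL
 * pointers and for non-linear mappings whose object would cross a page
 * boundary.
 */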
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
	phys_addr_t pa;

	if (!va)
		return 0;

	if (virt_addr_valid(va))
		return virt_to_phys(va);

	pa = slow_virt_to_phys(va);

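	/* check whether the object crosses a page boundary */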
	if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
		return 0;

	return pa;
}

#define virt_to_phys_or_null(addr) \
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))

int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	unsigned long pfn, text, pf, rodata;
	struct page *page;
	unsigned npages;
	pgd_t *pgd = efi_mm.pgd;

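	/*
	 * Ident-map the pages containing the new memory map in the EFI page
	 * table, since its physical address may not be covered by the
	 * existing mappings.
	 */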
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

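	/*
	 * Keep a 1:1 mapping of the first physical page as well; some
	 * firmware still touches it during SetVirtualAddressMap() even
	 * though it is marked as conventional memory.
	 */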
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

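	/*
	 * When SEV-ES is active the firmware will use the kernel's GHCBs;
	 * create 1:1 mappings for them.
	 */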
	if (sev_es_efi_map_ghcbs(pgd)) {
		pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
		return 1;
	}

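	/*
	 * The rest only matters for mixed mode: 32-bit firmware needs
	 * everything 1:1 mapped and addressable with 32-bit pointers, so
	 * map the kernel text/rodata and allocate a stack below 4GB.
	 */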
	if (!efi_is_mixed())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page) {
		pr_err("Unable to allocate EFI runtime stack < 4GB\n");
		return 1;
	}

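	/* The stack grows down, so point at the end of the allocated page. */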
	efi_mixed_mode_stack_pa = page_to_phys(page + 1);

	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);
	pfn = text >> PAGE_SHIFT;

	pf = _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
		pr_err("Failed to map kernel text 1:1\n");
		return 1;
	}

	npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
	rodata = __pa(__start_rodata);
	pfn = rodata >> PAGE_SHIFT;

	pf = _PAGE_NX | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
		pr_err("Failed to map kernel rodata 1:1\n");
		return 1;
	}

	return 0;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;

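	/*
	 * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF images
	 * containing both R-X and RW- sections, so read-only/non-exec
	 * restrictions cannot be applied here; they are tightened later
	 * from the EFI memory attributes table if one is provided.
	 */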
	if (md->type != EFI_BOOT_SERVICES_CODE &&
	    md->type != EFI_RUNTIME_SERVICES_CODE)
		flags |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    md->type != EFI_MEMORY_MAPPED_IO)
		flags |= _PAGE_ENC;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

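	/*
	 * Always keep a 1:1 mapping of the region as well, as a catch-all
	 * for firmware that keeps using physical pointers after
	 * SetVirtualAddressMap().
	 */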
	__map_region(md, md->phys_addr);

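	/*
	 * In mixed mode the 32-bit runtime services cannot be handed 64-bit
	 * pointers, so use the 1:1 mapping as the virtual address.
	 */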
	if (efi_is_mixed()) {
		md->virt_addr = md->phys_addr;
		return;
	}

	efi_va -= size;

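	/*
	 * Keep the VA at the same offset within a PMD as the PA so the
	 * region can still be mapped with large pages.
	 */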
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

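/*
 * Map a region at both its physical address and a fixed, previously
 * chosen virtual address (md->virt_addr), e.g. in the kexec'd kernel
 * where the virtual address was already established by the first kernel.
 */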
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}

void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}

static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;
	int err1, err2;

	pfn = md->phys_addr >> PAGE_SHIFT;
	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
	if (err1) {
		pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
	if (err2) {
		pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
		       md->phys_addr, md->virt_addr);
	}

	return err1 || err2;
}

static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
	unsigned long pf = 0;

	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pf |= _PAGE_ENC;

	return efi_update_mappings(md, pf);
}

void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;

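	/*
	 * Use the EFI Memory Attributes Table for permissions if the
	 * firmware published one; it supersedes EFI_PROPERTIES_TABLE.
	 */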
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}

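	/*
	 * Otherwise fall back to EFI_PROPERTIES_TABLE (EFI_NX_PE_DATA) to
	 * tighten the runtime mappings.
	 */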
	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pf |= _PAGE_ENC;

		efi_update_mappings(md, pf);
	}
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}

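/*
 * Switch the calling thread to/from the efi_mm context.  Preemption must
 * stay disabled while the EFI mm is borrowed.
 */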
void efi_enter_mm(void)
{
	efi_prev_mm = current->active_mm;
	current->active_mm = &efi_mm;
	switch_mm(efi_prev_mm, &efi_mm, NULL);
}

void efi_leave_mm(void)
{
	current->active_mm = efi_prev_mm;
	switch_mm(&efi_mm, efi_prev_mm, NULL);
}

static DEFINE_SPINLOCK(efi_runtime_lock);

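/*
 * The 32-bit EFI code needs valid DS, ES and SS selectors, while the
 * caller's DS and ES may hold user values, so save and reload them around
 * the thunk.  The returned status also has its error bit moved from
 * bit 31 (32-bit convention) to bit 63.
 */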
#define __efi_thunk(func, ...)						\
({									\
	unsigned short __ds, __es;					\
	efi_status_t ____s;						\
									\
	savesegment(ds, __ds);						\
	savesegment(es, __es);						\
									\
	loadsegment(ss, __KERNEL_DS);					\
	loadsegment(ds, __KERNEL_DS);					\
	loadsegment(es, __KERNEL_DS);					\
									\
	____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__);	\
									\
	loadsegment(ds, __ds);						\
	loadsegment(es, __es);						\
									\
	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
	____s;								\
})

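/*
 * Full mixed-mode call: wraps __efi_thunk() in arch_efi_call_virt_setup()
 * and arch_efi_call_virt_teardown(), which handle switching to the EFI mm
 * and related state around the firmware call.
 */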
#define efi_thunk(func...)						\
({									\
	efi_status_t __s;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__s = __efi_thunk(func);					\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})

static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
				  unsigned long descriptor_size,
				  u32 descriptor_version,
				  efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_enter_mm();

	status = __efi_thunk(set_virtual_address_map, memory_map_size,
			     descriptor_size, descriptor_version, virtual_map);

	efi_leave_mm();
	local_irq_restore(flags);

	return status;
}

static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}

static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_variable, phys_name, phys_vendor,
				   phys_attr, phys_data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 attr, unsigned long data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
				   u32 attr, unsigned long data_size,
				   void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	if (!phys_name)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_next_variable, phys_name_size,
				   phys_name, phys_vendor);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	*vendor = *vnd;
	return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	return EFI_UNSUPPORTED;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
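	/*
	 * Not supported in mixed mode: the capsule list would have to be
	 * repackaged, since the 32-bit firmware cannot consume its 64-bit
	 * pointers.
	 */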
	return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
					  u64 *remaining_space,
					  u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
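	/* Unsupported for the same reason as efi_thunk_update_capsule(). */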
	return EFI_UNSUPPORTED;
}

void __init efi_thunk_runtime_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return;

	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}

efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
			    unsigned long descriptor_size,
			    u32 descriptor_version,
			    efi_memory_desc_t *virtual_map,
			    unsigned long systab_phys)
{
	const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
	efi_status_t status;
	unsigned long flags;

	if (efi_is_mixed())
		return efi_thunk_set_virtual_address_map(memory_map_size,
							 descriptor_size,
							 descriptor_version,
							 virtual_map);
	efi_enter_mm();

	efi_fpu_begin();

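	/* Disable interrupts around the firmware call: */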
	local_irq_save(flags);
	status = efi_call(efi.runtime->set_virtual_address_map,
			  memory_map_size, descriptor_size,
			  descriptor_version, virtual_map);
	local_irq_restore(flags);

	efi_fpu_end();

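	/* Grab the virtually remapped EFI runtime services table pointer. */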
	efi.runtime = READ_ONCE(systab->runtime);

	efi_leave_mm();

	return status;
}