0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #define pr_fmt(fmt) "ACPI: " fmt
0014
0015 #include <linux/acpi.h>
0016 #include <linux/cpumask.h>
0017 #include <linux/efi.h>
0018 #include <linux/efi-bgrt.h>
0019 #include <linux/init.h>
0020 #include <linux/irq.h>
0021 #include <linux/irqdomain.h>
0022 #include <linux/irq_work.h>
0023 #include <linux/memblock.h>
0024 #include <linux/of_fdt.h>
0025 #include <linux/libfdt.h>
0026 #include <linux/smp.h>
0027 #include <linux/serial_core.h>
0028 #include <linux/pgtable.h>
0029
0030 #include <acpi/ghes.h>
0031 #include <asm/cputype.h>
0032 #include <asm/cpu_ops.h>
0033 #include <asm/daifflags.h>
0034 #include <asm/smp_plat.h>
0035
/*
 * ACPI is opt-in on this architecture: boot with ACPI IRQs, the ACPI
 * subsystem and ACPI-based PCI all disabled. acpi_boot_table_init()
 * calls enable_acpi() when ACPI is selected and the tables check out
 * (presumably clearing these flags — see that function below).
 */
int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
int acpi_disabled = 1;		/* skip ACPI subsystem initialization */
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

/* "acpi=" command-line selections, consumed once during early boot */
static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;
0046
0047 static int __init parse_acpi(char *arg)
0048 {
0049 if (!arg)
0050 return -EINVAL;
0051
0052
0053 if (strcmp(arg, "off") == 0)
0054 param_acpi_off = true;
0055 else if (strcmp(arg, "on") == 0)
0056 param_acpi_on = true;
0057 else if (strcmp(arg, "force") == 0)
0058 param_acpi_force = true;
0059 else
0060 return -EINVAL;
0061
0062 return 0;
0063 }
0064 early_param("acpi", parse_acpi);
0065
0066 static bool __init dt_is_stub(void)
0067 {
0068 int node;
0069
0070 fdt_for_each_subnode(node, initial_boot_params, 0) {
0071 const char *name = fdt_get_name(initial_boot_params, node, NULL);
0072 if (strcmp(name, "chosen") == 0)
0073 continue;
0074 if (strcmp(name, "hypervisor") == 0 &&
0075 of_flat_dt_is_compatible(node, "xen,xen"))
0076 continue;
0077
0078 return false;
0079 }
0080
0081 return true;
0082 }
0083
0084
0085
0086
0087
0088 void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
0089 {
0090 if (!size)
0091 return NULL;
0092
0093 return early_memremap(phys, size);
0094 }
0095
0096 void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
0097 {
0098 if (!map || !size)
0099 return;
0100
0101 early_memunmap(map, size);
0102 }
0103
0104 bool __init acpi_psci_present(void)
0105 {
0106 return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
0107 }
0108
0109
0110 bool acpi_psci_use_hvc(void)
0111 {
0112 return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
0113 }
0114
0115
0116
0117
0118
0119
0120
/*
 * acpi_fadt_sanity_check() - check FADT presence and suitability.
 *
 * Fetches the FADT and verifies that its revision is 5.1+ (or that the
 * ARM boot flags are populated, in which case 5.1 is assumed) and that
 * the platform declares itself ACPI hardware-reduced.
 *
 * Return: 0 when the FADT is usable, -ENODEV when the table is absent,
 * -EINVAL when the revision or flags checks fail.
 */
static int __init acpi_fadt_sanity_check(void)
{
	struct acpi_table_header *table;
	struct acpi_table_fadt *fadt;
	acpi_status status;
	int ret = 0;

	/*
	 * Retrieve the FADT to check its presence and carry out the
	 * revision and hardware-reduced compliancy tests below.
	 */
	status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
	if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err("Failed to get FADT table, %s\n", msg);
		return -ENODEV;
	}

	fadt = (struct acpi_table_fadt *)table;

	/*
	 * The major revision is in the common table header, the minor
	 * revision in the FADT itself. Revisions older than 5.1 are only
	 * tolerated when the ARM boot flags field is non-zero: firmware
	 * that populated it is assumed to really implement 5.1 despite
	 * the stale revision numbers.
	 */
	if (table->revision < 5 ||
	    (table->revision == 5 && fadt->minor_revision < 1)) {
		pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
		       table->revision, fadt->minor_revision);

		if (!fadt->arm_boot_flags) {
			ret = -EINVAL;
			goto out;
		}
		pr_err("FADT has ARM boot flags set, assuming 5.1\n");
	}

	/* Only the ACPI hardware-reduced model is accepted here */
	if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
		pr_err("FADT not ACPI hardware reduced compliant\n");
		ret = -EINVAL;
	}

out:
	/* Drop the reference acpi_get_table() took on the mapped table */
	acpi_put_table(table);
	return ret;
}
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
/*
 * acpi_boot_table_init() - decide on ACPI vs DT and initialize tables.
 *
 * Policy, as implemented below:
 *  - "acpi=off" disables ACPI outright;
 *  - otherwise ACPI is only attempted when "acpi=on"/"acpi=force" was
 *    given, or the device tree is a stub (see dt_is_stub());
 *  - if table initialization or the FADT sanity check fails, ACPI is
 *    disabled again unless "acpi=force" was given.
 *
 * Afterwards the early console source matching the final ACPI state is
 * selected (SPCR vs DT stdout), and BGRT is parsed when ACPI is in use.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * Enable ACPI instead of device tree unless:
	 * - ACPI has been disabled explicitly (acpi=off), or
	 * - the device tree is not a stub and ACPI has not been force
	 *   enabled (acpi=on|force)
	 */
	if (param_acpi_off ||
	    (!param_acpi_on && !param_acpi_force && !dt_is_stub()))
		goto done;

	/*
	 * ACPI is disabled at this point. Enable it in order to parse
	 * the ACPI tables and carry out the sanity checks.
	 */
	enable_acpi();

	/*
	 * If table initialization or the FADT sanity check fails, back out
	 * of ACPI again — unless acpi=force was passed, which keeps ACPI
	 * enabled even after an initialization error.
	 */
	if (acpi_table_init() || acpi_fadt_sanity_check()) {
		pr_err("Failed to init ACPI tables\n");
		if (!param_acpi_force)
			disable_acpi();
	}

done:
	/* Early console: DT stdout when ACPI lost, SPCR when it won */
	if (acpi_disabled) {
		if (earlycon_acpi_spcr_enable)
			early_init_dt_scan_chosen_stdout();
	} else {
		acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
		if (IS_ENABLED(CONFIG_ACPI_BGRT))
			acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
	}
}
0234
static pgprot_t __acpi_get_writethrough_mem_attribute(void)
{
	/*
	 * A request for EFI_MEMORY_WT cannot be honoured: per the warning
	 * below, no MAIR attribute index is allocated for write-through
	 * memory, so degrade to Normal Non-cacheable, which is safe (if
	 * slower) for write-through usage. NOTE(review): rationale taken
	 * from the warning text — confirm against the arm64 MAIR setup.
	 */
	pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
	return __pgprot(PROT_NORMAL_NC);
}
0246
/*
 * __acpi_get_mem_attribute() - derive a pgprot_t from the EFI memory map.
 * @addr: physical address to look up.
 *
 * Pick the most capable attribute the EFI memory map advertises for
 * @addr: write-back when permitted, then write-combine, then
 * write-through (degraded — see helper above), and Device-nGnRnE when
 * no cacheability bit is set. NOTE(review): an address absent from the
 * memory map presumably yields attr == 0 and hence Device — confirm
 * efi_mem_attributes()'s contract.
 */
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
	u64 attr;

	attr = efi_mem_attributes(addr);
	if (attr & EFI_MEMORY_WB)
		return PAGE_KERNEL;
	if (attr & EFI_MEMORY_WC)
		return __pgprot(PROT_NORMAL_NC);
	if (attr & EFI_MEMORY_WT)
		return __acpi_get_writethrough_mem_attribute();
	return __pgprot(PROT_DEVICE_nGnRnE);
}
0268
/*
 * acpi_os_ioremap() - map a physical region on behalf of the ACPI core,
 * deriving memory attributes from the EFI memory map.
 * @phys: physical base address requested by ACPI/AML.
 * @size: length of the requested mapping in bytes.
 *
 * Return: a mapping of the region, or NULL when no EFI memory map is
 * available, the request straddles EFI regions, or it would alias
 * memory the kernel already owns.
 */
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	efi_memory_desc_t *md, *region = NULL;
	pgprot_t prot;

	/* Attributes come from the EFI memory map; without one, refuse */
	if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
		return NULL;

	/* Find the single EFI region covering all of [phys, phys + size) */
	for_each_efi_memory_desc(md) {
		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (phys < md->phys_addr || phys >= end)
			continue;

		if (phys + size > end) {
			pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
			return NULL;
		}
		region = md;
		break;
	}

	/*
	 * Regions absent from the EFI memory map get the strictest Device
	 * attributes. NOTE(review): presumably such requests are MMIO,
	 * which the map does not describe — confirm against firmware docs.
	 */
	prot = __pgprot(PROT_DEVICE_nGnRnE);
	if (region) {
		switch (region->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
		case EFI_PERSISTENT_MEMORY:
			/*
			 * Refuse memory the kernel has in its linear map,
			 * or requests not fully contained in memblock
			 * memory; mapping those here could alias kernel
			 * data with mismatched attributes.
			 */
			if (memblock_is_map_memory(phys) ||
			    !memblock_is_region_memory(phys, size)) {
				pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
				return NULL;
			}

			/*
			 * What remains here is NOMAP memblock memory,
			 * which is treated like the runtime-services case
			 * below. NOTE(review): inferred from the two
			 * memblock checks above — confirm against
			 * arch_reserve_mem_area() at the end of this file.
			 */
			fallthrough;

		case EFI_RUNTIME_SERVICES_CODE:
			/*
			 * Map read-only so stray AML writes cannot corrupt
			 * code/reserved contents.
			 */
			prot = PAGE_KERNEL_RO;
			break;

		case EFI_ACPI_RECLAIM_MEMORY:
			/*
			 * ACPI reclaim memory holds the firmware tables
			 * themselves. When it already sits in the linear
			 * map, reuse that mapping instead of creating a
			 * second one with possibly different attributes.
			 */
			if (memblock_is_map_memory(phys))
				return (void __iomem *)__phys_to_virt(phys);
			fallthrough;

		default:
			/* Honour the region's advertised cacheability */
			if (region->attribute & EFI_MEMORY_WB)
				prot = PAGE_KERNEL;
			else if (region->attribute & EFI_MEMORY_WC)
				prot = __pgprot(PROT_NORMAL_NC);
			else if (region->attribute & EFI_MEMORY_WT)
				prot = __acpi_get_writethrough_mem_attribute();
		}
	}
	return ioremap_prot(phys, size, pgprot_val(prot));
}
0356
0357
0358
0359
0360
0361
0362
0363 int apei_claim_sea(struct pt_regs *regs)
0364 {
0365 int err = -ENOENT;
0366 bool return_to_irqs_enabled;
0367 unsigned long current_flags;
0368
0369 if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
0370 return err;
0371
0372 current_flags = local_daif_save_flags();
0373
0374
0375 return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
0376
0377 if (regs)
0378 return_to_irqs_enabled = interrupts_enabled(regs);
0379
0380
0381
0382
0383
0384 local_daif_restore(DAIF_ERRCTX);
0385 nmi_enter();
0386 err = ghes_notify_sea();
0387 nmi_exit();
0388
0389
0390
0391
0392
0393 if (!err) {
0394 if (return_to_irqs_enabled) {
0395 local_daif_restore(DAIF_PROCCTX_NOIRQ);
0396 __irq_enter();
0397 irq_work_run();
0398 __irq_exit();
0399 } else {
0400 pr_warn_ratelimited("APEI work queued but not completed");
0401 err = -EINPROGRESS;
0402 }
0403 }
0404
0405 local_daif_restore(current_flags);
0406
0407 return err;
0408 }
0409
/*
 * arch_reserve_mem_area() - mark an ACPI-reserved physical range NOMAP
 * in memblock, keeping it out of the kernel's linear mapping (see the
 * memblock_is_map_memory() checks in acpi_os_ioremap() above).
 */
void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_mark_nomap(addr, size);
}