/* (Extraction artifact: LXR web-viewer navigation chrome removed.) */
0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 #ifndef _ASM_X86_ACPI_H
0003 #define _ASM_X86_ACPI_H
0004 
0005 /*
0006  *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
0007  *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
0008  */
0009 #include <acpi/pdc_intel.h>
0010 
0011 #include <asm/numa.h>
0012 #include <asm/fixmap.h>
0013 #include <asm/processor.h>
0014 #include <asm/mmu.h>
0015 #include <asm/mpspec.h>
0016 #include <asm/x86_init.h>
0017 
0018 #ifdef CONFIG_ACPI_APEI
0019 # include <asm/pgtable_types.h>
0020 #endif
0021 
0022 #ifdef CONFIG_ACPI
0023 extern int acpi_lapic;
0024 extern int acpi_ioapic;
0025 extern int acpi_noirq;
0026 extern int acpi_strict;
0027 extern int acpi_disabled;
0028 extern int acpi_pci_disabled;
0029 extern int acpi_skip_timer_override;
0030 extern int acpi_use_timer_override;
0031 extern int acpi_fix_pin2_polarity;
0032 extern int acpi_disable_cmcff;
0033 
0034 extern u8 acpi_sci_flags;
0035 extern u32 acpi_sci_override_gsi;
0036 void acpi_pic_sci_set_trigger(unsigned int, u16);
0037 
0038 struct device;
0039 
0040 extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
0041                   int trigger, int polarity);
0042 extern void (*__acpi_unregister_gsi)(u32 gsi);
0043 
0044 static inline void disable_acpi(void)
0045 {
0046     acpi_disabled = 1;
0047     acpi_pci_disabled = 1;
0048     acpi_noirq = 1;
0049 }
0050 
0051 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
0052 
0053 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
0054 static inline void acpi_disable_pci(void)
0055 {
0056     acpi_pci_disabled = 1;
0057     acpi_noirq_set();
0058 }
0059 
0060 /* Low-level suspend routine. */
0061 extern int (*acpi_suspend_lowlevel)(void);
0062 
0063 /* Physical address to resume after wakeup */
0064 unsigned long acpi_get_wakeup_address(void);
0065 
0066 /*
0067  * Check if the CPU can handle C2 and deeper
0068  */
0069 static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
0070 {
0071     /*
0072      * Early models (<=5) of AMD Opterons are not supposed to go into
0073      * C2 state.
0074      *
0075      * Steppings 0x0A and later are good
0076      */
0077     if (boot_cpu_data.x86 == 0x0F &&
0078         boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
0079         boot_cpu_data.x86_model <= 0x05 &&
0080         boot_cpu_data.x86_stepping < 0x0A)
0081         return 1;
0082     else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
0083         return 1;
0084     else
0085         return max_cstate;
0086 }
0087 
0088 static inline bool arch_has_acpi_pdc(void)
0089 {
0090     struct cpuinfo_x86 *c = &cpu_data(0);
0091     return (c->x86_vendor == X86_VENDOR_INTEL ||
0092         c->x86_vendor == X86_VENDOR_CENTAUR);
0093 }
0094 
0095 static inline void arch_acpi_set_pdc_bits(u32 *buf)
0096 {
0097     struct cpuinfo_x86 *c = &cpu_data(0);
0098 
0099     buf[2] |= ACPI_PDC_C_CAPABILITY_SMP;
0100 
0101     if (cpu_has(c, X86_FEATURE_EST))
0102         buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
0103 
0104     if (cpu_has(c, X86_FEATURE_ACPI))
0105         buf[2] |= ACPI_PDC_T_FFH;
0106 
0107     /*
0108      * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
0109      */
0110     if (!cpu_has(c, X86_FEATURE_MWAIT))
0111         buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
0112 }
0113 
0114 static inline bool acpi_has_cpu_in_madt(void)
0115 {
0116     return !!acpi_lapic;
0117 }
0118 
0119 #define ACPI_HAVE_ARCH_SET_ROOT_POINTER
/* Store the RSDP address via the x86_init platform hook. */
static inline void acpi_arch_set_root_pointer(u64 addr)
{
	x86_init.acpi.set_root_pointer(addr);
}
0124 
0125 #define ACPI_HAVE_ARCH_GET_ROOT_POINTER
/* Retrieve the RSDP address via the x86_init platform hook. */
static inline u64 acpi_arch_get_root_pointer(void)
{
	return x86_init.acpi.get_root_pointer();
}
0130 
0131 void acpi_generic_reduced_hw_init(void);
0132 
0133 void x86_default_set_root_pointer(u64 addr);
0134 u64 x86_default_get_root_pointer(void);
0135 
0136 #else /* !CONFIG_ACPI */
0137 
/* No ACPI: flags are compile-time zero and the helpers become no-ops. */
#define acpi_lapic 0
#define acpi_ioapic 0
#define acpi_disable_cmcff 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }

static inline void acpi_generic_reduced_hw_init(void) { }

static inline void x86_default_set_root_pointer(u64 addr) { }

/* No firmware tables, so there is no RSDP to report. */
static inline u64 x86_default_get_root_pointer(void)
{
	return 0;
}
0153 
0154 #endif /* !CONFIG_ACPI */
0155 
0156 #define ARCH_HAS_POWER_INIT 1
0157 
0158 #ifdef CONFIG_ACPI_NUMA
0159 extern int x86_acpi_numa_init(void);
0160 #endif /* CONFIG_ACPI_NUMA */
0161 
0162 struct cper_ia_proc_ctx;
0163 
0164 #ifdef CONFIG_ACPI_APEI
/* Page protection APEI should use when mapping the region at @addr. */
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
	/*
	 * We currently have no way to look up the EFI memory map
	 * attributes for a region in a consistent way, because the
	 * memmap is discarded after efi_free_boot_services(). So if
	 * you call efi_mem_attributes() during boot and at runtime,
	 * you could theoretically see different attributes.
	 *
	 * We are yet to see any x86 platforms that require anything
	 * other than PAGE_KERNEL (some ARM64 platforms require the
	 * equivalent of PAGE_KERNEL_NOCACHE). Additionally, if SME
	 * is active, the ACPI information will not be encrypted,
	 * so return PAGE_KERNEL_NOENC until we know differently.
	 */
	return PAGE_KERNEL_NOENC;
}
0182 
0183 int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
0184                    u64 lapic_id);
0185 #else
/* Without APEI support there is no way to report the error context. */
static inline int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
					     u64 lapic_id)
{
	return -EINVAL;
}
0191 #endif
0192 
0193 #define ACPI_TABLE_UPGRADE_MAX_PHYS (max_low_pfn_mapped << PAGE_SHIFT)
0194 
0195 #endif /* _ASM_X86_ACPI_H */