// SPDX-License-Identifier: GPL-2.0
/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and
 * by the x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu_entry_area.h>
#include <asm/softirq_stack.h>
#include <asm/irq_stack.h>
#include <asm/io_apic.h>
#include <asm/apic.h>

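/*
 * Per-CPU IRQ stack backing store. Page aligned so that, with
 * CONFIG_VMAP_STACK, the backing pages can be remapped into the
 * vmalloc area, which provides guard pages around the stack.
 */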
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
DECLARE_INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_VMAP_STACK
/*
 * VMAP the backing store with guard pages
 */
static int map_irq_stack(unsigned int cpu)
{
	char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
	struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
	void *va;
	int i;

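	/*
	 * Translate each page of the per-CPU backing store into its
	 * struct page so the whole range can be remapped below.
	 */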
	for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
		phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));

		pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
	}

	va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
	if (!va)
		return -ENOMEM;

	/* Store actual TOS to avoid adjustment in the hotpath */
	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
	return 0;
}
#else
/*
 * If VMAP stacks are disabled due to KASAN, just use the per cpu
 * backing store without guard pages.
 */
static int map_irq_stack(unsigned int cpu)
{
	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);

	/* Store actual TOS to avoid adjustment in the hotpath */
	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
	return 0;
}
#endif

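/*
 * Set up the per-CPU IRQ stack for @cpu. Idempotent: if the stack
 * pointer is already initialized, the existing mapping is kept.
 */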
int irq_init_percpu_irqstack(unsigned int cpu)
{
	if (per_cpu(hardirq_stack_ptr, cpu))
		return 0;
	return map_irq_stack(cpu);
}