// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/nospec-branch.h>
#include <asm/softirq_stack.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
    long sp;

    __asm__ __volatile__("andl %%esp,%0" :
                 "=r" (sp) : "0" (THREAD_SIZE - 1));

    return sp < (sizeof(struct thread_info) + STACK_WARN);
}
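
/*
 * How the check above works: kernel stacks on 32-bit x86 are
 * THREAD_SIZE bytes long and THREAD_SIZE-aligned, so masking %esp
 * with THREAD_SIZE - 1 yields the offset of the stack pointer from
 * the base of the stack, i.e. the number of bytes still free below
 * it. Worked example, assuming THREAD_SIZE = 8192 = 0x2000:
 * esp = 0xc1234e40 -> sp = 0xc1234e40 & 0x1fff = 0xe40 = 3648 bytes
 * remaining, comfortably above the warning threshold. The comparison
 * also reserves sizeof(struct thread_info) because thread_info
 * historically lived at the bottom of the stack on this
 * architecture.
 */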

static void print_stack_overflow(void)
{
    printk(KERN_WARNING "low stack detected by irq handler\n");
    dump_stack();
    if (sysctl_panic_on_stackoverflow)
        panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

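/*
 * Per-CPU pointers to the dedicated hardirq and softirq stacks.
 * They are allocated node-locally in irq_init_percpu_irqstack()
 * below and remain NULL until that has run for the CPU.
 */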
DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);

static void call_on_stack(void *func, void *stack)
{
    asm volatile("xchgl %%ebx,%%esp \n"
             CALL_NOSPEC
             "movl  %%ebx,%%esp \n"
             : "=b" (stack)
             : "0" (stack),
               [thunk_target] "D"(func)
             : "memory", "cc", "edx", "ecx", "eax");
}
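
/*
 * call_on_stack() switches stacks around an indirect call: the xchgl
 * swaps %esp with the new stack pointer (held in %ebx via the "b"
 * constraint), CALL_NOSPEC from <asm/nospec-branch.h> performs a
 * retpoline-safe indirect call through %edi ([thunk_target] "D"),
 * and the final movl restores the original %esp that the exchange
 * left in %ebx. Listing %ebx as an output and the caller-saved
 * registers as clobbers tells the compiler everything the sequence
 * may modify.
 */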

static inline void *current_stack(void)
{
    return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}
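
/*
 * Rounding the stack pointer down to a THREAD_SIZE boundary yields
 * the base of whichever stack we are currently running on, again
 * relying on THREAD_SIZE alignment. Example, assuming THREAD_SIZE =
 * 0x2000: esp = 0xc1234e40 -> base = 0xc1234e40 & ~0x1fff =
 * 0xc1234000. execute_on_irq_stack() compares this base against the
 * per-CPU hardirq stack pointer to detect nested hardirq entry.
 */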

static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
    struct irq_stack *curstk, *irqstk;
    u32 *isp, *prev_esp, arg1;

    curstk = (struct irq_stack *) current_stack();
    irqstk = __this_cpu_read(hardirq_stack_ptr);

    /*
     * This is where we switch to the IRQ stack. However, if we are
     * already using the IRQ stack (because we interrupted a hardirq
     * handler), we can't switch again and simply keep using the
     * current stack, which is the IRQ stack already.
     */
    if (unlikely(curstk == irqstk))
        return 0;

    isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

    /*
     * Save the interrupted task's %esp at the bottom of the IRQ
     * stack so the stack dumper can walk back to the task stack.
     */
    prev_esp = (u32 *)irqstk;
    *prev_esp = current_stack_pointer;

    if (unlikely(overflow))
        call_on_stack(print_stack_overflow, isp);

    asm volatile("xchgl %%ebx,%%esp \n"
             CALL_NOSPEC
             "movl  %%ebx,%%esp \n"
             : "=a" (arg1), "=b" (isp)
             :  "0" (desc),   "1" (isp),
            [thunk_target] "D" (desc->handle_irq)
             : "memory", "cc", "ecx");
    return 1;
}
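
/*
 * Return value: 1 if the handler was invoked on the hardirq stack,
 * 0 if we were already on it and the caller (__handle_irq() below)
 * must call generic_handle_irq_desc() on the current stack instead.
 * The inline asm mirrors call_on_stack() but additionally binds desc
 * to %eax ("=a"/"0"), which carries the first argument to
 * desc->handle_irq under the kernel's 32-bit regparm calling
 * convention.
 */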

/*
 * Allocate per-cpu stacks for hardirq and softirq processing
 */
int irq_init_percpu_irqstack(unsigned int cpu)
{
    int node = cpu_to_node(cpu);
    struct page *ph, *ps;

    if (per_cpu(hardirq_stack_ptr, cpu))
        return 0;

    ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
    if (!ph)
        return -ENOMEM;
    ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
    if (!ps) {
        __free_pages(ph, THREAD_SIZE_ORDER);
        return -ENOMEM;
    }

    per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
    per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
    return 0;
}
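
/*
 * Each stack is THREAD_SIZE bytes (2^THREAD_SIZE_ORDER pages),
 * allocated on the CPU's local NUMA node with the same GFP flags
 * used for task stacks. The early return when hardirq_stack_ptr is
 * already set makes the function idempotent, so bringing the same
 * CPU up again reuses the existing stacks. On a partial failure the
 * hardirq pages are freed before returning -ENOMEM.
 */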

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
    struct irq_stack *irqstk;
    u32 *isp, *prev_esp;

    irqstk = __this_cpu_read(softirq_stack_ptr);

    /* build the stack frame on the softirq stack */
    isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

    /* Push the previous esp onto the stack */
    prev_esp = (u32 *)irqstk;
    *prev_esp = current_stack_pointer;

    call_on_stack(__do_softirq, isp);
}
#endif
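
/*
 * The softirq path mirrors execute_on_irq_stack() but performs no
 * nesting check: its callers do not run on the softirq stack, so an
 * unconditional switch is safe. As on the hardirq stack, the
 * previous %esp is stashed at the bottom of the stack before the
 * switch.
 */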

void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
    int overflow = check_stack_overflow();

    if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
        if (unlikely(overflow))
            print_stack_overflow();
        generic_handle_irq_desc(desc);
    }
}
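
/*
 * Dispatch logic: if the interrupt arrived from user mode, the
 * kernel stack is almost empty, so the handler can safely run on it
 * directly. Otherwise we try to switch to the hardirq stack; if we
 * were already on it (a nested hardirq), execute_on_irq_stack()
 * returns 0 and the handler runs on the current stack instead, with
 * any overflow warning printed in place.
 */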