Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * This file is subject to the terms and conditions of the GNU General Public
0003  * License.  See the file "COPYING" in the main directory of this archive
0004  * for more details.
0005  *
0006  * Code to handle x86 style IRQs plus some generic interrupt stuff.
0007  *
0008  * Copyright (C) 1992 Linus Torvalds
0009  * Copyright (C) 1994 - 2000 Ralf Baechle
0010  */
0011 #include <linux/kernel.h>
0012 #include <linux/delay.h>
0013 #include <linux/init.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/kernel_stat.h>
0016 #include <linux/proc_fs.h>
0017 #include <linux/mm.h>
0018 #include <linux/random.h>
0019 #include <linux/sched.h>
0020 #include <linux/seq_file.h>
0021 #include <linux/kallsyms.h>
0022 #include <linux/kgdb.h>
0023 #include <linux/ftrace.h>
0024 #include <linux/irqdomain.h>
0025 
0026 #include <linux/atomic.h>
0027 #include <linux/uaccess.h>
0028 
/* Per-CPU dedicated IRQ stack pointers; pages are allocated in init_IRQ(). */
void *irq_stack[NR_CPUS];
0030 
0031 /*
0032  * 'what should we do if we get a hw irq event on an illegal vector'.
0033  * each architecture has to answer this themselves.
0034  */
0035 void ack_bad_irq(unsigned int irq)
0036 {
0037     printk("unexpected IRQ # %d\n", irq);
0038 }
0039 
/*
 * Error-interrupt counter: bumped by spurious_interrupt(), reported
 * on the "ERR" line by arch_show_interrupts().
 */
atomic_t irq_err_count;
0041 
0042 int arch_show_interrupts(struct seq_file *p, int prec)
0043 {
0044     seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
0045     return 0;
0046 }
0047 
0048 asmlinkage void spurious_interrupt(void)
0049 {
0050     atomic_inc(&irq_err_count);
0051 }
0052 
0053 void __init init_IRQ(void)
0054 {
0055     int i;
0056     unsigned int order = get_order(IRQ_STACK_SIZE);
0057 
0058     for (i = 0; i < NR_IRQS; i++)
0059         irq_set_noprobe(i);
0060 
0061     if (cpu_has_veic)
0062         clear_c0_status(ST0_IM);
0063 
0064     arch_init_irq();
0065 
0066     for_each_possible_cpu(i) {
0067         void *s = (void *)__get_free_pages(GFP_KERNEL, order);
0068 
0069         irq_stack[i] = s;
0070         pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
0071             irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
0072     }
0073 }
0074 
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
 * Warn when the current kernel stack is nearly exhausted.  Reads $sp
 * directly via inline asm and masks it down to the offset within the
 * THREAD_SIZE-aligned stack area; the comparison below treats that
 * offset as the space remaining above the thread_info at the base of
 * the stack.
 */
static inline void check_stack_overflow(void)
{
    unsigned long sp;

    __asm__ __volatile__("move %0, $sp" : "=r" (sp));
    sp &= THREAD_MASK;

    /*
     * Check for stack overflow: is there less than STACK_WARN free?
     * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
     */
    if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
        printk("do_IRQ: stack overflow: %ld\n",
               sp - sizeof(struct thread_info));
        dump_stack();
    }
}
#else
/* Debugging configured out: compiles away to nothing. */
static inline void check_stack_overflow(void) {}
#endif
0096 
0097 
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
    /* Enter hardirq context before any handling takes place. */
    irq_enter();
    /* Cheap diagnostic; no-op unless CONFIG_DEBUG_STACKOVERFLOW. */
    check_stack_overflow();
    /* Dispatch to whatever handler is registered for this irq number. */
    generic_handle_irq(irq);
    /* Leave hardirq context, balancing the irq_enter() above. */
    irq_exit();
}
0110 
#ifdef CONFIG_IRQ_DOMAIN
/*
 * Domain-aware counterpart of do_IRQ(): the caller supplies the raw
 * hardware irq number plus the irq_domain that maps it to a Linux irq,
 * and generic_handle_domain_irq() performs the translation + dispatch.
 * Same bracketing contract as do_IRQ(): irq_enter()/irq_exit() around
 * the handling.
 */
void __irq_entry do_domain_IRQ(struct irq_domain *domain, unsigned int hwirq)
{
    irq_enter();
    check_stack_overflow();
    generic_handle_domain_irq(domain, hwirq);
    irq_exit();
}
#endif