// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <asm/irq.h>

#define NR_CPU_IRQS	32	/* number of irq lines coming in */
#define TIMER0_IRQ	3	/* Fixed by ISA */

/*
 * Early hardware-specific interrupt setup
 * -Platform independent, needed for each CPU (not foldable into init_IRQ)
 * -Called very early (start_kernel -> setup_arch -> setup_processor)
 *
 * What it does:
 * -Optionally sets up the high priority interrupts as Level 2 IRQs
 */
void arc_init_IRQ(void)
{
	unsigned int level_mask = 0, i;

	/* Is the timer a high priority (Level 2 in ARCompact jargon) interrupt? */
	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;

	/*
	 * Write to the register even if no Level 2 IRQs are configured,
	 * to reset it in case the bootloader had mucked with it.
	 */
	write_aux_reg(AUX_IRQ_LEV, level_mask);

	if (level_mask)
		pr_info("Level-2 interrupts bitset %x\n", level_mask);

	/*
	 * Disable all IRQ lines so faulty external hardware won't
	 * trigger an interrupt that the kernel is not ready to handle.
	 */
	for (i = TIMER0_IRQ; i < NR_CPU_IRQS; i++) {
		unsigned int ienb;

		ienb = read_aux_reg(AUX_IENABLE);
		ienb &= ~(1 << i);
		write_aux_reg(AUX_IENABLE, ienb);
	}
}

/*
 * The ARC700 core includes a simple on-chip intc supporting
 * -per-IRQ enable/disable
 * -2 levels of interrupts (high/low)
 * -all interrupts being level triggered
 *
 * To reduce platform code, we assume all IRQs are hooked up directly to the
 * intc. Platforms with an external intc, and hence cascaded IRQs, are free
 * to override the setup below, per IRQ.
 */
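/*
 * A rough sketch of what such a per-IRQ override could look like in platform
 * code (the names below are illustrative only and not part of this file):
 * the cascade IRQ of the external intc is claimed on the in-core intc and
 * demuxed in a chained flow handler, along the lines of
 *
 *	static void plat_cascade_handler(struct irq_desc *desc)
 *	{
 *		chained_irq_enter(irq_desc_get_chip(desc), desc);
 *		// read the external intc status, then call
 *		// generic_handle_domain_irq() for each pending bit
 *		chained_irq_exit(irq_desc_get_chip(desc), desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(cascade_virq, plat_cascade_handler, NULL);
 */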

static void arc_irq_mask(struct irq_data *data)
{
	unsigned int ienb;

	ienb = read_aux_reg(AUX_IENABLE);
	ienb &= ~(1 << data->hwirq);
	write_aux_reg(AUX_IENABLE, ienb);
}

static void arc_irq_unmask(struct irq_data *data)
{
	unsigned int ienb;

	ienb = read_aux_reg(AUX_IENABLE);
	ienb |= (1 << data->hwirq);
	write_aux_reg(AUX_IENABLE, ienb);
}

static struct irq_chip onchip_intc = {
	.name		= "ARC In-core Intc",
	.irq_mask	= arc_irq_mask,
	.irq_unmask	= arc_irq_unmask,
};
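/*
 * For illustration (hypothetical consumer, not part of this file): once the
 * domain below has mapped a hwirq to a Linux virq, a peripheral driver only
 * needs the usual
 *
 *	virq = irq_of_parse_and_map(np, 0);
 *	ret = request_irq(virq, my_isr, 0, "my-dev", dev);
 *
 * and the generic IRQ core invokes .irq_mask/.irq_unmask above as needed.
 */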

static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
			       irq_hw_number_t hw)
{
	switch (hw) {
	case TIMER0_IRQ:
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
		break;
	default:
		irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
	}
	return 0;
}

static const struct irq_domain_ops arc_intc_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = arc_intc_domain_map,
};

static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *root_domain;

	if (parent)
		panic("DeviceTree incore intc not a root irq controller\n");

	root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS,
					    &arc_intc_domain_ops, NULL);
	if (!root_domain)
		panic("root irq domain not avail\n");

	/*
	 * Needed for primary domain lookup to succeed
	 * This is a primary irqchip, and can never have a parent
	 */
	irq_set_default_host(root_domain);

	return 0;
}

IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
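/*
 * The matching device tree node would presumably look something like the
 * minimal sketch below (one interrupt cell, consistent with the onecell
 * xlate used above):
 *
 *	intc: interrupt-controller {
 *		compatible = "snps,arc700-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 */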

/*
 * arch_local_irq_enable - Enable interrupts.
 *
 * 1. Explicitly called to re-enable interrupts
 * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc.,
 *    which may be in a hard ISR itself
 *
 * The semantics of this function change depending on where it is called from:
 *
 * -If called from a hard ISR, it must not invert interrupt priorities,
 *  e.g. suppose TIMER is the high priority (Level 2) IRQ
 *    In the timer hard ISR, timer_interrupt() calls spin_unlock_irq several
 *    times. Here local_irq_enable() should not re-enable lower priority
 *    interrupts.
 * -If called from a soft ISR, it must re-enable all interrupts
 *    Soft ISRs are low priority jobs which can be very slow, thus all IRQs
 *    must be enabled while they run.
 *    Hardware-context-wise we may still be in the L2 ISR (rtie not done yet),
 *    yet we must re-enable both L1 and L2 IRQs.
 *  Another twist is the previous scenario with the flow being
 *     L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
 *     here we must not re-enable L1, as the previous L1 interrupt's h/w
 *     context will get overwritten (this is a deficiency in the ARC700
 *     interrupt mechanism).
 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS	/* Complex version for 2 IRQ levels */

void arch_local_irq_enable(void)
{
	unsigned long flags = arch_local_save_flags();

	if (flags & STATUS_A2_MASK)
		flags |= STATUS_E2_MASK;
	else if (flags & STATUS_A1_MASK)
		flags |= STATUS_E1_MASK;

	arch_local_irq_restore(flags);
}

EXPORT_SYMBOL(arch_local_irq_enable);
#endif