/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2016 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
#include <asm/octeon/cvmx-ciu3-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);

#define CIU3_MBOX_PER_CORE 10

/*
 * The 8 most significant bits of the intsn identify the interrupt major block.
 * Each major block might use its own interrupt domain. Thus 256 domains are
 * needed.
 */
#define MAX_CIU3_DOMAINS	256
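
/*
 * Illustrative example (an assumption, not taken from the code below):
 * if the intsn is 20 bits wide, the major block is intsn >> 12, so an
 * intsn of 0x08001 would select domain[0x08] and intsn2hw[0x08] in the
 * owning octeon_ciu3_info.
 */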

typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);

/* Information for each ciu3 in the system */
struct octeon_ciu3_info {
	u64			ciu3_addr;
	int			node;
	struct irq_domain	*domain[MAX_CIU3_DOMAINS];
	octeon_ciu3_intsn2hw_t	intsn2hw[MAX_CIU3_DOMAINS];
};

/* Each ciu3 in the system uses its own data (one ciu3 per node) */
static struct octeon_ciu3_info	*octeon_ciu3_info_per_node[4];

struct octeon_irq_ciu_domain_data {
	int num_sum;  /* number of sum registers (2 or 3). */
};

/* Register offsets from ciu3_addr */
#define CIU3_CONST		0x220
#define CIU3_IDT_CTL(_idt)	((_idt) * 8 + 0x110000)
#define CIU3_IDT_PP(_idt, _idx)	((_idt) * 32 + (_idx) * 8 + 0x120000)
#define CIU3_IDT_IO(_idt)	((_idt) * 8 + 0x130000)
#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
#define CIU3_DEST_IO_INT(_io)	((_io) * 8 + 0x210000)
#define CIU3_ISC_CTL(_intsn)	((_intsn) * 8 + 0x80000000)
#define CIU3_ISC_W1C(_intsn)	((_intsn) * 8 + 0x90000000)
#define CIU3_ISC_W1S(_intsn)	((_intsn) * 8 + 0xa0000000)

static __read_mostly int octeon_irq_ciu_to_irq[8][64];
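
/*
 * Reverse mapping from (line, bit) back to a Linux irq number: the sum
 * line indexes the first dimension and the bit position the second, so
 * the IP2/IP3/IP4 dispatch handlers below can turn a set SUM bit into
 * an irq with a single table lookup, e.g. octeon_irq_ciu_to_irq[1][17].
 */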

struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
		};
	};
	int gpio_line;
	int current_cpu;	/* Next CPU expected to take this irq */
	int ciu_node;		/* NUMA node number of the CIU */
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}

static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}

static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	struct device_node *of_node;
	int ret;

	of_node = irq_domain_get_of_node(domain);
	if (!of_node)
		return -EINVAL;
	ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node));
	if (ret < 0)
		return ret;

	return irq_domain_associate(domain, irq, line << 6 | bit);
}
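
/*
 * The hwirq value used above packs the sum line into bits 7:6 and the
 * bit position into bits 5:0: hwirq = line << 6 | bit. For example,
 * line 1, bit 17 encodes as (1 << 6) | 17 == 81; octeon_irq_ciu_map()
 * below recovers the pieces with hw >> 6 and hw & 63.
 */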

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
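
/*
 * A note on the pattern above: irq_enable/irq_disable only record the
 * desired state because the CP0 Status change must be made on every
 * core. The irq_bus_lock/irq_bus_sync_unlock pair (normally used for
 * irq chips behind slow buses) is borrowed here so the on_each_cpu()
 * broadcast runs from a context that may sleep.
 */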

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}

static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	const struct cpumask *mask = irq_data_get_affinity_mask(data);
	int weight = cpumask_weight(mask);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, mask);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(mask);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
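
/*
 * next_cpu_for_irq() implements a simple round-robin: when the affinity
 * mask contains more than one CPU, it resumes the walk at
 * cd->current_cpu, wraps by resetting to -1 when cpumask_next() runs
 * past nr_cpu_ids, and stops at the next online CPU in the mask. Each
 * (re)enable therefore targets a different eligible core.
 */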

static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
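
/*
 * Background on the *_v2 variants: chips with EN*_W1S/EN*_W1C registers
 * accept "write 1 to set" and "write 1 to clear" semantics, where only
 * the bits written as 1 change state. A single cvmx_write_csr() of
 * 1ull << cd->bit is then a complete atomic update, so no mirror
 * spinlock is needed; only the mirror bitmap is still kept for the
 * dispatch handlers.
 */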

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}

/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}
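
/*
 * Note the asymmetry in octeon_irq_ciu_ack(): SUM0 has a per-core copy
 * (indexed by coreid * 2), while line 1 acks all go to the single
 * shared CVMX_CIU_INT_SUM1 register.
 */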

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}
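
/*
 * In the CVMX_GPIO_BIT_CFGX value written above, int_type selects edge
 * (1) versus level (0) detection and rx_xor inverts the input, so
 * falling-edge and level-low triggers are built from the rising/high
 * detector plus inversion. fil_cnt/fil_sel configure the input glitch
 * filter to the 140 ns mentioned in the comment.
 */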

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;
	const struct cpumask *mask = irq_data_get_affinity_mask(data);

	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * It has affinity for multiple CPUs; just remove this
		 * CPU from the affinity set.
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on the lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}

static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
#endif

static unsigned int edge_startup(struct irq_data *data)
{
	/*
	 * Ack any pending edge-irq at startup, so there is an _edge_
	 * to fire on when the event reappears.
	 */
	data->chip->irq_ack(data);
	data->chip->irq_enable(data);
	return 0;
}

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};

static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};

static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (irq_domain_get_of_node(d) != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n",
		       node,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}
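
/*
 * A hypothetical device tree consumer matching the xlate above (GPIO
 * pin 4, level-low):
 *
 *	interrupt-parent = <&gpio>;
 *	interrupts = <4 8>;
 *
 * Cell 0 is the pin (0-15) and cell 1 is the trigger code decoded by
 * the switch: 1 rising edge, 2 falling edge, 4 level high, 8 level low.
 */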

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}
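
/*
 * For the CIU domain the two cells are the sum line and the bit within
 * it; a hypothetical "interrupts = <1 17>;" selects line 1, bit 17 and
 * becomes hwirq (1 << 6) | 17.
 */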

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
	    octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	/*
	 * Default to handle_level_irq. If the DT contains a different
	 * trigger type, it will call the irq_set_type callback and
	 * the handler gets updated.
	 */
	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip, handle_level_irq);
	return r;
}

static const struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static const struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};

static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
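
/*
 * Dispatch pattern for octeon_irq_ip2_ciu() and the IP3/IP4 variants
 * below: read the summary register, mask it with the enable state (the
 * per-cpu mirror, or EN2 for IP4), let fls64() pick the highest pending
 * bit, and translate that bit to a Linux irq through
 * octeon_irq_ciu_to_irq[][] before calling do_IRQ().
 */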

static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively.  Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
	    && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = irq_alloc_desc_at(OCTEON_IRQ_MBOX0, -1);
	if (r < 0) {
		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX0");
		goto err;
	}
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = irq_alloc_desc_at(OCTEON_IRQ_MBOX1, -1);
	if (r < 0) {
		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX1");
		goto err;
	}
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	r = irq_alloc_descs(OCTEON_IRQ_WDOG0, OCTEON_IRQ_WDOG0, 16, -1);
	if (r < 0) {
		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_WDOGx");
		goto err;
	}
	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}
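
/*
 * Init functions with this (node, parent) signature are of_irq_init()
 * callbacks; in the full driver they are registered against their
 * compatible strings with IRQCHIP_DECLARE() so they run during early
 * boot interrupt setup.
 */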

static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	/*
	 * Clear the OF_POPULATED flag that was set by of_irq_init()
	 * so that all GPIO devices will be probed.
	 */
	of_node_clear_flag(gpio_node, OF_POPULATED);

	return 0;
}
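
/*
 * Worked example of the base_hwirq packing above: with
 * #interrupt-cells = <2> and a hypothetical "interrupts = <0 16>;",
 * base_hwirq = (0 << 6) | 16, so GPIO pin 0 maps to CIU line 0, bit 16,
 * matching the line/bit split done in octeon_irq_gpio_map().
 */
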
/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
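
/*
 * The CIU2 enable registers for the different sum lines sit at a fixed
 * 0x1000-byte stride from the IP2 WRKQ base, so the helpers here and
 * below compute en_addr as the W1S/W1C base plus 0x1000 * cd->line
 * instead of using a per-line register macro.
 */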
1690 
1691 static void octeon_irq_ciu2_enable_local(struct irq_data *data)
1692 {
1693     u64 mask;
1694     u64 en_addr;
1695     int coreid = cvmx_get_core_num();
1696     struct octeon_ciu_chip_data *cd;
1697 
1698     cd = irq_data_get_irq_chip_data(data);
1699     mask = 1ull << (cd->bit);
1700 
1701     en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
1702         (0x1000ull * cd->line);
1703     cvmx_write_csr(en_addr, mask);
1704 
1705 }
1706 
1707 static void octeon_irq_ciu2_disable_local(struct irq_data *data)
1708 {
1709     u64 mask;
1710     u64 en_addr;
1711     int coreid = cvmx_get_core_num();
1712     struct octeon_ciu_chip_data *cd;
1713 
1714     cd = irq_data_get_irq_chip_data(data);
1715     mask = 1ull << (cd->bit);
1716 
1717     en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
1718         (0x1000ull * cd->line);
1719     cvmx_write_csr(en_addr, mask);
1721 }
1722 
1723 static void octeon_irq_ciu2_ack(struct irq_data *data)
1724 {
1725     u64 mask;
1726     u64 en_addr;
1727     int coreid = cvmx_get_core_num();
1728     struct octeon_ciu_chip_data *cd;
1729 
1730     cd = irq_data_get_irq_chip_data(data);
1731     mask = 1ull << (cd->bit);
1732 
1733     en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
1734     cvmx_write_csr(en_addr, mask);
1736 }
1737 
1738 static void octeon_irq_ciu2_disable_all(struct irq_data *data)
1739 {
1740     int cpu;
1741     u64 mask;
1742     struct octeon_ciu_chip_data *cd;
1743 
1744     cd = irq_data_get_irq_chip_data(data);
1745     mask = 1ull << (cd->bit);
1746 
1747     for_each_online_cpu(cpu) {
1748         u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
1749             octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
1750         cvmx_write_csr(en_addr, mask);
1751     }
1752 }
1753 
1754 static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
1755 {
1756     int cpu;
1757     u64 mask;
1758 
1759     mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1760 
1761     for_each_online_cpu(cpu) {
1762         u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
1763             octeon_coreid_for_cpu(cpu));
1764         cvmx_write_csr(en_addr, mask);
1765     }
1766 }
1767 
1768 static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
1769 {
1770     int cpu;
1771     u64 mask;
1772 
1773     mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1774 
1775     for_each_online_cpu(cpu) {
1776         u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
1777             octeon_coreid_for_cpu(cpu));
1778         cvmx_write_csr(en_addr, mask);
1779     }
1780 }
1781 
1782 static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
1783 {
1784     u64 mask;
1785     u64 en_addr;
1786     int coreid = cvmx_get_core_num();
1787 
1788     mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1789     en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
1790     cvmx_write_csr(en_addr, mask);
1791 }
1792 
1793 static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
1794 {
1795     u64 mask;
1796     u64 en_addr;
1797     int coreid = cvmx_get_core_num();
1798 
1799     mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
1800     en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
1801     cvmx_write_csr(en_addr, mask);
1802 }
1803 
1804 #ifdef CONFIG_SMP
1805 static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
1806                     const struct cpumask *dest, bool force)
1807 {
1808     int cpu;
1809     bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
1810     u64 mask;
1811     struct octeon_ciu_chip_data *cd;
1812 
1813     if (!enable_one)
1814         return 0;
1815 
1816     cd = irq_data_get_irq_chip_data(data);
1817     mask = 1ull << cd->bit;
1818 
1819     for_each_online_cpu(cpu) {
1820         u64 en_addr;
1821         if (cpumask_test_cpu(cpu, dest) && enable_one) {
1822             enable_one = false;
1823             en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
1824                 octeon_coreid_for_cpu(cpu)) +
1825                 (0x1000ull * cd->line);
1826         } else {
1827             en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
1828                 octeon_coreid_for_cpu(cpu)) +
1829                 (0x1000ull * cd->line);
1830         }
1831         cvmx_write_csr(en_addr, mask);
1832     }
1833 
1834     return 0;
1835 }
1836 #endif
1837 
1838 static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
1839 {
1840     octeon_irq_gpio_setup(data);
1841     octeon_irq_ciu2_enable(data);
1842 }
1843 
1844 static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
1845 {
1846     struct octeon_ciu_chip_data *cd;
1847 
1848     cd = irq_data_get_irq_chip_data(data);
1849 
1850     cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
1851 
1852     octeon_irq_ciu2_disable_all(data);
1853 }
1854 
1855 static struct irq_chip octeon_irq_chip_ciu2 = {
1856     .name = "CIU2",
1857     .irq_enable = octeon_irq_ciu2_enable,
1858     .irq_disable = octeon_irq_ciu2_disable_all,
1859     .irq_mask = octeon_irq_ciu2_disable_local,
1860     .irq_unmask = octeon_irq_ciu2_enable,
1861 #ifdef CONFIG_SMP
1862     .irq_set_affinity = octeon_irq_ciu2_set_affinity,
1863     .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
1864 #endif
1865 };
1866 
1867 static struct irq_chip octeon_irq_chip_ciu2_edge = {
1868     .name = "CIU2-E",
1869     .irq_enable = octeon_irq_ciu2_enable,
1870     .irq_disable = octeon_irq_ciu2_disable_all,
1871     .irq_ack = octeon_irq_ciu2_ack,
1872     .irq_mask = octeon_irq_ciu2_disable_local,
1873     .irq_unmask = octeon_irq_ciu2_enable,
1874 #ifdef CONFIG_SMP
1875     .irq_set_affinity = octeon_irq_ciu2_set_affinity,
1876     .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
1877 #endif
1878 };
1879 
1880 static struct irq_chip octeon_irq_chip_ciu2_mbox = {
1881     .name = "CIU2-M",
1882     .irq_enable = octeon_irq_ciu2_mbox_enable_all,
1883     .irq_disable = octeon_irq_ciu2_mbox_disable_all,
1884     .irq_ack = octeon_irq_ciu2_mbox_disable_local,
1885     .irq_eoi = octeon_irq_ciu2_mbox_enable_local,
1886 
1887     .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
1888     .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
1889     .flags = IRQCHIP_ONOFFLINE_ENABLED,
1890 };
1891 
1892 static struct irq_chip octeon_irq_chip_ciu2_wd = {
1893     .name = "CIU2-W",
1894     .irq_enable = octeon_irq_ciu2_wd_enable,
1895     .irq_disable = octeon_irq_ciu2_disable_all,
1896     .irq_mask = octeon_irq_ciu2_disable_local,
1897     .irq_unmask = octeon_irq_ciu2_enable_local,
1898 };
1899 
1900 static struct irq_chip octeon_irq_chip_ciu2_gpio = {
1901     .name = "CIU-GPIO",
1902     .irq_enable = octeon_irq_ciu2_enable_gpio,
1903     .irq_disable = octeon_irq_ciu2_disable_gpio,
1904     .irq_ack = octeon_irq_ciu_gpio_ack,
1905     .irq_mask = octeon_irq_ciu2_disable_local,
1906     .irq_unmask = octeon_irq_ciu2_enable,
1907     .irq_set_type = octeon_irq_ciu_gpio_set_type,
1908 #ifdef CONFIG_SMP
1909     .irq_set_affinity = octeon_irq_ciu2_set_affinity,
1910     .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
1911 #endif
1912     .flags = IRQCHIP_SET_TYPE_MASKED,
1913 };
1914 
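     /*
      * A CIU2 interrupt specifier carries two cells: the CIU line and
      * the bit within that line.  A hypothetical consumer node (the
      * &ciu2 label is illustrative) would look like:
      *
      *    interrupt-parent = <&ciu2>;
      *    interrupts = <3 8>;    - line 3 (MIO), bit 8, i.e. TIMER0
      */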
1915 static int octeon_irq_ciu2_xlat(struct irq_domain *d,
1916                 struct device_node *node,
1917                 const u32 *intspec,
1918                 unsigned int intsize,
1919                 unsigned long *out_hwirq,
1920                 unsigned int *out_type)
1921 {
1922     unsigned int ciu, bit;
1923 
1924     ciu = intspec[0];
1925     bit = intspec[1];
1926 
1927     *out_hwirq = (ciu << 6) | bit;
1928     *out_type = 0;
1929 
1930     return 0;
1931 }
1932 
1933 static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
1934 {
1935     bool edge = false;
1936 
1937     if (line == 3) /* MIO */
1938         switch (bit) {
1939         case 2:  /* IPD_DRP */
1940         case 8 ... 11: /* Timers */
1941         case 48: /* PTP */
1942             edge = true;
1943             break;
1944         default:
1945             break;
1946         }
1947     else if (line == 6) /* PKT */
1948         switch (bit) {
1949         case 52 ... 53: /* ILK_DRP */
1950         case 8 ... 12:  /* GMX_DRP */
1951             edge = true;
1952             break;
1953         default:
1954             break;
1955         }
1956     return edge;
1957 }
1958 
1959 static int octeon_irq_ciu2_map(struct irq_domain *d,
1960                    unsigned int virq, irq_hw_number_t hw)
1961 {
1962     unsigned int line = hw >> 6;
1963     unsigned int bit = hw & 63;
1964 
1965     /*
1966      * Don't map the irq if it is reserved for GPIO.
1967      * (Line 7 carries the GPIO lines.)
1968      */
1969     if (line == 7)
1970         return 0;
1971 
1972     if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
1973         return -EINVAL;
1974 
1975     if (octeon_irq_ciu2_is_edge(line, bit))
1976         octeon_irq_set_ciu_mapping(virq, line, bit, 0,
1977                        &octeon_irq_chip_ciu2_edge,
1978                        handle_edge_irq);
1979     else
1980         octeon_irq_set_ciu_mapping(virq, line, bit, 0,
1981                        &octeon_irq_chip_ciu2,
1982                        handle_level_irq);
1983 
1984     return 0;
1985 }
1986 
1987 static const struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
1988     .map = octeon_irq_ciu2_map,
1989     .unmap = octeon_irq_free_cd,
1990     .xlate = octeon_irq_ciu2_xlat,
1991 };
1992 
1993 static void octeon_irq_ciu2(void)
1994 {
1995     int line;
1996     int bit;
1997     int irq;
1998     u64 src_reg, src, sum;
1999     const unsigned long core_id = cvmx_get_core_num();
2000 
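         /* Only the low 8 bits of the IP2 sum are used: one per CIU2 line. */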
2001     sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;
2002 
2003     if (unlikely(!sum))
2004         goto spurious;
2005 
2006     line = fls64(sum) - 1;
2007     src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
2008     src = cvmx_read_csr(src_reg);
2009 
2010     if (unlikely(!src))
2011         goto spurious;
2012 
2013     bit = fls64(src) - 1;
2014     irq = octeon_irq_ciu_to_irq[line][bit];
2015     if (unlikely(!irq))
2016         goto spurious;
2017 
2018     do_IRQ(irq);
2019     goto out;
2020 
2021 spurious:
2022     spurious_interrupt();
2023 out:
2024     /* CN68XX pass 1.x has an erratum: accessing the ACK registers
2025      * can stop interrupts from propagating. */
2026     if (OCTEON_IS_MODEL(OCTEON_CN68XX))
2027         cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
2028     else
2029         cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
2030     return;
2031 }
2032 
2033 static void octeon_irq_ciu2_mbox(void)
2034 {
2035     int line;
2036 
2037     const unsigned long core_id = cvmx_get_core_num();
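         /* The four mbox interrupts occupy the top four bits of the IP3 sum. */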
2038     u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;
2039 
2040     if (unlikely(!sum))
2041         goto spurious;
2042 
2043     line = fls64(sum) - 1;
2044 
2045     do_IRQ(OCTEON_IRQ_MBOX0 + line);
2046     goto out;
2047 
2048 spurious:
2049     spurious_interrupt();
2050 out:
2051     /* CN68XX pass 1.x has an erratum: accessing the ACK registers
2052      * can stop interrupts from propagating. */
2053     if (OCTEON_IS_MODEL(OCTEON_CN68XX))
2054         cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
2055     else
2056         cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
2057     return;
2058 }
2059 
2060 static int __init octeon_irq_init_ciu2(
2061     struct device_node *ciu_node, struct device_node *parent)
2062 {
2063     unsigned int i, r;
2064     struct irq_domain *ciu_domain = NULL;
2065 
2066     octeon_irq_init_ciu2_percpu();
2067     octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
2068 
2069     octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
2070     octeon_irq_ip2 = octeon_irq_ciu2;
2071     octeon_irq_ip3 = octeon_irq_ciu2_mbox;
2072     octeon_irq_ip4 = octeon_irq_ip4_mask;
2073 
2074     /* Mips internal */
2075     octeon_irq_init_core();
2076 
2077     ciu_domain = irq_domain_add_tree(
2078         ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
2079     irq_set_default_host(ciu_domain);
2080 
2081     /* CIU2 workqueue interrupts */
2082     for (i = 0; i < 64; i++) {
2083         r = octeon_irq_force_ciu_mapping(
2084             ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
2085         if (r)
2086             goto err;
2087     }
2088 
2089     for (i = 0; i < 32; i++) {
2090         r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
2091             &octeon_irq_chip_ciu2_wd, handle_level_irq);
2092         if (r)
2093             goto err;
2094     }
2095 
2096     for (i = 0; i < 4; i++) {
2097         r = octeon_irq_force_ciu_mapping(
2098             ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
2099         if (r)
2100             goto err;
2101     }
2102 
2103     for (i = 0; i < 4; i++) {
2104         r = octeon_irq_force_ciu_mapping(
2105             ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
2106         if (r)
2107             goto err;
2108     }
2109 
2110     for (i = 0; i < 4; i++) {
2111         r = octeon_irq_force_ciu_mapping(
2112             ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
2113         if (r)
2114             goto err;
2115     }
2116 
2117     irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
2118     irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
2119     irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
2120     irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
2121 
2122     /* Enable the CIU lines */
2123     set_c0_status(STATUSF_IP3 | STATUSF_IP2);
2124     clear_c0_status(STATUSF_IP4);
2125     return 0;
2126 err:
2127     return r;
2128 }
2129 
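     /*
      * The CIB blocks are small secondary interrupt controllers: a RAW
      * register latches the sources and an EN register gates them onto
      * a single parent CIU line.  octeon_irq_cib_handler() below fans
      * the chained interrupt back out to the per-bit mappings.
      */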
2130 struct octeon_irq_cib_host_data {
2131     raw_spinlock_t lock;
2132     u64 raw_reg;
2133     u64 en_reg;
2134     int max_bits;
2135 };
2136 
2137 struct octeon_irq_cib_chip_data {
2138     struct octeon_irq_cib_host_data *host_data;
2139     int bit;
2140 };
2141 
2142 static void octeon_irq_cib_enable(struct irq_data *data)
2143 {
2144     unsigned long flags;
2145     u64 en;
2146     struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
2147     struct octeon_irq_cib_host_data *host_data = cd->host_data;
2148 
2149     raw_spin_lock_irqsave(&host_data->lock, flags);
2150     en = cvmx_read_csr(host_data->en_reg);
2151     en |= 1ull << cd->bit;
2152     cvmx_write_csr(host_data->en_reg, en);
2153     raw_spin_unlock_irqrestore(&host_data->lock, flags);
2154 }
2155 
2156 static void octeon_irq_cib_disable(struct irq_data *data)
2157 {
2158     unsigned long flags;
2159     u64 en;
2160     struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
2161     struct octeon_irq_cib_host_data *host_data = cd->host_data;
2162 
2163     raw_spin_lock_irqsave(&host_data->lock, flags);
2164     en = cvmx_read_csr(host_data->en_reg);
2165     en &= ~(1ull << cd->bit);
2166     cvmx_write_csr(host_data->en_reg, en);
2167     raw_spin_unlock_irqrestore(&host_data->lock, flags);
2168 }
2169 
2170 static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
2171 {
2172     irqd_set_trigger_type(data, t);
2173     return IRQ_SET_MASK_OK;
2174 }
2175 
2176 static struct irq_chip octeon_irq_chip_cib = {
2177     .name = "CIB",
2178     .irq_enable = octeon_irq_cib_enable,
2179     .irq_disable = octeon_irq_cib_disable,
2180     .irq_mask = octeon_irq_cib_disable,
2181     .irq_unmask = octeon_irq_cib_enable,
2182     .irq_set_type = octeon_irq_cib_set_type,
2183 };
2184 
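     /*
      * A CIB specifier is the bit number plus an optional trigger type
      * in the usual DT encoding (1 = rising edge, 4 = high level).  A
      * hypothetical consumer would use e.g. interrupts = <5 4>;
      */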
2185 static int octeon_irq_cib_xlat(struct irq_domain *d,
2186                    struct device_node *node,
2187                    const u32 *intspec,
2188                    unsigned int intsize,
2189                    unsigned long *out_hwirq,
2190                    unsigned int *out_type)
2191 {
2192     unsigned int type = 0;
2193 
2194     if (intsize == 2)
2195         type = intspec[1];
2196 
2197     switch (type) {
2198     case 0: /* unofficial value, but we might as well let it work. */
2199     case 4: /* official value for level triggering. */
2200         *out_type = IRQ_TYPE_LEVEL_HIGH;
2201         break;
2202     case 1: /* official value for edge triggering. */
2203         *out_type = IRQ_TYPE_EDGE_RISING;
2204         break;
2205     default: /* Nothing else is acceptable. */
2206         return -EINVAL;
2207     }
2208 
2209     *out_hwirq = intspec[0];
2210 
2211     return 0;
2212 }
2213 
2214 static int octeon_irq_cib_map(struct irq_domain *d,
2215                   unsigned int virq, irq_hw_number_t hw)
2216 {
2217     struct octeon_irq_cib_host_data *host_data = d->host_data;
2218     struct octeon_irq_cib_chip_data *cd;
2219 
2220     if (hw >= host_data->max_bits) {
2221         pr_err("ERROR: %s mapping %u is too big!\n",
2222                irq_domain_get_of_node(d)->name, (unsigned)hw);
2223         return -EINVAL;
2224     }
2225 
2226     cd = kzalloc(sizeof(*cd), GFP_KERNEL);
2227     if (!cd)
2228         return -ENOMEM;
2229 
2230     cd->host_data = host_data;
2231     cd->bit = hw;
2232 
2233     irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
2234                  handle_simple_irq);
2235     irq_set_chip_data(virq, cd);
2236     return 0;
2237 }
2238 
2239 static const struct irq_domain_ops octeon_irq_domain_cib_ops = {
2240     .map = octeon_irq_cib_map,
2241     .unmap = octeon_irq_free_cd,
2242     .xlate = octeon_irq_cib_xlat,
2243 };
2244 
2245 /* Chained handler: fan the CIB summary interrupt out to the mapped bits. */
2246 static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
2247 {
2248     u64 en;
2249     u64 raw;
2250     u64 bits;
2251     int i;
2252     int irq;
2253     struct irq_domain *cib_domain = data;
2254     struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
2255 
2256     en = cvmx_read_csr(host_data->en_reg);
2257     raw = cvmx_read_csr(host_data->raw_reg);
2258 
2259     bits = en & raw;
2260 
2261     for (i = 0; i < host_data->max_bits; i++) {
2262         if ((bits & 1ull << i) == 0)
2263             continue;
2264         irq = irq_find_mapping(cib_domain, i);
2265         if (!irq) {
2266             unsigned long flags;
2267 
2268             pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
2269                 i, host_data->raw_reg);
2270             raw_spin_lock_irqsave(&host_data->lock, flags);
2271             en = cvmx_read_csr(host_data->en_reg);
2272             en &= ~(1ull << i);
2273             cvmx_write_csr(host_data->en_reg, en);
2274             cvmx_write_csr(host_data->raw_reg, 1ull << i);
2275             raw_spin_unlock_irqrestore(&host_data->lock, flags);
2276         } else {
2277             struct irq_desc *desc = irq_to_desc(irq);
2278             struct irq_data *irq_data = irq_desc_get_irq_data(desc);
2279             /* If edge, acknowledge the bit we will be sending. */
2280             if (irqd_get_trigger_type(irq_data) &
2281                 IRQ_TYPE_EDGE_BOTH)
2282                 cvmx_write_csr(host_data->raw_reg, 1ull << i);
2283             generic_handle_irq_desc(desc);
2284         }
2285     }
2286 
2287     return IRQ_HANDLED;
2288 }
2289 
2290 static int __init octeon_irq_init_cib(struct device_node *ciu_node,
2291                       struct device_node *parent)
2292 {
2293     const __be32 *addr;
2294     u32 val;
2295     struct octeon_irq_cib_host_data *host_data;
2296     int parent_irq;
2297     int r;
2298     struct irq_domain *cib_domain;
2299 
2300     parent_irq = irq_of_parse_and_map(ciu_node, 0);
2301     if (!parent_irq) {
2302         pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
2303             ciu_node);
2304         return -EINVAL;
2305     }
2306 
2307     host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
2308     if (!host_data)
2309         return -ENOMEM;
2310     raw_spin_lock_init(&host_data->lock);
2311 
2312     addr = of_get_address(ciu_node, 0, NULL, NULL);
2313     if (!addr) {
2314         pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
             kfree(host_data);
2315         return -EINVAL;
2316     }
2317     host_data->raw_reg = (u64)phys_to_virt(
2318         of_translate_address(ciu_node, addr));
2319 
2320     addr = of_get_address(ciu_node, 1, NULL, NULL);
2321     if (!addr) {
2322         pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
             kfree(host_data);
2323         return -EINVAL;
2324     }
2325     host_data->en_reg = (u64)phys_to_virt(
2326         of_translate_address(ciu_node, addr));
2327 
2328     r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
2329     if (r) {
2330         pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
2331             ciu_node);
             kfree(host_data);
2332         return r;
2333     }
2334     host_data->max_bits = val;
2335 
2336     cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
2337                        &octeon_irq_domain_cib_ops,
2338                        host_data);
2339     if (!cib_domain) {
2340         pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
             kfree(host_data);
2341         return -ENOMEM;
2342     }
2343 
2344     cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
2345     cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
2346 
2347     r = request_irq(parent_irq, octeon_irq_cib_handler,
2348             IRQF_NO_THREAD, "cib", cib_domain);
2349     if (r) {
2350         pr_err("request_irq cib failed %d\n", r);
             irq_domain_remove(cib_domain);
             kfree(host_data);
2351         return r;
2352     }
2353     pr_info("CIB interrupt controller probed: %llx %d\n",
2354         host_data->raw_reg, host_data->max_bits);
2355     return 0;
2356 }
2357 
2358 int octeon_irq_ciu3_xlat(struct irq_domain *d,
2359              struct device_node *node,
2360              const u32 *intspec,
2361              unsigned int intsize,
2362              unsigned long *out_hwirq,
2363              unsigned int *out_type)
2364 {
2365     struct octeon_ciu3_info *ciu3_info = d->host_data;
2366     unsigned int hwirq, type, intsn_major;
2367     union cvmx_ciu3_iscx_ctl isc;
2368 
2369     if (intsize < 2)
2370         return -EINVAL;
2371     hwirq = intspec[0];
2372     type = intspec[1];
2373 
2374     if (hwirq >= (1 << 20))
2375         return -EINVAL;
2376 
2377     intsn_major = hwirq >> 12;
2378     switch (intsn_major) {
2379     case 0x04: /* Software handled separately. */
2380         return -EINVAL;
2381     default:
2382         break;
2383     }
2384 
2385     isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
2386     if (!isc.s.imp)
2387         return -EINVAL;
2388 
2389     switch (type) {
2390     case 4: /* official value for level triggering. */
2391         *out_type = IRQ_TYPE_LEVEL_HIGH;
2392         break;
2393     case 0: /* unofficial value, but we might as well let it work. */
2394     case 1: /* official value for edge triggering. */
2395         *out_type = IRQ_TYPE_EDGE_RISING;
2396         break;
2397     default: /* Nothing else is acceptable. */
2398         return -EINVAL;
2399     }
2400 
2401     *out_hwirq = hwirq;
2402 
2403     return 0;
2404 }
2405 
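     /*
      * (Re)target an intsn: first W1C its EN bit to quiesce it, then
      * rewrite ISC_CTL with EN set and the IDT of the chosen CPU.  The
      * read-back ensures the write reached the CIU before returning.
      */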
2406 void octeon_irq_ciu3_enable(struct irq_data *data)
2407 {
2408     int cpu;
2409     union cvmx_ciu3_iscx_ctl isc_ctl;
2410     union cvmx_ciu3_iscx_w1c isc_w1c;
2411     u64 isc_ctl_addr;
2412 
2413     struct octeon_ciu_chip_data *cd;
2414 
2415     cpu = next_cpu_for_irq(data);
2416 
2417     cd = irq_data_get_irq_chip_data(data);
2418 
2419     isc_w1c.u64 = 0;
2420     isc_w1c.s.en = 1;
2421     cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
2422 
2423     isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
2424     isc_ctl.u64 = 0;
2425     isc_ctl.s.en = 1;
2426     isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
2427     cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
2428     cvmx_read_csr(isc_ctl_addr);
2429 }
2430 
2431 void octeon_irq_ciu3_disable(struct irq_data *data)
2432 {
2433     u64 isc_ctl_addr;
2434     union cvmx_ciu3_iscx_w1c isc_w1c;
2435 
2436     struct octeon_ciu_chip_data *cd;
2437 
2438     cd = irq_data_get_irq_chip_data(data);
2439 
2440     isc_w1c.u64 = 0;
2441     isc_w1c.s.en = 1;
2442 
2443     isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
2444     cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
2445     cvmx_write_csr(isc_ctl_addr, 0);
2446     cvmx_read_csr(isc_ctl_addr);
2447 }
2448 
2449 void octeon_irq_ciu3_ack(struct irq_data *data)
2450 {
2451     u64 isc_w1c_addr;
2452     union cvmx_ciu3_iscx_w1c isc_w1c;
2453     struct octeon_ciu_chip_data *cd;
2454     u32 trigger_type = irqd_get_trigger_type(data);
2455 
2456     /*
2457      * We use a single irq_chip, so acking a level interrupt is a
2458      * no-op; only edge interrupts need their RAW bit cleared.
2459      */
2460     if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
2461         return;
2462 
2463     cd = irq_data_get_irq_chip_data(data);
2464 
2465     isc_w1c.u64 = 0;
2466     isc_w1c.s.raw = 1;
2467 
2468     isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
2469     cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
2470     cvmx_read_csr(isc_w1c_addr);
2471 }
2472 
2473 void octeon_irq_ciu3_mask(struct irq_data *data)
2474 {
2475     union cvmx_ciu3_iscx_w1c isc_w1c;
2476     u64 isc_w1c_addr;
2477     struct octeon_ciu_chip_data *cd;
2478 
2479     cd = irq_data_get_irq_chip_data(data);
2480 
2481     isc_w1c.u64 = 0;
2482     isc_w1c.s.en = 1;
2483 
2484     isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
2485     cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
2486     cvmx_read_csr(isc_w1c_addr);
2487 }
2488 
2489 void octeon_irq_ciu3_mask_ack(struct irq_data *data)
2490 {
2491     union cvmx_ciu3_iscx_w1c isc_w1c;
2492     u64 isc_w1c_addr;
2493     struct octeon_ciu_chip_data *cd;
2494     u32 trigger_type = irqd_get_trigger_type(data);
2495 
2496     cd = irq_data_get_irq_chip_data(data);
2497 
2498     isc_w1c.u64 = 0;
2499     isc_w1c.s.en = 1;
2500 
2501     /*
2502      * We use a single irq_chip, so only ack an edge (!level)
2503      * interrupt.
2504      */
2505     if (trigger_type & IRQ_TYPE_EDGE_BOTH)
2506         isc_w1c.s.raw = 1;
2507 
2508     isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
2509     cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
2510     cvmx_read_csr(isc_w1c_addr);
2511 }
2512 
2513 #ifdef CONFIG_SMP
2514 static int octeon_irq_ciu3_set_affinity(struct irq_data *data,
2515                     const struct cpumask *dest, bool force)
2516 {
2517     union cvmx_ciu3_iscx_ctl isc_ctl;
2518     union cvmx_ciu3_iscx_w1c isc_w1c;
2519     u64 isc_ctl_addr;
2520     int cpu;
2521     bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
2522     struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
2523 
2524     if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
2525         return -EINVAL;
2526 
2527     if (!enable_one)
2528         return IRQ_SET_MASK_OK;
2529 
2530     /* Pick the first requested CPU; fall back to the current one. */
2531     cpu = cpumask_first(dest);
2532     if (cpu >= nr_cpu_ids)
2533         cpu = smp_processor_id();
2534     cd->current_cpu = cpu;
2535 
2536     isc_w1c.u64 = 0;
2537     isc_w1c.s.en = 1;
2538     cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
2539 
2540     isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
2541     isc_ctl.u64 = 0;
2542     isc_ctl.s.en = 1;
2543     isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
2544     cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
2545     cvmx_read_csr(isc_ctl_addr);
2546 
2547     return IRQ_SET_MASK_OK;
2548 }
2549 #endif
2550 
2551 static struct irq_chip octeon_irq_chip_ciu3 = {
2552     .name = "CIU3",
2553     .irq_startup = edge_startup,
2554     .irq_enable = octeon_irq_ciu3_enable,
2555     .irq_disable = octeon_irq_ciu3_disable,
2556     .irq_ack = octeon_irq_ciu3_ack,
2557     .irq_mask = octeon_irq_ciu3_mask,
2558     .irq_mask_ack = octeon_irq_ciu3_mask_ack,
2559     .irq_unmask = octeon_irq_ciu3_enable,
2560     .irq_set_type = octeon_irq_ciu_set_type,
2561 #ifdef CONFIG_SMP
2562     .irq_set_affinity = octeon_irq_ciu3_set_affinity,
2563     .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
2564 #endif
2565 };
2566 
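     /*
      * Non-static so that major-block drivers can map their interrupts
      * with a specialized irq_chip; the default CIU3 domain ops below
      * simply pass octeon_irq_chip_ciu3.
      */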
2567 int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
2568              irq_hw_number_t hw, struct irq_chip *chip)
2569 {
2570     struct octeon_ciu3_info *ciu3_info = d->host_data;
2571     struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
2572                                ciu3_info->node);
2573     if (!cd)
2574         return -ENOMEM;
2575     cd->intsn = hw;
2576     cd->current_cpu = -1;
2577     cd->ciu3_addr = ciu3_info->ciu3_addr;
2578     cd->ciu_node = ciu3_info->node;
2579     irq_set_chip_and_handler(virq, chip, handle_edge_irq);
2580     irq_set_chip_data(virq, cd);
2581 
2582     return 0;
2583 }
2584 
2585 static int octeon_irq_ciu3_map(struct irq_domain *d,
2586                    unsigned int virq, irq_hw_number_t hw)
2587 {
2588     return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
2589 }
2590 
2591 static const struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
2592     .map = octeon_irq_ciu3_map,
2593     .unmap = octeon_irq_free_cd,
2594     .xlate = octeon_irq_ciu3_xlat,
2595 };
2596 
2597 static void octeon_irq_ciu3_ip2(void)
2598 {
2599     union cvmx_ciu3_destx_pp_int dest_pp_int;
2600     struct octeon_ciu3_info *ciu3_info;
2601     u64 ciu3_addr;
2602 
2603     ciu3_info = __this_cpu_read(octeon_ciu3_info);
2604     ciu3_addr = ciu3_info->ciu3_addr;
2605 
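         /*
          * Each core owns consecutive DEST_PP_INT slots: 3 * core is
          * its IP2 slot, 3 * core + 1 (read by the mbox handler
          * below) its IP3 slot.
          */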
2606     dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));
2607 
2608     if (likely(dest_pp_int.s.intr)) {
2609         irq_hw_number_t intsn = dest_pp_int.s.intsn;
2610         irq_hw_number_t hw;
2611         struct irq_domain *domain;
2612         /* Get the domain to use from the major block */
2613         int block = intsn >> 12;
2614         int ret;
2615 
2616         domain = ciu3_info->domain[block];
2617         if (ciu3_info->intsn2hw[block])
2618             hw = ciu3_info->intsn2hw[block](domain, intsn);
2619         else
2620             hw = intsn;
2621 
2622         irq_enter();
2623         ret = generic_handle_domain_irq(domain, hw);
2624         irq_exit();
2625 
2626         if (ret < 0) {
2627             union cvmx_ciu3_iscx_w1c isc_w1c;
2628             u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
2629 
2630             isc_w1c.u64 = 0;
2631             isc_w1c.s.en = 1;
2632             cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
2633             cvmx_read_csr(isc_w1c_addr);
2634             spurious_interrupt();
2635         }
2636     } else {
2637         spurious_interrupt();
2638     }
2639 }
2640 
2641 /*
2642  * 10 mbox per core starting from zero.
2643  * Base mbox is core * 10
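      * e.g. core 2 gets intsn 0x04000 + 2 * 10 = 0x04014 for its mbox 0.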
2644  */
2645 static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
2646 {
2647     /* SW (mbox) are 0x04 in bits 12..19 */
2648     return 0x04000 + CIU3_MBOX_PER_CORE * core;
2649 }
2650 
2651 static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
2652 {
2653     return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
2654 }
2655 
2656 static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
2657 {
2658     int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
2659 
2660     return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
2661 }
2662 
2663 static void octeon_irq_ciu3_mbox(void)
2664 {
2665     union cvmx_ciu3_destx_pp_int dest_pp_int;
2666     struct octeon_ciu3_info *ciu3_info;
2667     u64 ciu3_addr;
2668     int core = cvmx_get_local_core_num();
2669 
2670     ciu3_info = __this_cpu_read(octeon_ciu3_info);
2671     ciu3_addr = ciu3_info->ciu3_addr;
2672 
2673     dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));
2674 
2675     if (likely(dest_pp_int.s.intr)) {
2676         irq_hw_number_t intsn = dest_pp_int.s.intsn;
2677         int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);
2678 
2679         if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
2680             do_IRQ(mbox + OCTEON_IRQ_MBOX0);
2681         } else {
2682             union cvmx_ciu3_iscx_w1c isc_w1c;
2683             u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
2684 
2685             isc_w1c.u64 = 0;
2686             isc_w1c.s.en = 1;
2687             cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
2688             cvmx_read_csr(isc_w1c_addr);
2689             spurious_interrupt();
2690         }
2691     } else {
2692         spurious_interrupt();
2693     }
2694 }
2695 
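     /*
      * Raise mailbox @mbox on @cpu by setting the RAW bit of the
      * matching intsn through the W1S register; the target core takes
      * it on IP3 via octeon_irq_ciu3_mbox() above.  This is the send
      * side of the mailbox IPI mechanism.
      */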
2696 void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
2697 {
2698     struct octeon_ciu3_info *ciu3_info;
2699     unsigned int intsn;
2700     union cvmx_ciu3_iscx_w1s isc_w1s;
2701     u64 isc_w1s_addr;
2702 
2703     if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
2704         return;
2705 
2706     intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
2707     ciu3_info = per_cpu(octeon_ciu3_info, cpu);
2708     isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);
2709 
2710     isc_w1s.u64 = 0;
2711     isc_w1s.s.raw = 1;
2712 
2713     cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
2714     cvmx_read_csr(isc_w1s_addr);
2715 }
2716 
2717 static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
2718 {
2719     struct octeon_ciu3_info *ciu3_info;
2720     unsigned int intsn;
2721     u64 isc_ctl_addr, isc_w1c_addr;
2722     union cvmx_ciu3_iscx_ctl isc_ctl;
2723     unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
2724 
2725     intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
2726     ciu3_info = per_cpu(octeon_ciu3_info, cpu);
2727     isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
2728     isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
2729 
2730     isc_ctl.u64 = 0;
2731     isc_ctl.s.en = 1;
2732 
2733     cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
2734     cvmx_write_csr(isc_ctl_addr, 0);
2735     if (en) {
2736         unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
2737 
2738         isc_ctl.u64 = 0;
2739         isc_ctl.s.en = 1;
2740         isc_ctl.s.idt = idt;
2741         cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
2742     }
2743     cvmx_read_csr(isc_ctl_addr);
2744 }
2745 
2746 static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
2747 {
2748     int cpu;
2749     unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
2750 
2751     WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
2752 
2753     for_each_online_cpu(cpu)
2754         octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
2755 }
2756 
2757 static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
2758 {
2759     int cpu;
2760     unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
2761 
2762     WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
2763 
2764     for_each_online_cpu(cpu)
2765         octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
2766 }
2767 
2768 static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
2769 {
2770     struct octeon_ciu3_info *ciu3_info;
2771     unsigned int intsn;
2772     u64 isc_w1c_addr;
2773     union cvmx_ciu3_iscx_w1c isc_w1c;
2774     unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
2775 
2776     intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
2777 
2778     isc_w1c.u64 = 0;
2779     isc_w1c.s.raw = 1;
2780 
2781     ciu3_info = __this_cpu_read(octeon_ciu3_info);
2782     isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
2783     cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
2784     cvmx_read_csr(isc_w1c_addr);
2785 }
2786 
2787 static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
2788 {
2789     octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
2790 }
2791 
2792 static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
2793 {
2794     octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
2795 }
2796 
2797 static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
2798 {
2799     u64 b = ciu3_info->ciu3_addr;
2800     int idt_ip2, idt_ip3, idt_ip4;
2801     int unused_idt2;
2802     int core = cvmx_get_local_core_num();
2803     int i;
2804 
2805     __this_cpu_write(octeon_ciu3_info, ciu3_info);
2806 
2807     /*
2808      * 4 idt per core starting from 1 because zero is reserved.
2809      * Base idt per core is 4 * core + 1
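          * e.g. core 1 uses IDTs 5 (IP2), 6 (IP3), 7 (IP4) and 8 (spare).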
2810      */
2811     idt_ip2 = core * 4 + 1;
2812     idt_ip3 = core * 4 + 2;
2813     idt_ip4 = core * 4 + 3;
2814     unused_idt2 = core * 4 + 4;
2815     __this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
2816     __this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);
2817 
2818     /* ip2 interrupts for this CPU */
2819     cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
2820     cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
2821     cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);
2822 
2823     /* ip3 interrupts for this CPU */
2824     cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
2825     cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
2826     cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);
2827 
2828     /* ip4 interrupts for this CPU */
2829     cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
2830     cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
2831     cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);
2832 
2833     cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
2834     cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
2835     cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);
2836 
2837     for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
2838         unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
2839 
2840         cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
2841         cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
2842     }
2843 
2844     return 0;
2845 }
2846 
2847 static void octeon_irq_setup_secondary_ciu3(void)
2848 {
2849     struct octeon_ciu3_info *ciu3_info;
2850 
2851     ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
2852     octeon_irq_ciu3_alloc_resources(ciu3_info);
2853     irq_cpu_online();
2854 
2855     /* Enable the CIU lines */
2856     set_c0_status(STATUSF_IP3 | STATUSF_IP2);
2857     if (octeon_irq_use_ip4)
2858         set_c0_status(STATUSF_IP4);
2859     else
2860         clear_c0_status(STATUSF_IP4);
2861 }
2862 
2863 static struct irq_chip octeon_irq_chip_ciu3_mbox = {
2864     .name = "CIU3-M",
2865     .irq_enable = octeon_irq_ciu3_mbox_enable,
2866     .irq_disable = octeon_irq_ciu3_mbox_disable,
2867     .irq_ack = octeon_irq_ciu3_mbox_ack,
2868 
2869     .irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
2870     .irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
2871     .flags = IRQCHIP_ONOFFLINE_ENABLED,
2872 };
2873 
2874 static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
2875                        struct device_node *parent)
2876 {
2877     int i;
2878     int node;
2879     struct irq_domain *domain;
2880     struct octeon_ciu3_info *ciu3_info;
2881     const __be32 *zero_addr;
2882     u64 base_addr;
2883     union cvmx_ciu3_const consts;
2884 
2885     node = 0; /* of_node_to_nid(ciu_node); */
2886     ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
2887 
2888     if (!ciu3_info)
2889         return -ENOMEM;
2890 
2891     zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
2892     if (WARN_ON(!zero_addr)) {
             kfree(ciu3_info);
2893         return -EINVAL;
         }
2894 
2895     base_addr = of_translate_address(ciu_node, zero_addr);
2896     base_addr = (u64)phys_to_virt(base_addr);
2897 
2898     ciu3_info->ciu3_addr = base_addr;
2899     ciu3_info->node = node;
2900 
2901     consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);
2902 
2903     octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;
2904 
2905     octeon_irq_ip2 = octeon_irq_ciu3_ip2;
2906     octeon_irq_ip3 = octeon_irq_ciu3_mbox;
2907     octeon_irq_ip4 = octeon_irq_ip4_mask;
2908 
2909     if (node == cvmx_get_node_num()) {
2910         /* Mips internal */
2911         octeon_irq_init_core();
2912 
2913         /* Only do per CPU things if it is the CIU of the boot node. */
2914         i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
2915         WARN_ON(i < 0);
2916 
2917         for (i = 0; i < 8; i++)
2918             irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
2919                          &octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
2920     }
2921 
2922     /*
2923      * Initialize all domains to use the default domain. Specific major
2924      * blocks will overwrite the default domain as needed.
2925      */
2926     domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
2927                      ciu3_info);
2928     for (i = 0; i < MAX_CIU3_DOMAINS; i++)
2929         ciu3_info->domain[i] = domain;
2930 
2931     octeon_ciu3_info_per_node[node] = ciu3_info;
2932 
2933     if (node == cvmx_get_node_num()) {
2934         /* Only do per CPU things if it is the CIU of the boot node. */
2935         octeon_irq_ciu3_alloc_resources(ciu3_info);
2936         if (node == 0)
2937             irq_set_default_host(domain);
2938 
2939         octeon_irq_use_ip4 = false;
2940         /* Enable the CIU lines */
2941         set_c0_status(STATUSF_IP2 | STATUSF_IP3);
2942         clear_c0_status(STATUSF_IP4);
2943     }
2944 
2945     return 0;
2946 }
2947 
2948 static const struct of_device_id ciu_types[] __initconst = {
2949     {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
2950     {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
2951     {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
2952     {.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
2953     {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
2954     {}
2955 };
2956 
2957 void __init arch_init_irq(void)
2958 {
2959 #ifdef CONFIG_SMP
2960     /* Set the default affinity to the boot cpu. */
2961     cpumask_clear(irq_default_affinity);
2962     cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
2963 #endif
2964     of_irq_init(ciu_types);
2965 }
2966 
2967 asmlinkage void plat_irq_dispatch(void)
2968 {
2969     unsigned long cop0_cause;
2970     unsigned long cop0_status;
2971 
2972     while (1) {
2973         cop0_cause = read_c0_cause();
2974         cop0_status = read_c0_status();
2975         cop0_cause &= cop0_status;
2976         cop0_cause &= ST0_IM;
2977 
2978         if (cop0_cause & STATUSF_IP2)
2979             octeon_irq_ip2();
2980         else if (cop0_cause & STATUSF_IP3)
2981             octeon_irq_ip3();
2982         else if (cop0_cause & STATUSF_IP4)
2983             octeon_irq_ip4();
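             /*
              * Cause.IP0..IP7 live in bits 8..15 of CP0 Cause, so
              * fls() - 9 maps the highest pending bit to its offset
              * from MIPS_CPU_IRQ_BASE.
              */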
2984         else if (cop0_cause)
2985             do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
2986         else
2987             break;
2988     }
2989 }
2990 
2991 #ifdef CONFIG_HOTPLUG_CPU
2992 
2993 void octeon_fixup_irqs(void)
2994 {
2995     irq_cpu_offline();
2996 }
2997 
2998 #endif /* CONFIG_HOTPLUG_CPU */
2999 
3000 struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
3001 {
3002     struct octeon_ciu3_info *ciu3_info;
3003 
3004     ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
3005     return ciu3_info->domain[block];
3006 }
3007 EXPORT_SYMBOL(octeon_irq_get_block_domain);