/*
 * IBM PowerPC 4xx Universal Interrupt Controller (UIC) driver.
 *
 * The UIC is a DCR-mapped interrupt controller found in IBM/AMCC 4xx
 * SoCs; this driver supports a primary UIC plus cascaded secondary UICs.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dcr.h>

#define NR_UIC_INTS	32

#define UIC_SR		0x0	/* Status Register */
#define UIC_ER		0x2	/* Enable Register */
#define UIC_CR		0x3	/* Critical Register */
#define UIC_PR		0x4	/* Polarity Register */
#define UIC_TR		0x5	/* Triggering Register */
#define UIC_MSR		0x6	/* Masked Status Register */
#define UIC_VR		0x7	/* Vector Register */
#define UIC_VCR		0x8	/* Vector Configuration Register */

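/* The UIC serving as the system's top-level interrupt controller */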
struct uic *primary_uic;

struct uic {
	int index;
	int dcrbase;

	raw_spinlock_t lock;

	/* The remapper for this UIC */
	struct irq_domain *irqhost;
};

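/* Re-enable a source in the Enable Register; level-triggered sources are
 * also acked here (see the comment in uic_mask_ack_irq() below). */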
static void uic_unmask_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31 - src);
	raw_spin_lock_irqsave(&uic->lock, flags);
	/* ack level-triggered interrupts here */
	if (irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er |= sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

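/* Disable a source by clearing its bit in the Enable Register */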
static void uic_mask_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er;

	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~(1 << (31 - src));
	mtdcr(uic->dcrbase + UIC_ER, er);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

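/* Acknowledge a source by writing its bit to the Status Register */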
static void uic_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&uic->lock, flags);
	mtdcr(uic->dcrbase + UIC_SR, 1 << (31 - src));
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

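/* Disable a source and acknowledge it, but only if it is edge-triggered */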
static void uic_mask_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31 - src);
	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	/* On the UIC, acking (i.e. clearing the SR bit) a level irq
	 * will have no effect if the interrupt is still asserted by
	 * the device, even if the interrupt is already masked.
	 * Therefore we only ack edge interrupts here, while level
	 * interrupts are acked after the actual isr call, in
	 * uic_unmask_irq().
	 */
	if (!irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

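/* Program the trigger mode (edge/level) and polarity for one source */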
static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	int trigger, polarity;
	u32 tr, pr, mask;

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_NONE:
		uic_mask_irq(d);
		return 0;

	case IRQ_TYPE_EDGE_RISING:
		trigger = 1; polarity = 1;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		trigger = 1; polarity = 0;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		trigger = 0; polarity = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		trigger = 0; polarity = 0;
		break;
	default:
		return -EINVAL;
	}

	mask = ~(1 << (31 - src));

	raw_spin_lock_irqsave(&uic->lock, flags);
	tr = mfdcr(uic->dcrbase + UIC_TR);
	pr = mfdcr(uic->dcrbase + UIC_PR);
	tr = (tr & mask) | (trigger << (31 - src));
	pr = (pr & mask) | (polarity << (31 - src));

	mtdcr(uic->dcrbase + UIC_PR, pr);
	mtdcr(uic->dcrbase + UIC_TR, tr);
	/* Clear any event latched under the old trigger/polarity setting */
	mtdcr(uic->dcrbase + UIC_SR, ~mask);

	raw_spin_unlock_irqrestore(&uic->lock, flags);

	return 0;
}

static struct irq_chip uic_irq_chip = {
	.name		= "UIC",
	.irq_unmask	= uic_unmask_irq,
	.irq_mask	= uic_mask_irq,
	.irq_mask_ack	= uic_mask_ack_irq,
	.irq_ack	= uic_ack_irq,
	.irq_set_type	= uic_set_irq_type,
};

static int uic_host_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	struct uic *uic = h->host_data;

	irq_set_chip_data(virq, uic);

	/* Despite the name, handle_level_irq() works for both level
	 * and edge irqs on the UIC. */
	irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops uic_host_ops = {
	.map	= uic_host_map,
	.xlate	= irq_domain_xlate_twocell,
};

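/*
 * Chained handler run on the parent's cascade input: mask the cascade irq,
 * dispatch the interrupt pending on the child UIC, then ack (for level
 * cascades) and unmask the cascade irq again.
 */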
static void uic_irq_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct uic *uic = irq_desc_get_handler_data(desc);
	u32 msr;
	int src;

	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_mask(idata);
	else
		chip->irq_mask_ack(idata);
	raw_spin_unlock(&desc->lock);

	msr = mfdcr(uic->dcrbase + UIC_MSR);
	if (!msr) /* spurious interrupt */
		goto uic_irq_ret;

	src = 32 - ffs(msr);

	generic_handle_domain_irq(uic->irqhost, src);

uic_irq_ret:
	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_ack(idata);
	if (!irqd_irq_disabled(idata) && chip->irq_unmask)
		chip->irq_unmask(idata);
	raw_spin_unlock(&desc->lock);
}

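/*
 * Allocate and initialize one UIC from its device tree node: read the
 * cell-index and dcr-reg properties, create the irq_domain, and reset the
 * hardware with all sources masked and any pending status cleared.
 */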
static struct uic * __init uic_init_one(struct device_node *node)
{
	struct uic *uic;
	const u32 *indexp, *dcrreg;
	int len;

	BUG_ON(!of_device_is_compatible(node, "ibm,uic"));

	uic = kzalloc(sizeof(*uic), GFP_KERNEL);
	if (!uic)
		return NULL;

	raw_spin_lock_init(&uic->lock);
	indexp = of_get_property(node, "cell-index", &len);
	if (!indexp || (len != sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
		       "cell-index property\n", node);
		return NULL;
	}
	uic->index = *indexp;

	dcrreg = of_get_property(node, "dcr-reg", &len);
	if (!dcrreg || (len != 2 * sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
		       "dcr-reg property\n", node);
		return NULL;
	}
	uic->dcrbase = *dcrreg;

	uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
					     uic);
	if (!uic->irqhost)
		return NULL;

	/* Start with all interrupts disabled, level and non-critical */
	mtdcr(uic->dcrbase + UIC_ER, 0);
	mtdcr(uic->dcrbase + UIC_CR, 0);
	mtdcr(uic->dcrbase + UIC_TR, 0);
	/* Clear any pending interrupts, in case the firmware left some */
	mtdcr(uic->dcrbase + UIC_SR, 0xffffffff);

	printk("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index,
	       NR_UIC_INTS, uic->dcrbase);

	return uic;
}

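/*
 * Scan the device tree for all "ibm,uic" nodes: the UIC without an
 * "interrupts" property becomes the primary (top-level) controller, and
 * any others are wired up as cascaded secondary UICs.
 */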
void __init uic_init_tree(void)
{
	struct device_node *np;
	struct uic *uic;
	const u32 *interrupts;

	/* First locate and initialize the top-level UIC */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (!interrupts)
			break;
	}

	BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the
		      * top-level interrupt controller */
	primary_uic = uic_init_one(np);
	if (!primary_uic)
		panic("Unable to initialize primary UIC %pOF\n", np);

	irq_set_default_host(primary_uic->irqhost);
	of_node_put(np);

	/* Then scan again for cascaded UICs */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (interrupts) {
			/* Secondary UIC */
			int cascade_virq;

			uic = uic_init_one(np);
			if (!uic)
				panic("Unable to initialize a secondary UIC %pOF\n",
				      np);

			cascade_virq = irq_of_parse_and_map(np, 0);

			irq_set_handler_data(cascade_virq, uic);
			irq_set_chained_handler(cascade_virq, uic_irq_cascade);
		}
	}
}

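/* Return an interrupt vector or 0 if no interrupt is pending. */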
unsigned int uic_get_irq(void)
{
	u32 msr;
	int src;

	BUG_ON(!primary_uic);

	msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
	src = 32 - ffs(msr);

	return irq_linear_revmap(primary_uic->irqhost, src);
}