0001
0002
0003
0004
0005 #undef DEBUG
0006
0007 #include <linux/ioport.h>
0008 #include <linux/interrupt.h>
0009 #include <linux/irqdomain.h>
0010 #include <linux/kernel.h>
0011 #include <linux/delay.h>
0012 #include <asm/io.h>
0013 #include <asm/i8259.h>
0014
/* PCI interrupt-acknowledge register, or NULL when we must poll the PIC */
static volatile void __iomem *pci_intack; /* RO, dcr or NULL */

/* Software copy of the two 8259 mask registers (OCW1); all bits start
 * masked.  Index 0 shadows port 0xA1 (slave), index 1 shadows 0x21
 * (master). */
static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])

/* Protects the cached masks and all port I/O to the two PICs */
static DEFINE_RAW_SPINLOCK(i8259_lock);

/* irq_domain covering the 16 legacy interrupts, set up by i8259_init() */
static struct irq_domain *i8259_host;
0024
0025
0026
0027
0028
0029
0030
/*
 * Acknowledge the interrupt and return its 8259 IRQ number (0-15).
 *
 * Uses either the PCI host bridge's interrupt acknowledge register
 * (when i8259_init() was given an intack address) or polls the PIC
 * directly with the OCW3 poll command.  Returns 0 when the interrupt
 * turns out to be spurious.
 */
unsigned int i8259_irq(void)
{
	int irq;
	int lock = 0;

	/* Either int-ack cycle or poll for the IRQ */
	if (pci_intack)
		irq = readb(pci_intack);
	else {
		raw_spin_lock(&i8259_lock);
		lock = 1;

		/* Perform an interrupt acknowledge cycle on controller 1 */
		outb(0x0C, 0x20);		/* OCW3: poll command */
		irq = inb(0x20) & 7;
		if (irq == 2 ) {
			/*
			 * Interrupt is cascaded, so perform an interrupt
			 * acknowledge cycle on controller 2 to get the
			 * real source (IRQ 8-15).
			 */
			outb(0x0C, 0xA0);	/* OCW3: poll command */
			irq = (inb(0xA0) & 7) + 8;
		}
	}

	if (irq == 7) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR).  If the most
		 * significant bit (in-service bit for IRQ7) is not set,
		 * there is no valid interrupt.  With pci_intack, OCW3 was
		 * already programmed for ISR reads by i8259_init().
		 */
		if (!pci_intack)
			outb(0x0B, 0x20);	/* OCW3: select ISR */
		if(~inb(0x20) & 0x80)
			irq = 0;
	} else if (irq == 0xff)
		/* int-ack returned no pending interrupt */
		irq = 0;

	if (lock)
		raw_spin_unlock(&i8259_lock);
	return irq;
}
0075
/*
 * Mask the interrupt in the cached OCW1 mask and acknowledge it with a
 * non-specific EOI.  For a slave interrupt (IRQ > 7) the slave PIC is
 * EOI'd first, then the master's cascade line.
 *
 * NOTE(review): the inb() of the mask port before writing the new mask
 * looks like a dummy read inserted for bus timing — confirm before
 * removing.
 */
static void i8259_mask_and_ack_irq(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259_lock, flags);
	if (d->irq > 7) {
		cached_A1 |= 1 << (d->irq-8);
		inb(0xA1);		/* DUMMY */
		outb(cached_A1, 0xA1);	/* update slave mask */
		outb(0x20, 0xA0);	/* non-specific EOI, slave */
		outb(0x20, 0x20);	/* non-specific EOI, master (cascade) */
	} else {
		cached_21 |= 1 << d->irq;
		inb(0x21);		/* DUMMY */
		outb(cached_21, 0x21);	/* update master mask */
		outb(0x20, 0x20);	/* non-specific EOI, master */
	}
	raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
0095
/*
 * Push the cached masks out to both PICs (OCW1 writes).
 *
 * @irq_nr: unused — both mask registers are always rewritten.
 *
 * Caller must hold i8259_lock.
 */
static void i8259_set_irq_mask(int irq_nr)
{
	outb(cached_A1,0xA1);
	outb(cached_21,0x21);
}
0101
0102 static void i8259_mask_irq(struct irq_data *d)
0103 {
0104 unsigned long flags;
0105
0106 pr_debug("i8259_mask_irq(%d)\n", d->irq);
0107
0108 raw_spin_lock_irqsave(&i8259_lock, flags);
0109 if (d->irq < 8)
0110 cached_21 |= 1 << d->irq;
0111 else
0112 cached_A1 |= 1 << (d->irq-8);
0113 i8259_set_irq_mask(d->irq);
0114 raw_spin_unlock_irqrestore(&i8259_lock, flags);
0115 }
0116
0117 static void i8259_unmask_irq(struct irq_data *d)
0118 {
0119 unsigned long flags;
0120
0121 pr_debug("i8259_unmask_irq(%d)\n", d->irq);
0122
0123 raw_spin_lock_irqsave(&i8259_lock, flags);
0124 if (d->irq < 8)
0125 cached_21 &= ~(1 << d->irq);
0126 else
0127 cached_A1 &= ~(1 << (d->irq-8));
0128 i8259_set_irq_mask(d->irq);
0129 raw_spin_unlock_irqrestore(&i8259_lock, flags);
0130 }
0131
/* irq_chip callbacks for the cascaded 8259 pair; mask_ack combines the
 * mask and EOI so the flow handler issues a single locked sequence. */
static struct irq_chip i8259_pic = {
	.name = "i8259",
	.irq_mask = i8259_mask_irq,
	.irq_disable = i8259_mask_irq,
	.irq_unmask = i8259_unmask_irq,
	.irq_mask_ack = i8259_mask_and_ack_irq,
};
0139
/* I/O port ranges claimed by i8259_init(): the two PIC register pairs
 * and the ELCR edge/level control registers at 0x4d0/0x4d1. */
static struct resource pic1_iores = {
	.name = "8259 (master)",
	.start = 0x20,
	.end = 0x21,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static struct resource pic2_iores = {
	.name = "8259 (slave)",
	.start = 0xa0,
	.end = 0xa1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static struct resource pic_edgectrl_iores = {
	.name = "8259 edge control",
	.start = 0x4d0,
	.end = 0x4d1,
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
0160
0161 static int i8259_host_match(struct irq_domain *h, struct device_node *node,
0162 enum irq_domain_bus_token bus_token)
0163 {
0164 struct device_node *of_node = irq_domain_get_of_node(h);
0165 return of_node == NULL || of_node == node;
0166 }
0167
/* Set up a newly mapped virq: block the internal cascade line and
 * install the level-type flow handler with our irq_chip. */
static int i8259_host_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);

	/* We block the internal cascade (IRQ2) from being requested */
	if (hw == 2)
		irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* We use the level handler only for now; per-irq edge/level
	 * selection (ELCR) is not programmed here — the sense decoded
	 * by i8259_host_xlate() is recorded but handling stays level.
	 */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
	return 0;
}
0184
0185 static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
0186 const u32 *intspec, unsigned int intsize,
0187 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
0188 {
0189 static unsigned char map_isa_senses[4] = {
0190 IRQ_TYPE_LEVEL_LOW,
0191 IRQ_TYPE_LEVEL_HIGH,
0192 IRQ_TYPE_EDGE_FALLING,
0193 IRQ_TYPE_EDGE_RISING,
0194 };
0195
0196 *out_hwirq = intspec[0];
0197 if (intsize > 1 && intspec[1] < 4)
0198 *out_flags = map_isa_senses[intspec[1]];
0199 else
0200 *out_flags = IRQ_TYPE_NONE;
0201
0202 return 0;
0203 }
0204
/* irq_domain callbacks for the legacy 16-interrupt domain */
static const struct irq_domain_ops i8259_host_ops = {
	.match = i8259_host_match,
	.map = i8259_host_map,
	.xlate = i8259_host_xlate,
};
0210
/* Return the irq_domain created by i8259_init() (NULL before init or
 * if domain allocation failed). */
struct irq_domain *__init i8259_get_host(void)
{
	return i8259_host;
}
0215
0216
0217
0218
0219
0220
0221
0222
/**
 * i8259_init - Initialize the legacy controller
 * @node: device node of the legacy PIC (can be NULL, but then it will
 *        match all interrupts, so beware)
 * @intack_addr: PCI interrupt acknowledge (real) address which will return
 *               the active irq from the 8259; 0 means poll instead
 */
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
	unsigned long flags;

	/* initialize the controller */
	raw_spin_lock_irqsave(&i8259_lock, flags);

	/* Mask all first */
	outb(0xff, 0xA1);
	outb(0xff, 0x21);

	/* init master interrupt controller */
	outb(0x11, 0x20);	/* ICW1: init sequence, ICW4 needed */
	outb(0x00, 0x21);	/* ICW2: vector base 0 */
	outb(0x04, 0x21);	/* ICW3: slave cascaded on IRQ2 */
	outb(0x01, 0x21);	/* ICW4: select 8086 mode */

	/* init slave interrupt controller */
	outb(0x11, 0xA0);	/* ICW1: init sequence, ICW4 needed */
	outb(0x08, 0xA1);	/* ICW2: vector base 8 */
	outb(0x02, 0xA1);	/* ICW3: slave identity = cascade line 2 */
	outb(0x01, 0xA1);	/* ICW4: select 8086 mode */

	/* That thing is slow */
	udelay(100);

	/* program OCW3 so status reads return the ISR from now on */
	outb(0x0B, 0x20);
	outb(0x0B, 0xA0);

	/* Unmask the internal cascade (IRQ2 on the master) */
	cached_21 &= ~(1 << 2);

	/* Set interrupt masks */
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);

	raw_spin_unlock_irqrestore(&i8259_lock, flags);

	/* create a legacy irq domain covering the 16 ISA interrupts */
	i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
					   &i8259_host_ops, NULL);
	if (i8259_host == NULL) {
		printk(KERN_ERR "i8259: failed to allocate irq host !\n");
		return;
	}

	/* reserve our resources; return values deliberately ignored —
	 * failure to claim a range is not fatal to interrupt delivery */
	request_resource(&ioport_resource, &pic1_iores);
	request_resource(&ioport_resource, &pic2_iores);
	request_resource(&ioport_resource, &pic_edgectrl_iores);

	/* map the PCI int-ack register so i8259_irq() can use it */
	if (intack_addr != 0)
		pci_intack = ioremap(intack_addr, 1);

	printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}