/*
 * Intel IO-APIC support.
 *
 * Programs the IO-APIC redirection table entries, hooks the IO-APIC pins
 * into the hierarchical x86 irqdomain infrastructure, saves and restores
 * the entries across suspend/resume, and contains the legacy 8259A/timer
 * interaction quirks needed during early boot.
 */
0034 #include <linux/mm.h>
0035 #include <linux/interrupt.h>
0036 #include <linux/irq.h>
0037 #include <linux/init.h>
0038 #include <linux/delay.h>
0039 #include <linux/sched.h>
0040 #include <linux/pci.h>
0041 #include <linux/mc146818rtc.h>
0042 #include <linux/compiler.h>
0043 #include <linux/acpi.h>
0044 #include <linux/export.h>
0045 #include <linux/syscore_ops.h>
0046 #include <linux/freezer.h>
0047 #include <linux/kthread.h>
0048 #include <linux/jiffies.h> /* time_after() */
0049 #include <linux/slab.h>
0050 #include <linux/memblock.h>
0051 #include <linux/msi.h>
0052
0053 #include <asm/irqdomain.h>
0054 #include <asm/io.h>
0055 #include <asm/smp.h>
0056 #include <asm/cpu.h>
0057 #include <asm/desc.h>
0058 #include <asm/proto.h>
0059 #include <asm/acpi.h>
0060 #include <asm/dma.h>
0061 #include <asm/timer.h>
0062 #include <asm/time.h>
0063 #include <asm/i8259.h>
0064 #include <asm/setup.h>
0065 #include <asm/irq_remapping.h>
0066 #include <asm/hw_irq.h>
0067 #include <asm/apic.h>
0068 #include <asm/pgtable.h>
0069
0070 #define for_each_ioapic(idx) \
0071 for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
0072 #define for_each_ioapic_reverse(idx) \
0073 for ((idx) = nr_ioapics - 1; (idx) >= 0; (idx)--)
0074 #define for_each_pin(idx, pin) \
0075 for ((pin) = 0; (pin) < ioapics[(idx)].nr_registers; (pin)++)
0076 #define for_each_ioapic_pin(idx, pin) \
0077 for_each_ioapic((idx)) \
0078 for_each_pin((idx), (pin))
0079 #define for_each_irq_pin(entry, head) \
0080 list_for_each_entry(entry, &head, list)
0081
0082 static DEFINE_RAW_SPINLOCK(ioapic_lock);
0083 static DEFINE_MUTEX(ioapic_mutex);
0084 static unsigned int ioapic_dynirq_base;
0085 static int ioapic_initialized;
0086
0087 struct irq_pin_list {
0088 struct list_head list;
0089 int apic, pin;
0090 };
0091
0092 struct mp_chip_data {
0093 struct list_head irq_2_pin;
0094 struct IO_APIC_route_entry entry;
0095 bool is_level;
0096 bool active_low;
0097 bool isa_irq;
0098 u32 count;
0099 };
0100
0101 struct mp_ioapic_gsi {
0102 u32 gsi_base;
0103 u32 gsi_end;
0104 };
0105
0106 static struct ioapic {
/* Number of IRQ routing registers (redirection table entries) */
0110 int nr_registers;
/* Redirection entries saved across suspend/resume */
0114 struct IO_APIC_route_entry *saved_registers;
/* Static configuration of this IO-APIC (id, version, MMIO address) */
0116 struct mpc_ioapic mp_config;
/* GSI range routed through this IO-APIC */
0118 struct mp_ioapic_gsi gsi_config;
0119 struct ioapic_domain_cfg irqdomain_cfg;
0120 struct irq_domain *irqdomain;
0121 struct resource *iomem_res;
0122 } ioapics[MAX_IO_APICS];
0123
0124 #define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver
0125
0126 int mpc_ioapic_id(int ioapic_idx)
0127 {
0128 return ioapics[ioapic_idx].mp_config.apicid;
0129 }
0130
0131 unsigned int mpc_ioapic_addr(int ioapic_idx)
0132 {
0133 return ioapics[ioapic_idx].mp_config.apicaddr;
0134 }
0135
0136 static inline struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
0137 {
0138 return &ioapics[ioapic_idx].gsi_config;
0139 }
0140
0141 static inline int mp_ioapic_pin_count(int ioapic)
0142 {
0143 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
0144
0145 return gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1;
0146 }
0147
0148 static inline u32 mp_pin_to_gsi(int ioapic, int pin)
0149 {
0150 return mp_ioapic_gsi_routing(ioapic)->gsi_base + pin;
0151 }
0152
0153 static inline bool mp_is_legacy_irq(int irq)
0154 {
0155 return irq >= 0 && irq < nr_legacy_irqs();
0156 }
0157
0158 static inline struct irq_domain *mp_ioapic_irqdomain(int ioapic)
0159 {
0160 return ioapics[ioapic].irqdomain;
0161 }
0162
0163 int nr_ioapics;

/* One past the highest GSI number currently in use */
0166 u32 gsi_top;

/* MP interrupt source entries provided by the BIOS/firmware */
0169 struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* Number of valid entries in mp_irqs[] */
0172 int mp_irq_entries;
0173
0174 #ifdef CONFIG_EISA
0175 int mp_bus_id_to_type[MAX_MP_BUSSES];
0176 #endif
0177
0178 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
0179
0180 int skip_ioapic_setup;
0181
/* Disable IO-APIC support at runtime, e.g. for the "noapic" boot option */
0185 void disable_ioapic_support(void)
0186 {
0187 #ifdef CONFIG_PCI
0188 noioapicquirk = 1;
0189 noioapicreroute = -1;
0190 #endif
0191 skip_ioapic_setup = 1;
0192 }
0193
0194 static int __init parse_noapic(char *str)
0195 {
0196
0197 disable_ioapic_support();
0198 return 0;
0199 }
0200 early_param("noapic", parse_noapic);
0201
0202
0203 void mp_save_irq(struct mpc_intsrc *m)
0204 {
0205 int i;
0206
0207 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
0208 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
0209 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
0210 m->srcbusirq, m->dstapic, m->dstirq);
0211
0212 for (i = 0; i < mp_irq_entries; i++) {
0213 if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
0214 return;
0215 }
0216
0217 memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
0218 if (++mp_irq_entries == MAX_IRQ_SOURCES)
0219 panic("Max # of irq sources exceeded!!\n");
0220 }
0221
0222 static void alloc_ioapic_saved_registers(int idx)
0223 {
0224 size_t size;
0225
0226 if (ioapics[idx].saved_registers)
0227 return;
0228
0229 size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers;
0230 ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL);
0231 if (!ioapics[idx].saved_registers)
0232 pr_err("IOAPIC %d: suspend/resume impossible!\n", idx);
0233 }
0234
0235 static void free_ioapic_saved_registers(int idx)
0236 {
0237 kfree(ioapics[idx].saved_registers);
0238 ioapics[idx].saved_registers = NULL;
0239 }
0240
0241 int __init arch_early_ioapic_init(void)
0242 {
0243 int i;
0244
0245 if (!nr_legacy_irqs())
0246 io_apic_irqs = ~0UL;
0247
0248 for_each_ioapic(i)
0249 alloc_ioapic_saved_registers(i);
0250
0251 return 0;
0252 }
0253
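/*
 * Memory-mapped register window of an IO-APIC: an index register, a data
 * register and (on version 0x20 and later parts) an EOI register.
 */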
0254 struct io_apic {
0255 unsigned int index;
0256 unsigned int unused[3];
0257 unsigned int data;
0258 unsigned int unused2[11];
0259 unsigned int eoi;
0260 };
0261
0262 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
0263 {
0264 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
0265 + (mpc_ioapic_addr(idx) & ~PAGE_MASK);
0266 }
0267
0268 static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
0269 {
0270 struct io_apic __iomem *io_apic = io_apic_base(apic);
0271 writel(vector, &io_apic->eoi);
0272 }
0273
0274 unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
0275 {
0276 struct io_apic __iomem *io_apic = io_apic_base(apic);
0277 writel(reg, &io_apic->index);
0278 return readl(&io_apic->data);
0279 }
0280
0281 static void io_apic_write(unsigned int apic, unsigned int reg,
0282 unsigned int value)
0283 {
0284 struct io_apic __iomem *io_apic = io_apic_base(apic);
0285
0286 writel(reg, &io_apic->index);
0287 writel(value, &io_apic->data);
0288 }
0289
0290 static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
0291 {
0292 struct IO_APIC_route_entry entry;
0293
0294 entry.w1 = io_apic_read(apic, 0x10 + 2 * pin);
0295 entry.w2 = io_apic_read(apic, 0x11 + 2 * pin);
0296
0297 return entry;
0298 }
0299
0300 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
0301 {
0302 struct IO_APIC_route_entry entry;
0303 unsigned long flags;
0304
0305 raw_spin_lock_irqsave(&ioapic_lock, flags);
0306 entry = __ioapic_read_entry(apic, pin);
0307 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
0308
0309 return entry;
0310 }
0311
/*
 * When writing a new IO-APIC routing entry, the high word must be
 * written first: if the mask bit in the low word were cleared first,
 * the entry could fire before it is fully populated.
 */
0318 static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
0319 {
0320 io_apic_write(apic, 0x11 + 2*pin, e.w2);
0321 io_apic_write(apic, 0x10 + 2*pin, e.w1);
0322 }
0323
0324 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
0325 {
0326 unsigned long flags;
0327
0328 raw_spin_lock_irqsave(&ioapic_lock, flags);
0329 __ioapic_write_entry(apic, pin, e);
0330 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
0331 }
0332
/*
 * When masking an entry, write the low word first so that the mask bit
 * is set before the high word is changed.
 */
0338 static void ioapic_mask_entry(int apic, int pin)
0339 {
0340 struct IO_APIC_route_entry e = { .masked = true };
0341 unsigned long flags;
0342
0343 raw_spin_lock_irqsave(&ioapic_lock, flags);
0344 io_apic_write(apic, 0x10 + 2*pin, e.w1);
0345 io_apic_write(apic, 0x11 + 2*pin, e.w2);
0346 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
0347 }
0348
/*
 * The common case is a 1:1 IRQ<->pin mapping, but shared ISA-space IRQs
 * require one IRQ to drive several pins, hence the per-IRQ list of
 * (apic, pin) pairs.
 */
0354 static int __add_pin_to_irq_node(struct mp_chip_data *data,
0355 int node, int apic, int pin)
0356 {
0357 struct irq_pin_list *entry;
0358
0359
0360 for_each_irq_pin(entry, data->irq_2_pin)
0361 if (entry->apic == apic && entry->pin == pin)
0362 return 0;
0363
0364 entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
0365 if (!entry) {
0366 pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
0367 node, apic, pin);
0368 return -ENOMEM;
0369 }
0370 entry->apic = apic;
0371 entry->pin = pin;
0372 list_add_tail(&entry->list, &data->irq_2_pin);
0373
0374 return 0;
0375 }
0376
0377 static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin)
0378 {
0379 struct irq_pin_list *tmp, *entry;
0380
0381 list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list)
0382 if (entry->apic == apic && entry->pin == pin) {
0383 list_del(&entry->list);
0384 kfree(entry);
0385 return;
0386 }
0387 }
0388
0389 static void add_pin_to_irq_node(struct mp_chip_data *data,
0390 int node, int apic, int pin)
0391 {
0392 if (__add_pin_to_irq_node(data, node, apic, pin))
0393 panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
0394 }
0395
/* Reroute an IRQ to a different IO-APIC pin */
0399 static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
0400 int oldapic, int oldpin,
0401 int newapic, int newpin)
0402 {
0403 struct irq_pin_list *entry;
0404
0405 for_each_irq_pin(entry, data->irq_2_pin) {
0406 if (entry->apic == oldapic && entry->pin == oldpin) {
0407 entry->apic = newapic;
0408 entry->pin = newpin;
0409
0410 return;
0411 }
0412 }
0413
0414
0415 add_pin_to_irq_node(data, node, newapic, newpin);
0416 }
0417
0418 static void io_apic_modify_irq(struct mp_chip_data *data, bool masked,
0419 void (*final)(struct irq_pin_list *entry))
0420 {
0421 struct irq_pin_list *entry;
0422
0423 data->entry.masked = masked;
0424
0425 for_each_irq_pin(entry, data->irq_2_pin) {
0426 io_apic_write(entry->apic, 0x10 + 2 * entry->pin, data->entry.w1);
0427 if (final)
0428 final(entry);
0429 }
0430 }
0431
0432 static void io_apic_sync(struct irq_pin_list *entry)
0433 {
/*
 * Synchronize the IO-APIC and the CPU by doing a dummy read from the
 * IO-APIC data register.
 */
0438 struct io_apic __iomem *io_apic;
0439
0440 io_apic = io_apic_base(entry->apic);
0441 readl(&io_apic->data);
0442 }
0443
0444 static void mask_ioapic_irq(struct irq_data *irq_data)
0445 {
0446 struct mp_chip_data *data = irq_data->chip_data;
0447 unsigned long flags;
0448
0449 raw_spin_lock_irqsave(&ioapic_lock, flags);
0450 io_apic_modify_irq(data, true, &io_apic_sync);
0451 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
0452 }
0453
0454 static void __unmask_ioapic(struct mp_chip_data *data)
0455 {
0456 io_apic_modify_irq(data, false, NULL);
0457 }
0458
0459 static void unmask_ioapic_irq(struct irq_data *irq_data)
0460 {
0461 struct mp_chip_data *data = irq_data->chip_data;
0462 unsigned long flags;
0463
0464 raw_spin_lock_irqsave(&ioapic_lock, flags);
0465 __unmask_ioapic(data);
0466 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
0467 }
0468
/*
 * IO-APICs with a version of 0x20 or newer have a dedicated EOI register
 * that can be written directly. Older parts do not, so for those the EOI
 * is simulated: the RTE is temporarily masked and switched to edge
 * trigger mode (which clears the Remote IRR bit) and then restored.
 */
0485 static void __eoi_ioapic_pin(int apic, int pin, int vector)
0486 {
0487 if (mpc_ioapic_ver(apic) >= 0x20) {
0488 io_apic_eoi(apic, vector);
0489 } else {
0490 struct IO_APIC_route_entry entry, entry1;
0491
0492 entry = entry1 = __ioapic_read_entry(apic, pin);
0493
0494
0495
0496
0497 entry1.masked = true;
0498 entry1.is_level = false;
0499
0500 __ioapic_write_entry(apic, pin, entry1);
0501
0502
0503
0504
0505 __ioapic_write_entry(apic, pin, entry);
0506 }
0507 }
0508
0509 static void eoi_ioapic_pin(int vector, struct mp_chip_data *data)
0510 {
0511 unsigned long flags;
0512 struct irq_pin_list *entry;
0513
0514 raw_spin_lock_irqsave(&ioapic_lock, flags);
0515 for_each_irq_pin(entry, data->irq_2_pin)
0516 __eoi_ioapic_pin(entry->apic, entry->pin, vector);
0517 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
0518 }
0519
0520 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
0521 {
0522 struct IO_APIC_route_entry entry;
0523
/* Check delivery_mode so that an SMI pin is never cleared */
0525 entry = ioapic_read_entry(apic, pin);
0526 if (entry.delivery_mode == APIC_DELIVERY_MODE_SMI)
0527 return;
0528
/*
 * Make sure the entry is masked and re-read the contents to check
 * whether it is a level-triggered pin and whether Remote IRR is set.
 */
0533 if (!entry.masked) {
0534 entry.masked = true;
0535 ioapic_write_entry(apic, pin, entry);
0536 entry = ioapic_read_entry(apic, pin);
0537 }
0538
0539 if (entry.irr) {
0540 unsigned long flags;
0541
/*
 * Make sure the trigger mode is set to level; the explicit EOI below
 * does not clear Remote IRR for an edge-triggered entry.
 */
0547 if (!entry.is_level) {
0548 entry.is_level = true;
0549 ioapic_write_entry(apic, pin, entry);
0550 }
0551 raw_spin_lock_irqsave(&ioapic_lock, flags);
0552 __eoi_ioapic_pin(apic, pin, entry.vector);
0553 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
0554 }
0555
/*
 * Finally clear the rest of the RTE, leaving only the mask bit set.
 */
0560 ioapic_mask_entry(apic, pin);
0561 entry = ioapic_read_entry(apic, pin);
0562 if (entry.irr)
0563 pr_err("Unable to reset IRR for apic: %d, pin :%d\n",
0564 mpc_ioapic_id(apic), pin);
0565 }
0566
0567 void clear_IO_APIC (void)
0568 {
0569 int apic, pin;
0570
0571 for_each_ioapic_pin(apic, pin)
0572 clear_IO_APIC_pin(apic, pin);
0573 }
0574
0575 #ifdef CONFIG_X86_32
/*
 * Support for broken MP BIOSes: allows hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs via the "pirq=" boot option.
 */
0581 #define MAX_PIRQS 8
0582 static int pirq_entries[MAX_PIRQS] = {
0583 [0 ... MAX_PIRQS - 1] = -1
0584 };
0585
0586 static int __init ioapic_pirq_setup(char *str)
0587 {
0588 int i, max;
0589 int ints[MAX_PIRQS+1];
0590
0591 get_options(str, ARRAY_SIZE(ints), ints);
0592
0593 apic_printk(APIC_VERBOSE, KERN_INFO
0594 "PIRQ redirection, working around broken MP-BIOS.\n");
0595 max = MAX_PIRQS;
0596 if (ints[0] < MAX_PIRQS)
0597 max = ints[0];
0598
0599 for (i = 0; i < max; i++) {
0600 apic_printk(APIC_VERBOSE, KERN_DEBUG
0601 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
0602
0603
0604
0605 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
0606 }
0607 return 1;
0608 }
0609
0610 __setup("pirq=", ioapic_pirq_setup);
0611 #endif
0612
/* Save all IO-APIC redirection entries (used for suspend) */
0616 int save_ioapic_entries(void)
0617 {
0618 int apic, pin;
0619 int err = 0;
0620
0621 for_each_ioapic(apic) {
0622 if (!ioapics[apic].saved_registers) {
0623 err = -ENOMEM;
0624 continue;
0625 }
0626
0627 for_each_pin(apic, pin)
0628 ioapics[apic].saved_registers[pin] =
0629 ioapic_read_entry(apic, pin);
0630 }
0631
0632 return err;
0633 }
0634
/* Mask all IO-APIC redirection entries, based on the previously saved state */
0638 void mask_ioapic_entries(void)
0639 {
0640 int apic, pin;
0641
0642 for_each_ioapic(apic) {
0643 if (!ioapics[apic].saved_registers)
0644 continue;
0645
0646 for_each_pin(apic, pin) {
0647 struct IO_APIC_route_entry entry;
0648
0649 entry = ioapics[apic].saved_registers[pin];
0650 if (!entry.masked) {
0651 entry.masked = true;
0652 ioapic_write_entry(apic, pin, entry);
0653 }
0654 }
0655 }
0656 }
0657
/* Restore the IO-APIC redirection entries saved by save_ioapic_entries() */
0661 int restore_ioapic_entries(void)
0662 {
0663 int apic, pin;
0664
0665 for_each_ioapic(apic) {
0666 if (!ioapics[apic].saved_registers)
0667 continue;
0668
0669 for_each_pin(apic, pin)
0670 ioapic_write_entry(apic, pin,
0671 ioapics[apic].saved_registers[pin]);
0672 }
0673 return 0;
0674 }
0675
/* Find the mp_irqs[] entry number for a given IO-APIC pin and IRQ type */
0679 static int find_irq_entry(int ioapic_idx, int pin, int type)
0680 {
0681 int i;
0682
0683 for (i = 0; i < mp_irq_entries; i++)
0684 if (mp_irqs[i].irqtype == type &&
0685 (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
0686 mp_irqs[i].dstapic == MP_APIC_ALL) &&
0687 mp_irqs[i].dstirq == pin)
0688 return i;
0689
0690 return -1;
0691 }
0692
/* Find the IO-APIC pin to which a given ISA IRQ of the given type is connected */
0696 static int __init find_isa_irq_pin(int irq, int type)
0697 {
0698 int i;
0699
0700 for (i = 0; i < mp_irq_entries; i++) {
0701 int lbus = mp_irqs[i].srcbus;
0702
0703 if (test_bit(lbus, mp_bus_not_pci) &&
0704 (mp_irqs[i].irqtype == type) &&
0705 (mp_irqs[i].srcbusirq == irq))
0706
0707 return mp_irqs[i].dstirq;
0708 }
0709 return -1;
0710 }
0711
0712 static int __init find_isa_irq_apic(int irq, int type)
0713 {
0714 int i;
0715
0716 for (i = 0; i < mp_irq_entries; i++) {
0717 int lbus = mp_irqs[i].srcbus;
0718
0719 if (test_bit(lbus, mp_bus_not_pci) &&
0720 (mp_irqs[i].irqtype == type) &&
0721 (mp_irqs[i].srcbusirq == irq))
0722 break;
0723 }
0724
0725 if (i < mp_irq_entries) {
0726 int ioapic_idx;
0727
0728 for_each_ioapic(ioapic_idx)
0729 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
0730 return ioapic_idx;
0731 }
0732
0733 return -1;
0734 }
0735
0736 static bool irq_active_low(int idx)
0737 {
0738 int bus = mp_irqs[idx].srcbus;
0739
0740
0741
0742
0743 switch (mp_irqs[idx].irqflag & MP_IRQPOL_MASK) {
0744 case MP_IRQPOL_DEFAULT:
/*
 * Conforms to spec, i.e. bus-type dependent polarity: PCI defaults to
 * active low, (E)ISA defaults to active high.
 */
0749 return !test_bit(bus, mp_bus_not_pci);
0750 case MP_IRQPOL_ACTIVE_HIGH:
0751 return false;
0752 case MP_IRQPOL_RESERVED:
0753 pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
0754 fallthrough;
0755 case MP_IRQPOL_ACTIVE_LOW:
0756 default:
0757 return true;
0758 }
0759 }
0760
0761 #ifdef CONFIG_EISA
/*
 * EISA Edge/Level control register, ELCR
 */
0765 static bool EISA_ELCR(unsigned int irq)
0766 {
0767 if (irq < nr_legacy_irqs()) {
0768 unsigned int port = PIC_ELCR1 + (irq >> 3);
0769 return (inb(port) >> (irq & 7)) & 1;
0770 }
0771 apic_printk(APIC_VERBOSE, KERN_INFO
0772 "Broken MPtable reports ISA irq %d\n", irq);
0773 return false;
0774 }
0775
/*
 * EISA interrupts are always active high and can be edge or level
 * triggered depending on the ELCR value. An interrupt listed as EISA
 * conforming in the MP table has its trigger mode read from the ELCR.
 */
0782 static bool eisa_irq_is_level(int idx, int bus, bool level)
0783 {
0784 switch (mp_bus_id_to_type[bus]) {
0785 case MP_BUS_PCI:
0786 case MP_BUS_ISA:
0787 return level;
0788 case MP_BUS_EISA:
0789 return EISA_ELCR(mp_irqs[idx].srcbusirq);
0790 }
0791 pr_warn("IOAPIC: Invalid srcbus: %d defaulting to level\n", bus);
0792 return true;
0793 }
0794 #else
static inline bool eisa_irq_is_level(int idx, int bus, bool level)
0796 {
0797 return level;
0798 }
0799 #endif
0800
0801 static bool irq_is_level(int idx)
0802 {
0803 int bus = mp_irqs[idx].srcbus;
0804 bool level;
0805
0806
0807
0808
0809 switch (mp_irqs[idx].irqflag & MP_IRQTRIG_MASK) {
0810 case MP_IRQTRIG_DEFAULT:
/*
 * Conforms to spec, i.e. bus-type dependent trigger mode: PCI defaults
 * to level, (E)ISA defaults to edge.
 */
0815 level = !test_bit(bus, mp_bus_not_pci);
0816
0817 return eisa_irq_is_level(idx, bus, level);
0818 case MP_IRQTRIG_EDGE:
0819 return false;
0820 case MP_IRQTRIG_RESERVED:
0821 pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
0822 fallthrough;
0823 case MP_IRQTRIG_LEVEL:
0824 default:
0825 return true;
0826 }
0827 }
0828
0829 static int __acpi_get_override_irq(u32 gsi, bool *trigger, bool *polarity)
0830 {
0831 int ioapic, pin, idx;
0832
0833 if (skip_ioapic_setup)
0834 return -1;
0835
0836 ioapic = mp_find_ioapic(gsi);
0837 if (ioapic < 0)
0838 return -1;
0839
0840 pin = mp_find_ioapic_pin(ioapic, gsi);
0841 if (pin < 0)
0842 return -1;
0843
0844 idx = find_irq_entry(ioapic, pin, mp_INT);
0845 if (idx < 0)
0846 return -1;
0847
0848 *trigger = irq_is_level(idx);
0849 *polarity = irq_active_low(idx);
0850 return 0;
0851 }
0852
0853 #ifdef CONFIG_ACPI
0854 int acpi_get_override_irq(u32 gsi, int *is_level, int *active_low)
0855 {
0856 *is_level = *active_low = 0;
0857 return __acpi_get_override_irq(gsi, (bool *)is_level,
0858 (bool *)active_low);
0859 }
0860 #endif
0861
0862 void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node,
0863 int trigger, int polarity)
0864 {
0865 init_irq_alloc_info(info, NULL);
0866 info->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
0867 info->ioapic.node = node;
0868 info->ioapic.is_level = trigger;
0869 info->ioapic.active_low = polarity;
0870 info->ioapic.valid = 1;
0871 }
0872
0873 static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst,
0874 struct irq_alloc_info *src,
0875 u32 gsi, int ioapic_idx, int pin)
0876 {
0877 bool level, pol_low;
0878
0879 copy_irq_alloc_info(dst, src);
0880 dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
0881 dst->devid = mpc_ioapic_id(ioapic_idx);
0882 dst->ioapic.pin = pin;
0883 dst->ioapic.valid = 1;
0884 if (src && src->ioapic.valid) {
0885 dst->ioapic.node = src->ioapic.node;
0886 dst->ioapic.is_level = src->ioapic.is_level;
0887 dst->ioapic.active_low = src->ioapic.active_low;
0888 } else {
0889 dst->ioapic.node = NUMA_NO_NODE;
0890 if (__acpi_get_override_irq(gsi, &level, &pol_low) >= 0) {
0891 dst->ioapic.is_level = level;
0892 dst->ioapic.active_low = pol_low;
0893 } else {
0894
0895
0896
0897
0898 dst->ioapic.is_level = true;
0899 dst->ioapic.active_low = true;
0900 }
0901 }
0902 }
0903
0904 static int ioapic_alloc_attr_node(struct irq_alloc_info *info)
0905 {
0906 return (info && info->ioapic.valid) ? info->ioapic.node : NUMA_NO_NODE;
0907 }
0908
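/* Level-triggered pins get the fasteoi flow handler, edge-triggered pins the edge handler */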
0909 static void mp_register_handler(unsigned int irq, bool level)
0910 {
0911 irq_flow_handler_t hdl;
0912 bool fasteoi;
0913
0914 if (level) {
0915 irq_set_status_flags(irq, IRQ_LEVEL);
0916 fasteoi = true;
0917 } else {
0918 irq_clear_status_flags(irq, IRQ_LEVEL);
0919 fasteoi = false;
0920 }
0921
0922 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
0923 __irq_set_handler(irq, hdl, 0, fasteoi ? "fasteoi" : "edge");
0924 }
0925
0926 static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
0927 {
0928 struct mp_chip_data *data = irq_get_chip_data(irq);
0929
0930
0931
0932
0933
0934
0935 if (irq < nr_legacy_irqs() && data->count == 1) {
0936 if (info->ioapic.is_level != data->is_level)
0937 mp_register_handler(irq, info->ioapic.is_level);
0938 data->entry.is_level = data->is_level = info->ioapic.is_level;
0939 data->entry.active_low = data->active_low = info->ioapic.active_low;
0940 }
0941
0942 return data->is_level == info->ioapic.is_level &&
0943 data->active_low == info->ioapic.active_low;
0944 }
0945
0946 static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
0947 struct irq_alloc_info *info)
0948 {
0949 bool legacy = false;
0950 int irq = -1;
0951 int type = ioapics[ioapic].irqdomain_cfg.type;
0952
0953 switch (type) {
0954 case IOAPIC_DOMAIN_LEGACY:
0955
0956
0957
0958
0959 if (!ioapic_initialized || gsi >= nr_legacy_irqs())
0960 irq = gsi;
0961 legacy = mp_is_legacy_irq(irq);
0962 break;
0963 case IOAPIC_DOMAIN_STRICT:
0964 irq = gsi;
0965 break;
0966 case IOAPIC_DOMAIN_DYNAMIC:
0967 break;
0968 default:
0969 WARN(1, "ioapic: unknown irqdomain type %d\n", type);
0970 return -1;
0971 }
0972
0973 return __irq_domain_alloc_irqs(domain, irq, 1,
0974 ioapic_alloc_attr_node(info),
0975 info, legacy, NULL);
0976 }
0977
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987
0988 static int alloc_isa_irq_from_domain(struct irq_domain *domain,
0989 int irq, int ioapic, int pin,
0990 struct irq_alloc_info *info)
0991 {
0992 struct mp_chip_data *data;
0993 struct irq_data *irq_data = irq_get_irq_data(irq);
0994 int node = ioapic_alloc_attr_node(info);
0995
0996
0997
0998
0999
1000
1001 if (irq_data && irq_data->parent_data) {
1002 if (!mp_check_pin_attr(irq, info))
1003 return -EBUSY;
1004 if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic,
1005 info->ioapic.pin))
1006 return -ENOMEM;
1007 } else {
1008 info->flags |= X86_IRQ_ALLOC_LEGACY;
1009 irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
1010 NULL);
1011 if (irq >= 0) {
1012 irq_data = irq_domain_get_irq_data(domain, irq);
1013 data = irq_data->chip_data;
1014 data->isa_irq = true;
1015 }
1016 }
1017
1018 return irq;
1019 }
1020
1021 static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
1022 unsigned int flags, struct irq_alloc_info *info)
1023 {
1024 int irq;
1025 bool legacy = false;
1026 struct irq_alloc_info tmp;
1027 struct mp_chip_data *data;
1028 struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
1029
1030 if (!domain)
1031 return -ENOSYS;
1032
1033 if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
1034 irq = mp_irqs[idx].srcbusirq;
1035 legacy = mp_is_legacy_irq(irq);
1036
1037
1038
1039
1040
1041
1042
1043
1044 if (legacy && irq == PIC_CASCADE_IR)
1045 return -EINVAL;
1046 }
1047
1048 mutex_lock(&ioapic_mutex);
1049 if (!(flags & IOAPIC_MAP_ALLOC)) {
1050 if (!legacy) {
1051 irq = irq_find_mapping(domain, pin);
1052 if (irq == 0)
1053 irq = -ENOENT;
1054 }
1055 } else {
1056 ioapic_copy_alloc_attr(&tmp, info, gsi, ioapic, pin);
1057 if (legacy)
1058 irq = alloc_isa_irq_from_domain(domain, irq,
1059 ioapic, pin, &tmp);
1060 else if ((irq = irq_find_mapping(domain, pin)) == 0)
1061 irq = alloc_irq_from_domain(domain, ioapic, gsi, &tmp);
1062 else if (!mp_check_pin_attr(irq, &tmp))
1063 irq = -EBUSY;
1064 if (irq >= 0) {
1065 data = irq_get_chip_data(irq);
1066 data->count++;
1067 }
1068 }
1069 mutex_unlock(&ioapic_mutex);
1070
1071 return irq;
1072 }
1073
1074 static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags)
1075 {
1076 u32 gsi = mp_pin_to_gsi(ioapic, pin);
1077
1078
1079
1080
1081 if (mp_irqs[idx].dstirq != pin)
1082 pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");
1083
1084 #ifdef CONFIG_X86_32
1085
1086
1087
1088 if ((pin >= 16) && (pin <= 23)) {
1089 if (pirq_entries[pin-16] != -1) {
1090 if (!pirq_entries[pin-16]) {
1091 apic_printk(APIC_VERBOSE, KERN_DEBUG
1092 "disabling PIRQ%d\n", pin-16);
1093 } else {
1094 int irq = pirq_entries[pin-16];
1095 apic_printk(APIC_VERBOSE, KERN_DEBUG
1096 "using PIRQ%d -> IRQ %d\n",
1097 pin-16, irq);
1098 return irq;
1099 }
1100 }
1101 }
1102 #endif
1103
1104 return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, NULL);
1105 }
1106
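/* Translate a GSI to a Linux IRQ number, allocating a mapping when IOAPIC_MAP_ALLOC is set */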
1107 int mp_map_gsi_to_irq(u32 gsi, unsigned int flags, struct irq_alloc_info *info)
1108 {
1109 int ioapic, pin, idx;
1110
1111 ioapic = mp_find_ioapic(gsi);
1112 if (ioapic < 0)
1113 return -ENODEV;
1114
1115 pin = mp_find_ioapic_pin(ioapic, gsi);
1116 idx = find_irq_entry(ioapic, pin, mp_INT);
1117 if ((flags & IOAPIC_MAP_CHECK) && idx < 0)
1118 return -ENODEV;
1119
1120 return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, info);
1121 }
1122
1123 void mp_unmap_irq(int irq)
1124 {
1125 struct irq_data *irq_data = irq_get_irq_data(irq);
1126 struct mp_chip_data *data;
1127
1128 if (!irq_data || !irq_data->domain)
1129 return;
1130
1131 data = irq_data->chip_data;
1132 if (!data || data->isa_irq)
1133 return;
1134
1135 mutex_lock(&ioapic_mutex);
1136 if (--data->count == 0)
1137 irq_domain_free_irqs(irq, 1);
1138 mutex_unlock(&ioapic_mutex);
1139 }
1140
1141
1142
1143
1144
1145 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
1146 {
1147 int irq, i, best_ioapic = -1, best_idx = -1;
1148
1149 apic_printk(APIC_DEBUG,
1150 "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
1151 bus, slot, pin);
1152 if (test_bit(bus, mp_bus_not_pci)) {
1153 apic_printk(APIC_VERBOSE,
1154 "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
1155 return -1;
1156 }
1157
1158 for (i = 0; i < mp_irq_entries; i++) {
1159 int lbus = mp_irqs[i].srcbus;
1160 int ioapic_idx, found = 0;
1161
1162 if (bus != lbus || mp_irqs[i].irqtype != mp_INT ||
1163 slot != ((mp_irqs[i].srcbusirq >> 2) & 0x1f))
1164 continue;
1165
1166 for_each_ioapic(ioapic_idx)
1167 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
1168 mp_irqs[i].dstapic == MP_APIC_ALL) {
1169 found = 1;
1170 break;
1171 }
1172 if (!found)
1173 continue;
1174
1175
1176 irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq, 0);
1177 if (irq > 0 && !IO_APIC_IRQ(irq))
1178 continue;
1179
1180 if (pin == (mp_irqs[i].srcbusirq & 3)) {
1181 best_idx = i;
1182 best_ioapic = ioapic_idx;
1183 goto out;
1184 }
1185
1186
1187
1188
1189
1190 if (best_idx < 0) {
1191 best_idx = i;
1192 best_ioapic = ioapic_idx;
1193 }
1194 }
1195 if (best_idx < 0)
1196 return -1;
1197
1198 out:
1199 return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
1200 IOAPIC_MAP_ALLOC);
1201 }
1202 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1203
1204 static struct irq_chip ioapic_chip, ioapic_ir_chip;
1205
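/* Set up an interrupt for every IO-APIC pin that has an MP interrupt source entry */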
1206 static void __init setup_IO_APIC_irqs(void)
1207 {
1208 unsigned int ioapic, pin;
1209 int idx;
1210
1211 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1212
1213 for_each_ioapic_pin(ioapic, pin) {
1214 idx = find_irq_entry(ioapic, pin, mp_INT);
1215 if (idx < 0)
1216 apic_printk(APIC_VERBOSE,
1217 KERN_DEBUG " apic %d pin %d not connected\n",
1218 mpc_ioapic_id(ioapic), pin);
1219 else
1220 pin_2_irq(idx, ioapic, pin,
1221 ioapic ? 0 : IOAPIC_MAP_ALLOC);
1222 }
1223 }
1224
1225 void ioapic_zap_locks(void)
1226 {
1227 raw_spin_lock_init(&ioapic_lock);
1228 }
1229
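/* Dump the redirection table of one IO-APIC for debugging */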
1230 static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
1231 {
1232 struct IO_APIC_route_entry entry;
1233 char buf[256];
1234 int i;
1235
1236 printk(KERN_DEBUG "IOAPIC %d:\n", apic);
1237 for (i = 0; i <= nr_entries; i++) {
1238 entry = ioapic_read_entry(apic, i);
1239 snprintf(buf, sizeof(buf),
1240 " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)",
1241 i,
1242 entry.masked ? "disabled" : "enabled ",
1243 entry.is_level ? "level" : "edge ",
1244 entry.active_low ? "low " : "high",
1245 entry.vector, entry.irr, entry.delivery_status);
1246 if (entry.ir_format) {
1247 printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
1248 buf,
1249 (entry.ir_index_15 << 15) | entry.ir_index_0_14,
1250 entry.ir_zero);
1251 } else {
1252 printk(KERN_DEBUG "%s, %s, D(%02X%02X), M(%1d)\n", buf,
1253 entry.dest_mode_logical ? "logical " : "physical",
1254 entry.virt_destid_8_14, entry.destid_0_7,
1255 entry.delivery_mode);
1256 }
1257 }
1258 }
1259
1260 static void __init print_IO_APIC(int ioapic_idx)
1261 {
1262 union IO_APIC_reg_00 reg_00;
1263 union IO_APIC_reg_01 reg_01;
1264 union IO_APIC_reg_02 reg_02;
1265 union IO_APIC_reg_03 reg_03;
1266 unsigned long flags;
1267
1268 raw_spin_lock_irqsave(&ioapic_lock, flags);
1269 reg_00.raw = io_apic_read(ioapic_idx, 0);
1270 reg_01.raw = io_apic_read(ioapic_idx, 1);
1271 if (reg_01.bits.version >= 0x10)
1272 reg_02.raw = io_apic_read(ioapic_idx, 2);
1273 if (reg_01.bits.version >= 0x20)
1274 reg_03.raw = io_apic_read(ioapic_idx, 3);
1275 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1276
1277 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
1278 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1279 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1280 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1281 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1282
printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1284 printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
1285 reg_01.bits.entries);
1286
1287 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1288 printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
1289 reg_01.bits.version);
1290
1291
1292
1293
1294
1295
1296 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1297 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1298 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1299 }
1300
1301
1302
1303
1304
1305
1306 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1307 reg_03.raw != reg_01.raw) {
1308 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1309 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1310 }
1311
1312 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1313 io_apic_print_entries(ioapic_idx, reg_01.bits.entries);
1314 }
1315
1316 void __init print_IO_APICs(void)
1317 {
1318 int ioapic_idx;
1319 unsigned int irq;
1320
1321 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1322 for_each_ioapic(ioapic_idx)
1323 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1324 mpc_ioapic_id(ioapic_idx),
1325 ioapics[ioapic_idx].nr_registers);
1326
1327
1328
1329
1330
1331 printk(KERN_INFO "testing the IO APIC.......................\n");
1332
1333 for_each_ioapic(ioapic_idx)
1334 print_IO_APIC(ioapic_idx);
1335
1336 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1337 for_each_active_irq(irq) {
1338 struct irq_pin_list *entry;
1339 struct irq_chip *chip;
1340 struct mp_chip_data *data;
1341
1342 chip = irq_get_chip(irq);
1343 if (chip != &ioapic_chip && chip != &ioapic_ir_chip)
1344 continue;
1345 data = irq_get_chip_data(irq);
1346 if (!data)
1347 continue;
1348 if (list_empty(&data->irq_2_pin))
1349 continue;
1350
1351 printk(KERN_DEBUG "IRQ%d ", irq);
1352 for_each_irq_pin(entry, data->irq_2_pin)
1353 pr_cont("-> %d:%d", entry->apic, entry->pin);
1354 pr_cont("\n");
1355 }
1356
1357 printk(KERN_INFO ".................................... done.\n");
1358 }
1359
1360
1361 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1362
1363 void __init enable_IO_APIC(void)
1364 {
1365 int i8259_apic, i8259_pin;
1366 int apic, pin;
1367
1368 if (skip_ioapic_setup)
1369 nr_ioapics = 0;
1370
1371 if (!nr_legacy_irqs() || !nr_ioapics)
1372 return;
1373
1374 for_each_ioapic_pin(apic, pin) {
1375
1376 struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin);
1377
1378
1379
1380
1381 if (!entry.masked &&
1382 entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) {
1383 ioapic_i8259.apic = apic;
1384 ioapic_i8259.pin = pin;
1385 goto found_i8259;
1386 }
1387 }
1388 found_i8259:
1389
1390
1391
1392
1393
1394 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1395 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1396
1397 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1398 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1399 ioapic_i8259.pin = i8259_pin;
1400 ioapic_i8259.apic = i8259_apic;
1401 }
1402
1403 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1404 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1405 {
1406 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1407 }
1408
1409
1410
1411
1412 clear_IO_APIC();
1413 }
1414
1415 void native_restore_boot_irq_mode(void)
1416 {
1417
1418
1419
1420
1421
1422 if (ioapic_i8259.pin != -1) {
1423 struct IO_APIC_route_entry entry;
1424 u32 apic_id = read_apic_id();
1425
1426 memset(&entry, 0, sizeof(entry));
1427 entry.masked = false;
1428 entry.is_level = false;
1429 entry.active_low = false;
1430 entry.dest_mode_logical = false;
1431 entry.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
1432 entry.destid_0_7 = apic_id & 0xFF;
1433 entry.virt_destid_8_14 = apic_id >> 8;
1434
1435
1436
1437
1438 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1439 }
1440
1441 if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
1442 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1443 }
1444
1445 void restore_boot_irq_mode(void)
1446 {
1447 if (!nr_legacy_irqs())
1448 return;
1449
1450 x86_apic_ops.restore();
1451 }
1452
1453 #ifdef CONFIG_X86_32
1454
1455
1456
1457
1458
1459
1460 void __init setup_ioapic_ids_from_mpc_nocheck(void)
1461 {
1462 union IO_APIC_reg_00 reg_00;
1463 physid_mask_t phys_id_present_map;
1464 int ioapic_idx;
1465 int i;
1466 unsigned char old_id;
1467 unsigned long flags;
1468
1469
1470
1471
1472
1473 apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);
1474
1475
1476
1477
1478 for_each_ioapic(ioapic_idx) {
1479
1480 raw_spin_lock_irqsave(&ioapic_lock, flags);
1481 reg_00.raw = io_apic_read(ioapic_idx, 0);
1482 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1483
1484 old_id = mpc_ioapic_id(ioapic_idx);
1485
1486 if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
1487 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1488 ioapic_idx, mpc_ioapic_id(ioapic_idx));
1489 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1490 reg_00.bits.ID);
1491 ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
1492 }
1493
1494
1495
1496
1497
1498
1499 if (apic->check_apicid_used(&phys_id_present_map,
1500 mpc_ioapic_id(ioapic_idx))) {
1501 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1502 ioapic_idx, mpc_ioapic_id(ioapic_idx));
1503 for (i = 0; i < get_physical_broadcast(); i++)
1504 if (!physid_isset(i, phys_id_present_map))
1505 break;
1506 if (i >= get_physical_broadcast())
1507 panic("Max APIC ID exceeded!\n");
1508 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1509 i);
1510 physid_set(i, phys_id_present_map);
1511 ioapics[ioapic_idx].mp_config.apicid = i;
1512 } else {
1513 physid_mask_t tmp;
1514 apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
1515 &tmp);
1516 apic_printk(APIC_VERBOSE, "Setting %d in the "
1517 "phys_id_present_map\n",
1518 mpc_ioapic_id(ioapic_idx));
1519 physids_or(phys_id_present_map, phys_id_present_map, tmp);
1520 }
1521
1522
1523
1524
1525
1526 if (old_id != mpc_ioapic_id(ioapic_idx))
1527 for (i = 0; i < mp_irq_entries; i++)
1528 if (mp_irqs[i].dstapic == old_id)
1529 mp_irqs[i].dstapic
1530 = mpc_ioapic_id(ioapic_idx);
1531
1532
1533
1534
1535
1536 if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
1537 continue;
1538
1539 apic_printk(APIC_VERBOSE, KERN_INFO
1540 "...changing IO-APIC physical APIC ID to %d ...",
1541 mpc_ioapic_id(ioapic_idx));
1542
1543 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
1544 raw_spin_lock_irqsave(&ioapic_lock, flags);
1545 io_apic_write(ioapic_idx, 0, reg_00.raw);
1546 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1547
1548
1549
1550
1551 raw_spin_lock_irqsave(&ioapic_lock, flags);
1552 reg_00.raw = io_apic_read(ioapic_idx, 0);
1553 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1554 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
1555 pr_cont("could not set ID!\n");
1556 else
1557 apic_printk(APIC_VERBOSE, " ok.\n");
1558 }
1559 }
1560
1561 void __init setup_ioapic_ids_from_mpc(void)
1562 {
1563
1564 if (acpi_ioapic)
1565 return;
1566
1567
1568
1569
1570 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1571 || APIC_XAPIC(boot_cpu_apic_version))
1572 return;
1573 setup_ioapic_ids_from_mpc_nocheck();
1574 }
1575 #endif
1576
1577 int no_timer_check __initdata;
1578
1579 static int __init notimercheck(char *s)
1580 {
1581 no_timer_check = 1;
1582 return 1;
1583 }
1584 __setup("no_timer_check", notimercheck);
1585
1586 static void __init delay_with_tsc(void)
1587 {
1588 unsigned long long start, now;
1589 unsigned long end = jiffies + 4;
1590
1591 start = rdtsc();
1592
1593
1594
1595
1596
1597
1598
1599 do {
1600 rep_nop();
1601 now = rdtsc();
1602 } while ((now - start) < 40000000000ULL / HZ &&
1603 time_before_eq(jiffies, end));
1604 }
1605
1606 static void __init delay_without_tsc(void)
1607 {
1608 unsigned long end = jiffies + 4;
1609 int band = 1;
1610
1611
1612
1613
1614
1615
1616
1617
1618 do {
1619 __delay(((1U << band++) * 10000000UL) / HZ);
1620 } while (band < 12 && time_before_eq(jiffies, end));
1621 }
1622
/*
 * Check whether the timer interrupt is actually being delivered: spin
 * with interrupts enabled for roughly four timer ticks and then test
 * whether jiffies advanced. Can be skipped with "no_timer_check".
 */
1631 static int __init timer_irq_works(void)
1632 {
1633 unsigned long t1 = jiffies;
1634
1635 if (no_timer_check)
1636 return 1;
1637
1638 local_irq_enable();
1639 if (boot_cpu_has(X86_FEATURE_TSC))
1640 delay_with_tsc();
1641 else
1642 delay_without_tsc();
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652 local_irq_disable();
1653
1654
1655 return time_after(jiffies, t1 + 4);
1656 }
1657
/*
 * Starting up an edge-triggered IO-APIC interrupt is tricky: the edge
 * may already have fired while the line was masked, in which case it
 * would be lost. For the legacy IRQs, check the i8259 for a pending
 * interrupt and report it, so that the core can retrigger it once the
 * pin is unmasked.
 */
1680 static unsigned int startup_ioapic_irq(struct irq_data *data)
1681 {
1682 int was_pending = 0, irq = data->irq;
1683 unsigned long flags;
1684
1685 raw_spin_lock_irqsave(&ioapic_lock, flags);
1686 if (irq < nr_legacy_irqs()) {
1687 legacy_pic->mask(irq);
1688 if (legacy_pic->irq_pending(irq))
1689 was_pending = 1;
1690 }
1691 __unmask_ioapic(data->chip_data);
1692 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1693
1694 return was_pending;
1695 }
1696
1697 atomic_t irq_mis_count;
1698
1699 #ifdef CONFIG_GENERIC_PENDING_IRQ
1700 static bool io_apic_level_ack_pending(struct mp_chip_data *data)
1701 {
1702 struct irq_pin_list *entry;
1703 unsigned long flags;
1704
1705 raw_spin_lock_irqsave(&ioapic_lock, flags);
1706 for_each_irq_pin(entry, data->irq_2_pin) {
1707 struct IO_APIC_route_entry e;
1708 int pin;
1709
1710 pin = entry->pin;
1711 e.w1 = io_apic_read(entry->apic, 0x10 + pin*2);
1712
1713 if (e.irr) {
1714 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1715 return true;
1716 }
1717 }
1718 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1719
1720 return false;
1721 }
1722
1723 static inline bool ioapic_prepare_move(struct irq_data *data)
1724 {
1725
1726 if (unlikely(irqd_is_setaffinity_pending(data))) {
1727 if (!irqd_irq_masked(data))
1728 mask_ioapic_irq(data);
1729 return true;
1730 }
1731 return false;
1732 }
1733
1734 static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
1735 {
1736 if (unlikely(moveit)) {
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763 if (!io_apic_level_ack_pending(data->chip_data))
1764 irq_move_masked_irq(data);
1765
1766 if (!irqd_irq_masked(data))
1767 unmask_ioapic_irq(data);
1768 }
1769 }
1770 #else
1771 static inline bool ioapic_prepare_move(struct irq_data *data)
1772 {
1773 return false;
1774 }
1775 static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
1776 {
1777 }
1778 #endif
1779
1780 static void ioapic_ack_level(struct irq_data *irq_data)
1781 {
1782 struct irq_cfg *cfg = irqd_cfg(irq_data);
1783 unsigned long v;
1784 bool moveit;
1785 int i;
1786
1787 irq_complete_move(cfg);
1788 moveit = ioapic_prepare_move(irq_data);
1789
/*
 * Some IO-APIC/chipset combinations suffer from an erratum where a
 * level-triggered interrupt is erroneously delivered to the CPU as
 * edge-triggered. The local APIC then never sets the TMR bit for the
 * vector, the EOI goes out as an edge EOI, and the IO-APIC's Remote IRR
 * stays set, blocking further interrupts on that line.
 *
 * Snapshot the TMR bit for this vector before the ack; if it turns out
 * to be clear, the Remote IRR is cleared by hand with an explicit
 * IO-APIC EOI below and irq_mis_count is bumped for statistics.
 */
1822 i = cfg->vector;
1823 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
1824
1825
1826
1827
1828
1829 ack_APIC_irq();
1830
1831
1832
1833
1834
1835
1836
1837
1838 if (!(v & (1 << (i & 0x1f)))) {
1839 atomic_inc(&irq_mis_count);
1840 eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
1841 }
1842
1843 ioapic_finish_move(irq_data, moveit);
1844 }
1845
1846 static void ioapic_ir_ack_level(struct irq_data *irq_data)
1847 {
1848 struct mp_chip_data *data = irq_data->chip_data;
1849
1850
1851
1852
1853
1854
1855
1856 apic_ack_irq(irq_data);
1857 eoi_ioapic_pin(data->entry.vector, data);
1858 }
1859
/*
 * The IO-APIC is effectively a device that turns interrupt pins into
 * MSI messages. The parent (vector or interrupt-remapping) domain
 * composes the MSI message for the current target CPU; this helper
 * transcribes the relevant fields of that message into an IO-APIC
 * routing entry so the pins can be (re)programmed consistently.
 */
1876 static void ioapic_setup_msg_from_msi(struct irq_data *irq_data,
1877 struct IO_APIC_route_entry *entry)
1878 {
1879 struct msi_msg msg;
1880
1881
1882 irq_chip_compose_msi_msg(irq_data, &msg);
1883
1884
1885
1886
1887
1888
1889 entry->vector = msg.arch_data.vector;
1890
1891 entry->delivery_mode = msg.arch_data.delivery_mode;
1892
1893 entry->dest_mode_logical = msg.arch_addr_lo.dest_mode_logical;
1894
1895 entry->ir_format = msg.arch_addr_lo.dmar_format;
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906 entry->ir_index_0_14 = msg.arch_addr_lo.dmar_index_0_14;
1907 }
1908
1909 static void ioapic_configure_entry(struct irq_data *irqd)
1910 {
1911 struct mp_chip_data *mpd = irqd->chip_data;
1912 struct irq_pin_list *entry;
1913
1914 ioapic_setup_msg_from_msi(irqd, &mpd->entry);
1915
1916 for_each_irq_pin(entry, mpd->irq_2_pin)
1917 __ioapic_write_entry(entry->apic, entry->pin, mpd->entry);
1918 }
1919
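/*
 * Let the parent domain pick the new destination, then rewrite the
 * redirection entries of all pins with the freshly composed message.
 */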
1920 static int ioapic_set_affinity(struct irq_data *irq_data,
1921 const struct cpumask *mask, bool force)
1922 {
1923 struct irq_data *parent = irq_data->parent_data;
1924 unsigned long flags;
1925 int ret;
1926
1927 ret = parent->chip->irq_set_affinity(parent, mask, force);
1928 raw_spin_lock_irqsave(&ioapic_lock, flags);
1929 if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE)
1930 ioapic_configure_entry(irq_data);
1931 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1932
1933 return ret;
1934 }
1935
1936
/*
 * Report whether the interrupt is currently active at the IO-APIC,
 * i.e. whether Remote IRR is set on any level-triggered pin of this
 * interrupt. The core uses this to avoid tearing down an interrupt
 * that is still in flight.
 */

1950 static int ioapic_irq_get_chip_state(struct irq_data *irqd,
1951 enum irqchip_irq_state which,
1952 bool *state)
1953 {
1954 struct mp_chip_data *mcd = irqd->chip_data;
1955 struct IO_APIC_route_entry rentry;
1956 struct irq_pin_list *p;
1957
1958 if (which != IRQCHIP_STATE_ACTIVE)
1959 return -EINVAL;
1960
1961 *state = false;
1962 raw_spin_lock(&ioapic_lock);
1963 for_each_irq_pin(p, mcd->irq_2_pin) {
1964 rentry = __ioapic_read_entry(p->apic, p->pin);
1965
1966
1967
1968
1969
1970
1971 if (rentry.irr && rentry.is_level) {
1972 *state = true;
1973 break;
1974 }
1975 }
1976 raw_spin_unlock(&ioapic_lock);
1977 return 0;
1978 }
1979
1980 static struct irq_chip ioapic_chip __read_mostly = {
1981 .name = "IO-APIC",
1982 .irq_startup = startup_ioapic_irq,
1983 .irq_mask = mask_ioapic_irq,
1984 .irq_unmask = unmask_ioapic_irq,
1985 .irq_ack = irq_chip_ack_parent,
1986 .irq_eoi = ioapic_ack_level,
1987 .irq_set_affinity = ioapic_set_affinity,
1988 .irq_retrigger = irq_chip_retrigger_hierarchy,
1989 .irq_get_irqchip_state = ioapic_irq_get_chip_state,
1990 .flags = IRQCHIP_SKIP_SET_WAKE |
1991 IRQCHIP_AFFINITY_PRE_STARTUP,
1992 };
1993
1994 static struct irq_chip ioapic_ir_chip __read_mostly = {
1995 .name = "IR-IO-APIC",
1996 .irq_startup = startup_ioapic_irq,
1997 .irq_mask = mask_ioapic_irq,
1998 .irq_unmask = unmask_ioapic_irq,
1999 .irq_ack = irq_chip_ack_parent,
2000 .irq_eoi = ioapic_ir_ack_level,
2001 .irq_set_affinity = ioapic_set_affinity,
2002 .irq_retrigger = irq_chip_retrigger_hierarchy,
2003 .irq_get_irqchip_state = ioapic_irq_get_chip_state,
2004 .flags = IRQCHIP_SKIP_SET_WAKE |
2005 IRQCHIP_AFFINITY_PRE_STARTUP,
2006 };
2007
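/*
 * Any IRQ that is supposed to go through the IO-APIC but never got a
 * vector assigned is handed back to the legacy PIC (for the low legacy
 * IRQs) or left with no_irq_chip.
 */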
2008 static inline void init_IO_APIC_traps(void)
2009 {
2010 struct irq_cfg *cfg;
2011 unsigned int irq;
2012
2013 for_each_active_irq(irq) {
2014 cfg = irq_cfg(irq);
2015 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2016
2017
2018
2019
2020
2021 if (irq < nr_legacy_irqs())
2022 legacy_pic->make_irq(irq);
2023 else
2024
2025 irq_set_chip(irq, &no_irq_chip);
2026 }
2027 }
2028 }
2029
/*
 * The local APIC irq-chip, used when the timer is run as a virtual-wire
 * interrupt in check_timer().
 */
2034 static void mask_lapic_irq(struct irq_data *data)
2035 {
2036 unsigned long v;
2037
2038 v = apic_read(APIC_LVT0);
2039 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2040 }
2041
2042 static void unmask_lapic_irq(struct irq_data *data)
2043 {
2044 unsigned long v;
2045
2046 v = apic_read(APIC_LVT0);
2047 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2048 }
2049
2050 static void ack_lapic_irq(struct irq_data *data)
2051 {
2052 ack_APIC_irq();
2053 }
2054
2055 static struct irq_chip lapic_chip __read_mostly = {
2056 .name = "local-APIC",
2057 .irq_mask = mask_lapic_irq,
2058 .irq_unmask = unmask_lapic_irq,
2059 .irq_ack = ack_lapic_irq,
2060 };
2061
2062 static void lapic_register_intr(int irq)
2063 {
2064 irq_clear_status_flags(irq, IRQ_LEVEL);
2065 irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2066 "edge");
2067 }
2068
2069
/*
 * Briefly route the RTC interrupt (IRQ8) as an ExtINT and let it fire a
 * few times: this generates the INTA cycles that some 8259A glue logic
 * needs before it deasserts the interrupt line. The original RTE is
 * restored afterwards.
 */
2076 static inline void __init unlock_ExtINT_logic(void)
2077 {
2078 int apic, pin, i;
2079 struct IO_APIC_route_entry entry0, entry1;
2080 unsigned char save_control, save_freq_select;
2081 u32 apic_id;
2082
2083 pin = find_isa_irq_pin(8, mp_INT);
2084 if (pin == -1) {
2085 WARN_ON_ONCE(1);
2086 return;
2087 }
2088 apic = find_isa_irq_apic(8, mp_INT);
2089 if (apic == -1) {
2090 WARN_ON_ONCE(1);
2091 return;
2092 }
2093
2094 entry0 = ioapic_read_entry(apic, pin);
2095 clear_IO_APIC_pin(apic, pin);
2096
2097 apic_id = hard_smp_processor_id();
2098 memset(&entry1, 0, sizeof(entry1));
2099
2100 entry1.dest_mode_logical = true;
2101 entry1.masked = false;
2102 entry1.destid_0_7 = apic_id & 0xFF;
2103 entry1.virt_destid_8_14 = apic_id >> 8;
2104 entry1.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
2105 entry1.active_low = entry0.active_low;
2106 entry1.is_level = false;
2107 entry1.vector = 0;
2108
2109 ioapic_write_entry(apic, pin, entry1);
2110
2111 save_control = CMOS_READ(RTC_CONTROL);
2112 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2113 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2114 RTC_FREQ_SELECT);
2115 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2116
2117 i = 100;
2118 while (i-- > 0) {
2119 mdelay(10);
2120 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2121 i -= 10;
2122 }
2123
2124 CMOS_WRITE(save_control, RTC_CONTROL);
2125 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2126 clear_IO_APIC_pin(apic, pin);
2127
2128 ioapic_write_entry(apic, pin, entry0);
2129 }
2130
2131 static int disable_timer_pin_1 __initdata;
2132
2133 static int __init disable_timer_pin_setup(char *arg)
2134 {
2135 disable_timer_pin_1 = 1;
2136 return 0;
2137 }
2138 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2139
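/* Allocate IRQ0 for the timer pin through the IO-APIC's irqdomain */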
2140 static int mp_alloc_timer_irq(int ioapic, int pin)
2141 {
2142 int irq = -1;
2143 struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
2144
2145 if (domain) {
2146 struct irq_alloc_info info;
2147
2148 ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0);
2149 info.devid = mpc_ioapic_id(ioapic);
2150 info.ioapic.pin = pin;
2151 mutex_lock(&ioapic_mutex);
2152 irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info);
2153 mutex_unlock(&ioapic_mutex);
2154 }
2155
2156 return irq;
2157 }
2158
2159
/*
 * Sanity check the timer interrupt: try delivery through the IO-APIC
 * pin reported by the BIOS, then through the ExtINT pin, then as a
 * local APIC virtual-wire or ExtINT interrupt. This has to cope with a
 * wide range of board and BIOS bugs.
 */
2167 static inline void __init check_timer(void)
2168 {
2169 struct irq_data *irq_data = irq_get_irq_data(0);
2170 struct mp_chip_data *data = irq_data->chip_data;
2171 struct irq_cfg *cfg = irqd_cfg(irq_data);
2172 int node = cpu_to_node(0);
2173 int apic1, pin1, apic2, pin2;
2174 int no_pin1 = 0;
2175
2176 if (!global_clock_event)
2177 return;
2178
2179 local_irq_disable();
2180
2181
2182
2183
2184 legacy_pic->mask(0);
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2196 legacy_pic->init(1);
2197
2198 pin1 = find_isa_irq_pin(0, mp_INT);
2199 apic1 = find_isa_irq_apic(0, mp_INT);
2200 pin2 = ioapic_i8259.pin;
2201 apic2 = ioapic_i8259.apic;
2202
2203 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2204 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2205 cfg->vector, apic1, pin1, apic2, pin2);
2206
2207
2208
2209
2210
2211
2212
2213
2214 if (pin1 == -1) {
2215 panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
2216 pin1 = pin2;
2217 apic1 = apic2;
2218 no_pin1 = 1;
2219 } else if (pin2 == -1) {
2220 pin2 = pin1;
2221 apic2 = apic1;
2222 }
2223
2224 if (pin1 != -1) {
2225
2226 if (no_pin1) {
2227 mp_alloc_timer_irq(apic1, pin1);
2228 } else {
2229
2230
2231
2232
2233
2234 int idx = find_irq_entry(apic1, pin1, mp_INT);
2235
2236 if (idx != -1 && irq_is_level(idx))
2237 unmask_ioapic_irq(irq_get_irq_data(0));
2238 }
2239 irq_domain_deactivate_irq(irq_data);
2240 irq_domain_activate_irq(irq_data, false);
2241 if (timer_irq_works()) {
2242 if (disable_timer_pin_1 > 0)
2243 clear_IO_APIC_pin(0, pin1);
2244 goto out;
2245 }
2246 panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
2247 clear_IO_APIC_pin(apic1, pin1);
2248 if (!no_pin1)
2249 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2250 "8254 timer not connected to IO-APIC\n");
2251
2252 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2253 "(IRQ0) through the 8259A ...\n");
2254 apic_printk(APIC_QUIET, KERN_INFO
2255 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2256
2257
2258
2259 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
2260 irq_domain_deactivate_irq(irq_data);
2261 irq_domain_activate_irq(irq_data, false);
2262 legacy_pic->unmask(0);
2263 if (timer_irq_works()) {
2264 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2265 goto out;
2266 }
2267
2268
2269
2270 legacy_pic->mask(0);
2271 clear_IO_APIC_pin(apic2, pin2);
2272 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2273 }
2274
2275 apic_printk(APIC_QUIET, KERN_INFO
2276 "...trying to set up timer as Virtual Wire IRQ...\n");
2277
2278 lapic_register_intr(0);
2279 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);
2280 legacy_pic->unmask(0);
2281
2282 if (timer_irq_works()) {
2283 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2284 goto out;
2285 }
2286 legacy_pic->mask(0);
2287 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2288 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2289
2290 apic_printk(APIC_QUIET, KERN_INFO
2291 "...trying to set up timer as ExtINT IRQ...\n");
2292
2293 legacy_pic->init(0);
2294 legacy_pic->make_irq(0);
2295 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2296 legacy_pic->unmask(0);
2297
2298 unlock_ExtINT_logic();
2299
2300 if (timer_irq_works()) {
2301 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2302 goto out;
2303 }
2304 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2305 if (apic_is_x2apic_enabled())
2306 apic_printk(APIC_QUIET, KERN_INFO
2307 "Perhaps problem with the pre-enabled x2apic mode\n"
2308 "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
2309 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2310 "report. Then try booting with the 'noapic' option.\n");
2311 out:
2312 local_irq_enable();
2313 }
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332 #define PIC_IRQS (1UL << PIC_CASCADE_IR)
2333
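/*
 * Create the irqdomain for one IO-APIC and link it underneath the
 * parent (vector or interrupt-remapping) domain located via fwspec.
 */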
2334 static int mp_irqdomain_create(int ioapic)
2335 {
2336 struct irq_domain *parent;
2337 int hwirqs = mp_ioapic_pin_count(ioapic);
2338 struct ioapic *ip = &ioapics[ioapic];
2339 struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
2340 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
2341 struct fwnode_handle *fn;
2342 struct irq_fwspec fwspec;
2343
2344 if (cfg->type == IOAPIC_DOMAIN_INVALID)
2345 return 0;
2346
2347
2348 if (cfg->dev) {
2349 fn = of_node_to_fwnode(cfg->dev);
2350 } else {
2351 fn = irq_domain_alloc_named_id_fwnode("IO-APIC", mpc_ioapic_id(ioapic));
2352 if (!fn)
2353 return -ENOMEM;
2354 }
2355
2356 fwspec.fwnode = fn;
2357 fwspec.param_count = 1;
2358 fwspec.param[0] = mpc_ioapic_id(ioapic);
2359
2360 parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
2361 if (!parent) {
2362 if (!cfg->dev)
2363 irq_domain_free_fwnode(fn);
2364 return -ENODEV;
2365 }
2366
2367 ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
2368 (void *)(long)ioapic);
2369
2370 if (!ip->irqdomain) {
2371
2372 if (!cfg->dev)
2373 irq_domain_free_fwnode(fn);
2374 return -ENOMEM;
2375 }
2376
2377 ip->irqdomain->parent = parent;
2378
2379 if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
2380 cfg->type == IOAPIC_DOMAIN_STRICT)
2381 ioapic_dynirq_base = max(ioapic_dynirq_base,
2382 gsi_cfg->gsi_end + 1);
2383
2384 return 0;
2385 }
2386
2387 static void ioapic_destroy_irqdomain(int idx)
2388 {
2389 struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
struct fwnode_handle *fn;

if (ioapics[idx].irqdomain) {
/* Only read ->fwnode once the domain is known to exist */
fn = ioapics[idx].irqdomain->fwnode;
irq_domain_remove(ioapics[idx].irqdomain);
2394 if (!cfg->dev)
2395 irq_domain_free_fwnode(fn);
2396 ioapics[idx].irqdomain = NULL;
2397 }
2398 }
2399
2400 void __init setup_IO_APIC(void)
2401 {
2402 int ioapic;
2403
2404 if (skip_ioapic_setup || !nr_ioapics)
2405 return;
2406
2407 io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;
2408
2409 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2410 for_each_ioapic(ioapic)
2411 BUG_ON(mp_irqdomain_create(ioapic));
2412
2413
2414
2415
2416 x86_init.mpparse.setup_ioapic_ids();
2417
2418 sync_Arb_IDs();
2419 setup_IO_APIC_irqs();
2420 init_IO_APIC_traps();
2421 if (nr_legacy_irqs())
2422 check_timer();
2423
2424 ioapic_initialized = 1;
2425 }
2426
2427 static void resume_ioapic_id(int ioapic_idx)
2428 {
2429 unsigned long flags;
2430 union IO_APIC_reg_00 reg_00;
2431
2432 raw_spin_lock_irqsave(&ioapic_lock, flags);
2433 reg_00.raw = io_apic_read(ioapic_idx, 0);
2434 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
2435 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
2436 io_apic_write(ioapic_idx, 0, reg_00.raw);
2437 }
2438 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2439 }
2440
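/* Syscore resume: restore each IO-APIC's ID and all saved redirection entries */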
2441 static void ioapic_resume(void)
2442 {
2443 int ioapic_idx;
2444
2445 for_each_ioapic_reverse(ioapic_idx)
2446 resume_ioapic_id(ioapic_idx);
2447
2448 restore_ioapic_entries();
2449 }
2450
2451 static struct syscore_ops ioapic_syscore_ops = {
2452 .suspend = save_ioapic_entries,
2453 .resume = ioapic_resume,
2454 };
2455
2456 static int __init ioapic_init_ops(void)
2457 {
2458 register_syscore_ops(&ioapic_syscore_ops);
2459
2460 return 0;
2461 }
2462
2463 device_initcall(ioapic_init_ops);
2464
2465 static int io_apic_get_redir_entries(int ioapic)
2466 {
2467 union IO_APIC_reg_01 reg_01;
2468 unsigned long flags;
2469
2470 raw_spin_lock_irqsave(&ioapic_lock, flags);
2471 reg_01.raw = io_apic_read(ioapic, 1);
2472 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2473
/*
 * The register holds the maximum redirection index, which is one less
 * than the number of redirection entries.
 */
2478 return reg_01.bits.entries + 1;
2479 }
2480
2481 unsigned int arch_dynirq_lower_bound(unsigned int from)
2482 {
/*
 * May be called before the IO-APICs are initialized; in that case
 * ioapic_dynirq_base is not valid yet, so fall back to gsi_top.
 */
2487 if (!ioapic_initialized)
2488 return gsi_top;
2489
2490
2491
2492
2493 return ioapic_dynirq_base ? : from;
2494 }
2495
2496 #ifdef CONFIG_X86_32
2497 static int io_apic_get_unique_id(int ioapic, int apic_id)
2498 {
2499 union IO_APIC_reg_00 reg_00;
2500 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
2501 physid_mask_t tmp;
2502 unsigned long flags;
2503 int i = 0;
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514 if (physids_empty(apic_id_map))
2515 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
2516
2517 raw_spin_lock_irqsave(&ioapic_lock, flags);
2518 reg_00.raw = io_apic_read(ioapic, 0);
2519 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2520
2521 if (apic_id >= get_physical_broadcast()) {
2522 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
2523 "%d\n", ioapic, apic_id, reg_00.bits.ID);
2524 apic_id = reg_00.bits.ID;
2525 }
2526
2527 /*
2528 * Every APIC in the system must have a unique physical ID.  If the
2529 * requested ID is already in use, search for a free one.
2530 */
2531 if (apic->check_apicid_used(&apic_id_map, apic_id)) {
2532
2533 for (i = 0; i < get_physical_broadcast(); i++) {
2534 if (!apic->check_apicid_used(&apic_id_map, i))
2535 break;
2536 }
2537
2538 if (i == get_physical_broadcast())
2539 panic("Max apic_id exceeded!\n");
2540
2541 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
2542 "trying %d\n", ioapic, apic_id, i);
2543
2544 apic_id = i;
2545 }
2546
2547 apic->apicid_to_cpu_present(apic_id, &tmp);
2548 physids_or(apic_id_map, apic_id_map, tmp);
2549
2550 if (reg_00.bits.ID != apic_id) {
2551 reg_00.bits.ID = apic_id;
2552
2553 raw_spin_lock_irqsave(&ioapic_lock, flags);
2554 io_apic_write(ioapic, 0, reg_00.raw);
2555 reg_00.raw = io_apic_read(ioapic, 0);
2556 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2557
2558 /* Sanity check: verify that the new ID was accepted */
2559 if (reg_00.bits.ID != apic_id) {
2560 pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
2561 ioapic);
2562 return -1;
2563 }
2564 }
2565
2566 apic_printk(APIC_VERBOSE, KERN_INFO
2567 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
2568
2569 return apic_id;
2570 }
2571
2572 static u8 io_apic_unique_id(int idx, u8 id)
2573 {
2574 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
2575 !APIC_XAPIC(boot_cpu_apic_version))
2576 return io_apic_get_unique_id(idx, id);
2577 else
2578 return id;
2579 }
2580 #else
2581 static u8 io_apic_unique_id(int idx, u8 id)
2582 {
2583 union IO_APIC_reg_00 reg_00;
2584 DECLARE_BITMAP(used, 256);
2585 unsigned long flags;
2586 u8 new_id;
2587 int i;
2588
2589 bitmap_zero(used, 256);
2590 for_each_ioapic(i)
2591 __set_bit(mpc_ioapic_id(i), used);
2592
2593 /* Hand out the requested ID if it is still available */
2594 if (!test_bit(id, used))
2595 return id;
2596
2597 /*
2598 * Read the current ID from the IO-APIC and keep it if it is still
2599 * free; firmware may have reported a bogus (e.g. zero) ID.
2600 */
2601 raw_spin_lock_irqsave(&ioapic_lock, flags);
2602 reg_00.raw = io_apic_read(idx, 0);
2603 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2604 new_id = reg_00.bits.ID;
2605 if (!test_bit(new_id, used)) {
2606 apic_printk(APIC_VERBOSE, KERN_INFO
2607 "IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
2608 idx, new_id, id);
2609 return new_id;
2610 }
2611
2612 /*
2613 * Otherwise grab the next free ID and write it into the IO-APIC.
2614 */
2615 new_id = find_first_zero_bit(used, 256);
2616 reg_00.bits.ID = new_id;
2617 raw_spin_lock_irqsave(&ioapic_lock, flags);
2618 io_apic_write(idx, 0, reg_00.raw);
2619 reg_00.raw = io_apic_read(idx, 0);
2620 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2621 /* Sanity check */
2622 BUG_ON(reg_00.bits.ID != new_id);
2623
2624 return new_id;
2625 }
2626 #endif
2627
2628 static int io_apic_get_version(int ioapic)
2629 {
2630 union IO_APIC_reg_01 reg_01;
2631 unsigned long flags;
2632
2633 raw_spin_lock_irqsave(&ioapic_lock, flags);
2634 reg_01.raw = io_apic_read(ioapic, 1);
2635 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2636
2637 return reg_01.bits.version;
2638 }
2639
2640 /*
2641 * IO-APIC MMIO handling: reserve the register windows as resources and
2642 * map them via fixmap entries.
2643 */
2644 #define IOAPIC_RESOURCE_NAME_SIZE 11
2645
2646 static struct resource *ioapic_resources;
2647
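/*
 * Allocate the struct resource array describing the IO-APIC register
 * windows.  One memblock allocation holds the nr_ioapics resource
 * structures followed by their "IOAPIC %u" name strings.
 */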
2648 static struct resource * __init ioapic_setup_resources(void)
2649 {
2650 unsigned long n;
2651 struct resource *res;
2652 char *mem;
2653 int i;
2654
2655 if (nr_ioapics == 0)
2656 return NULL;
2657
2658 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
2659 n *= nr_ioapics;
2660
2661 mem = memblock_alloc(n, SMP_CACHE_BYTES);
2662 if (!mem)
2663 panic("%s: Failed to allocate %lu bytes\n", __func__, n);
2664 res = (void *)mem;
2665
2666 mem += sizeof(struct resource) * nr_ioapics;
2667
2668 for_each_ioapic(i) {
2669 res[i].name = mem;
2670 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
2671 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
2672 mem += IOAPIC_RESOURCE_NAME_SIZE;
2673 ioapics[i].iomem_res = &res[i];
2674 }
2675
2676 ioapic_resources = res;
2677
2678 return res;
2679 }
2680
2681 static void io_apic_set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
2682 {
2683 pgprot_t flags = FIXMAP_PAGE_NOCACHE;
2684
2685 /*
2686 * Ensure fixmaps for IO-APIC MMIO respect memory encryption pgprot
2687 * bits, just like normal ioremap():
2688 */
2689 flags = pgprot_decrypted(flags);
2690
2691 __set_fixmap(idx, phys, flags);
2692 }
2693
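/*
 * Map the register window of every enumerated IO-APIC into the fixmap
 * area.  On 32-bit, a bogus zero address in the MP table disables
 * IO-APIC support and a dummy page is mapped instead.
 */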
2694 void __init io_apic_init_mappings(void)
2695 {
2696 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
2697 struct resource *ioapic_res;
2698 int i;
2699
2700 ioapic_res = ioapic_setup_resources();
2701 for_each_ioapic(i) {
2702 if (smp_found_config) {
2703 ioapic_phys = mpc_ioapic_addr(i);
2704 #ifdef CONFIG_X86_32
2705 if (!ioapic_phys) {
2706 printk(KERN_ERR
2707 "WARNING: bogus zero IO-APIC "
2708 "address found in MPTABLE, "
2709 "disabling IO/APIC support!\n");
2710 smp_found_config = 0;
2711 skip_ioapic_setup = 1;
2712 goto fake_ioapic_page;
2713 }
2714 #endif
2715 } else {
2716 #ifdef CONFIG_X86_32
2717 fake_ioapic_page:
2718 #endif
2719 ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
2720 PAGE_SIZE);
2721 if (!ioapic_phys)
2722 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
2723 __func__, PAGE_SIZE, PAGE_SIZE);
2724 ioapic_phys = __pa(ioapic_phys);
2725 }
2726 io_apic_set_fixmap(idx, ioapic_phys);
2727 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
2728 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
2729 ioapic_phys);
2730 idx++;
2731
2732 ioapic_res->start = ioapic_phys;
2733 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
2734 ioapic_res++;
2735 }
2736 }
2737
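/*
 * Insert the IO-APIC register windows into the iomem resource tree so
 * they are accounted as busy MMIO regions.
 */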
2738 void __init ioapic_insert_resources(void)
2739 {
2740 int i;
2741 struct resource *r = ioapic_resources;
2742
2743 if (!r) {
2744 if (nr_ioapics > 0)
2745 printk(KERN_ERR
2746 "IO APIC resources couldn't be allocated.\n");
2747 return;
2748 }
2749
2750 for_each_ioapic(i) {
2751 insert_resource(&iomem_resource, r);
2752 r++;
2753 }
2754 }
2755
2756 int mp_find_ioapic(u32 gsi)
2757 {
2758 int i;
2759
2760 if (nr_ioapics == 0)
2761 return -1;
2762
2763 /* Find the IOAPIC that manages this GSI. */
2764 for_each_ioapic(i) {
2765 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i);
2766 if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end)
2767 return i;
2768 }
2769
2770 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
2771 return -1;
2772 }
2773
2774 int mp_find_ioapic_pin(int ioapic, u32 gsi)
2775 {
2776 struct mp_ioapic_gsi *gsi_cfg;
2777
2778 if (WARN_ON(ioapic < 0))
2779 return -1;
2780
2781 gsi_cfg = mp_ioapic_gsi_routing(ioapic);
2782 if (WARN_ON(gsi > gsi_cfg->gsi_end))
2783 return -1;
2784
2785 return gsi - gsi_cfg->gsi_base;
2786 }
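
/*
 * Editorial sketch, not part of the original file: translating a GSI into
 * an (IO-APIC index, pin) pair with the two lookups above.  The helper
 * name is hypothetical and the GSI value is illustrative only.
 */
static __always_unused void example_gsi_to_pin(u32 gsi)
{
	int ioapic = mp_find_ioapic(gsi);
	int pin;

	if (ioapic < 0)
		return;		/* no IO-APIC covers this GSI */

	/* The pin is simply the GSI's offset from this IO-APIC's gsi_base. */
	pin = mp_find_ioapic_pin(ioapic, gsi);
	(void)pin;
}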
2787
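/*
 * Check whether the freshly mapped IO-APIC actually responds: a chip
 * whose first three registers read back as all ones is not functional.
 */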
2788 static int bad_ioapic_register(int idx)
2789 {
2790 union IO_APIC_reg_00 reg_00;
2791 union IO_APIC_reg_01 reg_01;
2792 union IO_APIC_reg_02 reg_02;
2793
2794 reg_00.raw = io_apic_read(idx, 0);
2795 reg_01.raw = io_apic_read(idx, 1);
2796 reg_02.raw = io_apic_read(idx, 2);
2797
2798 if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
2799 pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
2800 mpc_ioapic_addr(idx));
2801 return 1;
2802 }
2803
2804 return 0;
2805 }
2806
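/* Find an unused slot (nr_registers == 0) in the ioapics[] array. */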
2807 static int find_free_ioapic_entry(void)
2808 {
2809 int idx;
2810
2811 for (idx = 0; idx < MAX_IO_APICS; idx++)
2812 if (ioapics[idx].nr_registers == 0)
2813 return idx;
2814
2815 return MAX_IO_APICS;
2816 }
2817
2818 /**
2819 * mp_register_ioapic - Register a new IOAPIC device
2820 * @id:	hardware IOAPIC ID
2821 * @address:	physical address of the IOAPIC register area
2822 * @gsi_base:	base of the GSI range associated with the IOAPIC
2823 * @cfg:	configuration information for the IOAPIC
2824 */
2825 int mp_register_ioapic(int id, u32 address, u32 gsi_base,
2826 struct ioapic_domain_cfg *cfg)
2827 {
2828 bool hotplug = !!ioapic_initialized;
2829 struct mp_ioapic_gsi *gsi_cfg;
2830 int idx, ioapic, entries;
2831 u32 gsi_end;
2832
2833 if (!address) {
2834 pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
2835 return -EINVAL;
2836 }
2837 for_each_ioapic(ioapic)
2838 if (ioapics[ioapic].mp_config.apicaddr == address) {
2839 pr_warn("address 0x%x conflicts with IOAPIC%d\n",
2840 address, ioapic);
2841 return -EEXIST;
2842 }
2843
2844 idx = find_free_ioapic_entry();
2845 if (idx >= MAX_IO_APICS) {
2846 pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
2847 MAX_IO_APICS, idx);
2848 return -ENOSPC;
2849 }
2850
2851 ioapics[idx].mp_config.type = MP_IOAPIC;
2852 ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
2853 ioapics[idx].mp_config.apicaddr = address;
2854
2855 io_apic_set_fixmap(FIX_IO_APIC_BASE_0 + idx, address);
2856 if (bad_ioapic_register(idx)) {
2857 clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
2858 return -ENODEV;
2859 }
2860
2861 ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id);
2862 ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
2863
2864 /*
2865 * Record the GSI range handled by this IO-APIC and reject the
2866 * registration if it overlaps an already registered IO-APIC.
2867 */
2868 entries = io_apic_get_redir_entries(idx);
2869 gsi_end = gsi_base + entries - 1;
2870 for_each_ioapic(ioapic) {
2871 gsi_cfg = mp_ioapic_gsi_routing(ioapic);
2872 if ((gsi_base >= gsi_cfg->gsi_base &&
2873 gsi_base <= gsi_cfg->gsi_end) ||
2874 (gsi_end >= gsi_cfg->gsi_base &&
2875 gsi_end <= gsi_cfg->gsi_end)) {
2876 pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
2877 gsi_base, gsi_end,
2878 gsi_cfg->gsi_base, gsi_cfg->gsi_end);
2879 clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
2880 return -ENOSPC;
2881 }
2882 }
2883 gsi_cfg = mp_ioapic_gsi_routing(idx);
2884 gsi_cfg->gsi_base = gsi_base;
2885 gsi_cfg->gsi_end = gsi_end;
2886
2887 ioapics[idx].irqdomain = NULL;
2888 ioapics[idx].irqdomain_cfg = *cfg;
2889
2890 /*
2891 * During early boot it is too early to create the irqdomain; that is
2892 * deferred to setup_IO_APIC().  Only a hotplugged IO-APIC (registered
2893 * after IO-APIC init) gets its domain and saved registers here.
2894 */
2895 if (hotplug) {
2896 if (mp_irqdomain_create(idx)) {
2897 clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
2898 return -ENOMEM;
2899 }
2900 alloc_ioapic_saved_registers(idx);
2901 }
2902
2903 if (gsi_cfg->gsi_end >= gsi_top)
2904 gsi_top = gsi_cfg->gsi_end + 1;
2905 if (nr_ioapics <= idx)
2906 nr_ioapics = idx + 1;
2907
2908 /* Set nr_registers to mark the entry as present */
2909 ioapics[idx].nr_registers = entries;
2910
2911 pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
2912 idx, mpc_ioapic_id(idx),
2913 mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
2914 gsi_cfg->gsi_base, gsi_cfg->gsi_end);
2915
2916 return 0;
2917 }
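
/*
 * Editorial sketch, not part of the original file: roughly how an
 * enumeration path might register an IO-APIC.  The id, address and GSI
 * base are made-up values and the helper name is hypothetical;
 * IOAPIC_DOMAIN_DYNAMIC and mp_ioapic_irqdomain_ops (defined at the end
 * of this file) are what a real caller would wire into the config.
 */
static __always_unused int example_register_ioapic(void)
{
	static struct ioapic_domain_cfg cfg = {
		.type	= IOAPIC_DOMAIN_DYNAMIC,
		.ops	= &mp_ioapic_irqdomain_ops,
	};

	/* IO-APIC with hardware ID 2, registers at 0xfec00000, GSIs from 24 */
	return mp_register_ioapic(2, 0xfec00000, 24, &cfg);
}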
2918
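/**
 * mp_unregister_ioapic - Unregister an IOAPIC device
 * @gsi_base:	base of the GSI range associated with the IOAPIC
 *
 * Fails with -EBUSY while any pin of the IO-APIC is still in use.
 */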
2919 int mp_unregister_ioapic(u32 gsi_base)
2920 {
2921 int ioapic, pin;
2922 int found = 0;
2923
2924 for_each_ioapic(ioapic)
2925 if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
2926 found = 1;
2927 break;
2928 }
2929 if (!found) {
2930 pr_warn("can't find IOAPIC for GSI %d\n", gsi_base);
2931 return -ENODEV;
2932 }
2933
2934 for_each_pin(ioapic, pin) {
2935 u32 gsi = mp_pin_to_gsi(ioapic, pin);
2936 int irq = mp_map_gsi_to_irq(gsi, 0, NULL);
2937 struct mp_chip_data *data;
2938
2939 if (irq >= 0) {
2940 data = irq_get_chip_data(irq);
2941 if (data && data->count) {
2942 pr_warn("pin%d on IOAPIC%d is still in use.\n",
2943 pin, ioapic);
2944 return -EBUSY;
2945 }
2946 }
2947 }
2948
2949 /* Mark the entry as not present */
2950 ioapics[ioapic].nr_registers = 0;
2951 ioapic_destroy_irqdomain(ioapic);
2952 free_ioapic_saved_registers(ioapic);
2953 if (ioapics[ioapic].iomem_res)
2954 release_resource(ioapics[ioapic].iomem_res);
2955 clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic);
2956 memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic]));
2957
2958 return 0;
2959 }
2960
2961 int mp_ioapic_registered(u32 gsi_base)
2962 {
2963 int ioapic;
2964
2965 for_each_ioapic(ioapic)
2966 if (ioapics[ioapic].gsi_config.gsi_base == gsi_base)
2967 return 1;
2968
2969 return 0;
2970 }
2971
2972 static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data,
2973 struct irq_alloc_info *info)
2974 {
2975 if (info && info->ioapic.valid) {
2976 data->is_level = info->ioapic.is_level;
2977 data->active_low = info->ioapic.active_low;
2978 } else if (__acpi_get_override_irq(gsi, &data->is_level,
2979 &data->active_low) < 0) {
2980 /* Default: PCI interrupts are level triggered, active low. */
2981 data->is_level = true;
2982 data->active_low = true;
2983 }
2984 }
2985
2986 /*
2987 * Configure the I/O-APIC specific fields of a routing entry.
2988 *
2989 * The I/O-APIC specific bits (is_level, active_low, masked) have to be
2990 * set here because the underlying parent domain only provides the
2991 * routing (vector/destination) information and knows nothing about
2992 * them.
2993 *
2994 * The entry is only preconfigured at this point; it is not written into
2995 * the RTE yet.  That happens later during activation, which fills in
2996 * the actual destination information from the parent domain.
2997 */
2998 static void mp_preconfigure_entry(struct mp_chip_data *data)
2999 {
3000 struct IO_APIC_route_entry *entry = &data->entry;
3001
3002 memset(entry, 0, sizeof(*entry));
3003 entry->is_level = data->is_level;
3004 entry->active_low = data->active_low;
3005
3006 /*
3007 * Mask level triggered irqs; the irq core masks edge triggered ones.
3008 */
3009 entry->masked = data->is_level;
3010 }
3011
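/*
 * .alloc callback: allocate the parent (vector) resources for a single
 * IO-APIC pin, record the pin in the chip data and preconfigure the
 * routing entry from the irq_alloc_info / ACPI override attributes.
 */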
3012 int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
3013 unsigned int nr_irqs, void *arg)
3014 {
3015 struct irq_alloc_info *info = arg;
3016 struct mp_chip_data *data;
3017 struct irq_data *irq_data;
3018 int ret, ioapic, pin;
3019 unsigned long flags;
3020
3021 if (!info || nr_irqs > 1)
3022 return -EINVAL;
3023 irq_data = irq_domain_get_irq_data(domain, virq);
3024 if (!irq_data)
3025 return -EINVAL;
3026
3027 ioapic = mp_irqdomain_ioapic_idx(domain);
3028 pin = info->ioapic.pin;
3029 if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0)
3030 return -EEXIST;
3031
3032 data = kzalloc(sizeof(*data), GFP_KERNEL);
3033 if (!data)
3034 return -ENOMEM;
3035
3036 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
3037 if (ret < 0) {
3038 kfree(data);
3039 return ret;
3040 }
3041
3042 INIT_LIST_HEAD(&data->irq_2_pin);
3043 irq_data->hwirq = info->ioapic.pin;
3044 irq_data->chip = (domain->parent == x86_vector_domain) ?
3045 &ioapic_chip : &ioapic_ir_chip;
3046 irq_data->chip_data = data;
3047 mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info);
3048
3049 add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
3050
3051 mp_preconfigure_entry(data);
3052 mp_register_handler(virq, data->is_level);
3053
3054 local_irq_save(flags);
3055 if (virq < nr_legacy_irqs())
3056 legacy_pic->mask(virq);
3057 local_irq_restore(flags);
3058
3059 apic_printk(APIC_VERBOSE, KERN_DEBUG
3060 "IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n",
3061 ioapic, mpc_ioapic_id(ioapic), pin, virq,
3062 data->is_level, data->active_low);
3063 return 0;
3064 }
3065
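/*
 * .free callback: undo mp_irqdomain_alloc() for a single interrupt and
 * release the associated chip data.
 */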
3066 void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
3067 unsigned int nr_irqs)
3068 {
3069 struct irq_data *irq_data;
3070 struct mp_chip_data *data;
3071
3072 BUG_ON(nr_irqs != 1);
3073 irq_data = irq_domain_get_irq_data(domain, virq);
3074 if (irq_data && irq_data->chip_data) {
3075 data = irq_data->chip_data;
3076 __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain),
3077 (int)irq_data->hwirq);
3078 WARN_ON(!list_empty(&data->irq_2_pin));
3079 kfree(irq_data->chip_data);
3080 }
3081 irq_domain_free_irqs_top(domain, virq, nr_irqs);
3082 }
3083
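/*
 * .activate callback: write the preconfigured routing entry, now including
 * the vector/destination supplied by the parent domain, into the RTE.
 */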
3084 int mp_irqdomain_activate(struct irq_domain *domain,
3085 struct irq_data *irq_data, bool reserve)
3086 {
3087 unsigned long flags;
3088
3089 raw_spin_lock_irqsave(&ioapic_lock, flags);
3090 ioapic_configure_entry(irq_data);
3091 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3092 return 0;
3093 }
3094
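/*
 * .deactivate callback: mask the pin so it cannot fire while the
 * interrupt is deactivated.
 */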
3095 void mp_irqdomain_deactivate(struct irq_domain *domain,
3096 struct irq_data *irq_data)
3097 {
3098 /* Mask the routing entry for this pin */
3099 ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain),
3100 (int)irq_data->hwirq);
3101 }
3102
3103 int mp_irqdomain_ioapic_idx(struct irq_domain *domain)
3104 {
3105 return (int)(long)domain->host_data;
3106 }
3107
3108 const struct irq_domain_ops mp_ioapic_irqdomain_ops = {
3109 .alloc = mp_irqdomain_alloc,
3110 .free = mp_irqdomain_free,
3111 .activate = mp_irqdomain_activate,
3112 .deactivate = mp_irqdomain_deactivate,
3113 };
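
/*
 * Editorial sketch, not part of the original file: once an IO-APIC and its
 * irqdomain are registered with the ops above, an existing GSI-to-IRQ
 * mapping can be looked up with mp_map_gsi_to_irq(), exactly as
 * mp_unregister_ioapic() does.  The helper name and GSI value are
 * illustrative only.
 */
static __always_unused void example_lookup_gsi(void)
{
	/* Look up the Linux IRQ already associated with GSI 19, if any. */
	int irq = mp_map_gsi_to_irq(19, 0, NULL);

	(void)irq;	/* negative when the GSI has no mapping yet */
}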