// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jacob Pan <jacob.jun.pan@linux.intel.com>
 *	Enabled IRQ_REMAP for Xeon Phi
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

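/*
 * Per interrupt state of the vector domain: the hardware visible
 * configuration (hw_irq_cfg), the currently assigned vector/CPU pair,
 * the previous pair while an affinity move is in flight, and the hlist
 * node used to queue the entry on the old CPU's cleanup list.
 */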
struct apic_chip_data {
        struct irq_cfg          hw_irq_cfg;
        unsigned int            vector;
        unsigned int            prev_vector;
        unsigned int            cpu;
        unsigned int            prev_cpu;
        unsigned int            irq;
        struct hlist_node       clist;
        unsigned int            move_in_progress        : 1,
                                is_managed              : 1,
                                can_reserve             : 1,
                                has_reserved            : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
        /*
         * Used so that the online set of CPUs does not change during an
         * assign_irq_vector() operation.
         */
        raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
        raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
                         const struct cpumask *mask)
{
        memset(info, 0, sizeof(*info));
        info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
        if (src)
                *dst = *src;
        else
                memset(dst, 0, sizeof(*dst));
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
        if (!irqd)
                return NULL;

        while (irqd->parent_data)
                irqd = irqd->parent_data;

        return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
        return irqd_cfg(irq_get_irq_data(irq));
}

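/*
 * irqd_cfg() follows the parent_data chain to the vector domain entry at
 * the root of the hierarchy, so the stacked domains (IOAPIC, HPET, MSI,
 * IRQ remapping) all see the same vector/destination pair. A typical
 * consumer might look like this (sketch, illustrative field names):
 *
 *	struct irq_cfg *cfg = irqd_cfg(irq_data);
 *
 *	entry->vector = cfg->vector;
 *	entry->dest   = cfg->dest_apicid;
 */
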
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
        struct apic_chip_data *apicd;

        apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
        if (apicd)
                INIT_HLIST_NODE(&apicd->clist);
        return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
        kfree(apicd);
}

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
                                unsigned int cpu)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        lockdep_assert_held(&vector_lock);

        apicd->hw_irq_cfg.vector = vector;
        apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
        irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
        trace_vector_config(irqd->irq, vector, cpu,
                            apicd->hw_irq_cfg.dest_apicid);
}

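/*
 * apic_update_irq_cfg() only updates the software copy of the hardware
 * configuration; the device level domain (IOAPIC, HPET, MSI) picks it up
 * via irqd_cfg() and writes the actual hardware entry. The effective
 * affinity is always reduced to the single target CPU.
 */
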
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
                               unsigned int newcpu)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        struct irq_desc *desc = irq_data_to_desc(irqd);
        bool managed = irqd_affinity_is_managed(irqd);

        lockdep_assert_held(&vector_lock);

        trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
                            apicd->cpu);

        /*
         * If there is no vector associated or if the associated vector is
         * the shutdown vector, which is associated to make PCI/MSI
         * shutdown mode work, then there is nothing to release. Clear out
         * prev_vector for this and the offlined target case.
         */
        apicd->prev_vector = 0;
        if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
                goto setnew;
        /*
         * If the target CPU of the previous vector is online, then mark
         * the vector as move in progress and store it for cleanup when the
         * first interrupt on the new vector arrives. If the target CPU is
         * offline then the regular release mechanism via the cleanup
         * vector is not possible and the vector can be immediately freed
         * in the underlying matrix allocator.
         */
        if (cpu_online(apicd->cpu)) {
                apicd->move_in_progress = true;
                apicd->prev_vector = apicd->vector;
                apicd->prev_cpu = apicd->cpu;
                WARN_ON_ONCE(apicd->cpu == newcpu);
        } else {
                irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
                                managed);
        }

setnew:
        apicd->vector = newvec;
        apicd->cpu = newcpu;
        BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
        per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
        unsigned int cpu = cpumask_first(cpu_online_mask);

        apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apicd->is_managed = true;
        ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        trace_vector_reserve_managed(irqd->irq, ret);
        return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        irq_matrix_reserve(vector_matrix);
        apicd->can_reserve = true;
        apicd->has_reserved = true;
        irqd_set_can_reserve(irqd);
        trace_vector_reserve(irqd->irq, 0);
        vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        reserve_irq_vector_locked(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return 0;
}
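
/*
 * Reservation mode: instead of pinning a real vector at allocation time,
 * the interrupt only increments the global reservation count in the
 * matrix allocator and is parked on MANAGED_IRQ_SHUTDOWN_VECTOR. A real
 * vector is assigned at activation time and returned on deactivation,
 * which prevents allocated but unused interrupts (e.g. unrequested MSI-X
 * entries) from exhausting the vector space.
 */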

static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool resvd = apicd->has_reserved;
        unsigned int cpu = apicd->cpu;
        int vector = apicd->vector;

        lockdep_assert_held(&vector_lock);

        /*
         * If the current target CPU is online and in the new requested
         * affinity mask, there is no point in moving the interrupt from
         * one CPU to another.
         */
        if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
                return 0;

        /*
         * Careful here. @apicd might either have move_in_progress set or
         * be enqueued for cleanup. Assigning a new vector would either
         * leave a stale vector on some CPU around or trip up the pending
         * cleanup, so refuse the move until the previous vector has been
         * released.
         */
        if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
                return -EBUSY;

        vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
        trace_vector_alloc(irqd->irq, vector, resvd, vector);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);

        return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&vector_lock, flags);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        ret = assign_vector_locked(irqd, vector_searchmask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
        /* Get the affinity mask - either irq_default_affinity or (user) set */
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        int node = irq_data_get_node(irqd);

        if (node != NUMA_NO_NODE) {
                /* Try the intersection of the affinity and node masks */
                cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
                if (!assign_vector_locked(irqd, vector_searchmask))
                        return 0;
        }

        /* Try the full affinity mask */
        cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
        if (!assign_vector_locked(irqd, vector_searchmask))
                return 0;

        if (node != NUMA_NO_NODE) {
                /* Try the full node mask */
                if (!assign_vector_locked(irqd, cpumask_of_node(node)))
                        return 0;
        }

        /* Try the full online mask as a last resort */
        return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
        if (irqd_affinity_is_managed(irqd))
                return reserve_managed_vector(irqd);
        if (info->mask)
                return assign_irq_vector(irqd, info->mask);
        /*
         * Make only a global reservation with no guarantee. A real vector
         * is associated at activation time.
         */
        return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int vector, cpu;

        cpumask_and(vector_searchmask, dest, affmsk);

        /* set_affinity might call here for nothing */
        if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
                return 0;
        vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
                                          &cpu);
        trace_vector_alloc_managed(irqd->irq, vector, vector);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);
        return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool managed = irqd_affinity_is_managed(irqd);
        unsigned int vector = apicd->vector;

        lockdep_assert_held(&vector_lock);

        if (!vector)
                return;

        trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
                           apicd->prev_cpu);

        per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
        apicd->vector = 0;

        /* Clean up the vector associated to a pending move */
        vector = apicd->prev_vector;
        if (!vector)
                return;

        per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
        hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;

        trace_vector_deactivate(irqd->irq, apicd->is_managed,
                                apicd->can_reserve, false);

        /* Regular fixed assigned interrupt */
        if (!apicd->is_managed && !apicd->can_reserve)
                return;
        /* If the interrupt has a global reservation, nothing to do */
        if (apicd->has_reserved)
                return;

        raw_spin_lock_irqsave(&vector_lock, flags);
        clear_irq_vector(irqd);
        if (apicd->can_reserve)
                reserve_irq_vector_locked(irqd);
        else
                vector_assign_managed_shutdown(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int ret;

        ret = assign_irq_vector_any_locked(irqd);
        if (!ret) {
                apicd->has_reserved = false;
                /*
                 * Core might have disabled reservation mode after
                 * allocating the irq descriptor. Ideally this should
                 * happen before allocation time, but that would require
                 * completely convoluted ways of transporting that
                 * information.
                 */
                if (!irqd_can_reserve(irqd))
                        apicd->can_reserve = false;
        }

        /*
         * Check to ensure that the effective affinity mask is a subset
         * of the user supplied affinity mask, and warn the user if it
         * is not.
         */
        if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
                            irq_data_get_affinity_mask(irqd))) {
                pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
                        irqd->irq);
        }

        return ret;
}

static int activate_managed(struct irq_data *irqd)
{
        const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
        int ret;

        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
                /* Something in the core code broke! Survive gracefully */
                pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
                return -EINVAL;
        }

        ret = assign_managed_vector(irqd, vector_searchmask);
        /*
         * This should not happen. The vector reservation got buggered.
         * Handle it gracefully.
         */
        if (WARN_ON_ONCE(ret < 0)) {
                pr_err("Managed startup irq %u, no vector available\n",
                       irqd->irq);
        }
        return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
                               bool reserve)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret = 0;

        trace_vector_activate(irqd->irq, apicd->is_managed,
                              apicd->can_reserve, reserve);

        raw_spin_lock_irqsave(&vector_lock, flags);
        if (!apicd->can_reserve && !apicd->is_managed)
                assign_irq_vector_any_locked(irqd);
        else if (reserve || irqd_is_managed_and_shutdown(irqd))
                vector_assign_managed_shutdown(irqd);
        else if (apicd->is_managed)
                ret = activate_managed(irqd);
        else if (apicd->has_reserved)
                ret = activate_reserved(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}
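
/*
 * Summary of the activation paths above: interrupts which can neither
 * reserve nor are managed get a real vector immediately; an early
 * (reserve) activation or a shut down managed interrupt is parked on the
 * shutdown vector; otherwise managed interrupts allocate from their
 * reserved region and reserved interrupts trade the global reservation
 * for a real vector.
 */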

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
        const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        trace_vector_teardown(irqd->irq, apicd->is_managed,
                              apicd->has_reserved);

        if (apicd->has_reserved)
                irq_matrix_remove_reserved(vector_matrix);
        if (apicd->is_managed)
                irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
{
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irqd && irqd->chip_data) {
                        raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(irqd);
                        vector_free_reserved_and_managed(irqd);
                        apicd = irqd->chip_data;
                        irq_domain_reset_irq_data(irqd);
                        raw_spin_unlock_irqrestore(&vector_lock, flags);
                        free_apic_chip_data(apicd);
                }
        }
}

static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
                                    struct apic_chip_data *apicd)
{
        unsigned long flags;
        bool realloc = false;

        apicd->vector = ISA_IRQ_VECTOR(virq);
        apicd->cpu = 0;

        raw_spin_lock_irqsave(&vector_lock, flags);
        /*
         * If the interrupt is activated, then it must stay at this vector
         * position. That's usually the timer interrupt (0).
         */
        if (irqd_is_activated(irqd)) {
                trace_vector_setup(virq, true, 0);
                apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
        } else {
                /* Release the vector */
                apicd->can_reserve = true;
                irqd_set_can_reserve(irqd);
                clear_irq_vector(irqd);
                realloc = true;
        }
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        struct irq_alloc_info *info = arg;
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        int i, err, node;

        if (disable_apic)
                return -ENXIO;

        /* Currently vector allocator can't guarantee contiguous allocations */
        if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
                return -ENOSYS;

        /*
         * Catch any attempt to touch the cascade interrupt on a PIC
         * equipped system.
         */
        if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY &&
                         virq == PIC_CASCADE_IR))
                return -EINVAL;

        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(domain, virq + i);
                BUG_ON(!irqd);
                node = irq_data_get_node(irqd);
                WARN_ON_ONCE(irqd->chip_data);
                apicd = alloc_apic_chip_data(node);
                if (!apicd) {
                        err = -ENOMEM;
                        goto error;
                }

                apicd->irq = virq + i;
                irqd->chip = &lapic_controller;
                irqd->chip_data = apicd;
                irqd->hwirq = virq + i;
                irqd_set_single_target(irqd);
                /*
                 * Prevent that any of these interrupts is invoked in
                 * non interrupt context via e.g. generic_handle_irq()
                 * as that can corrupt the affinity move state.
                 */
                irqd_set_handle_enforce_irqctx(irqd);

                /* Don't invoke affinity setter on deactivated interrupts */
                irqd_set_affinity_on_activate(irqd);

                /*
                 * Legacy vectors are already assigned when the IOAPIC
                 * takes them over. They stay on the same vector. This is
                 * required for check_timer() to work correctly as it might
                 * switch back to legacy mode. Only update the hardware
                 * config.
                 */
                if (info->flags & X86_IRQ_ALLOC_LEGACY) {
                        if (!vector_configure_legacy(virq + i, irqd, apicd))
                                continue;
                }

                err = assign_irq_vector_policy(irqd, info);
                trace_vector_setup(virq + i, false, err);
                if (err) {
                        irqd->chip_data = NULL;
                        free_apic_chip_data(apicd);
                        goto error;
                }
        }

        return 0;

error:
        x86_vector_free_irqs(domain, virq, i);
        return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
                                  struct irq_data *irqd, int ind)
{
        struct apic_chip_data apicd;
        unsigned long flags;
        int irq;

        if (!irqd) {
                irq_matrix_debug_show(m, vector_matrix, ind);
                return;
        }

        irq = irqd->irq;
        if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
                seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
                seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
                return;
        }

        if (!irqd->chip_data) {
                seq_printf(m, "%*sVector: Not assigned\n", ind, "");
                return;
        }

        raw_spin_lock_irqsave(&vector_lock, flags);
        memcpy(&apicd, irqd->chip_data, sizeof(apicd));
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
        seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
        if (apicd.prev_vector) {
                seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
                seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
        }
        seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
        seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
        seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
        seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
        seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec)
{
        if (fwspec->param_count != 1)
                return 0;

        if (is_fwnode_irqchip(fwspec->fwnode)) {
                const char *fwname = fwnode_get_name(fwspec->fwnode);

                return fwname && !strncmp(fwname, "IO-APIC-", 8) &&
                        simple_strtol(fwname+8, NULL, 10) == fwspec->param[0];
        }
        return to_of_node(fwspec->fwnode) &&
               of_device_is_compatible(to_of_node(fwspec->fwnode),
                                       "intel,ce4100-ioapic");
}

int x86_fwspec_is_hpet(struct irq_fwspec *fwspec)
{
        if (fwspec->param_count != 1)
                return 0;

        if (is_fwnode_irqchip(fwspec->fwnode)) {
                const char *fwname = fwnode_get_name(fwspec->fwnode);

                return fwname && !strncmp(fwname, "HPET-MSI-", 9) &&
                        simple_strtol(fwname+9, NULL, 10) == fwspec->param[0];
        }
        return 0;
}
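
/*
 * The checks above match the fwnode names which the IOAPIC and HPET
 * setup code creates ("IO-APIC-<idx>" and "HPET-MSI-<id>"), so a fwspec
 * with param_count == 1 and param[0] equal to the instance number
 * identifies the corresponding controller.
 */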

static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec,
                             enum irq_domain_bus_token bus_token)
{
        /*
         * HPET and I/OAPIC cannot be parented in the vector domain
         * if IRQ remapping is enabled. APIC IDs above 15 bits are
         * only permitted if IRQ remapping is enabled, so check that.
         */
        if (apic->apic_id_valid(32768))
                return 0;

        return x86_fwspec_is_ioapic(fwspec) || x86_fwspec_is_hpet(fwspec);
}

static const struct irq_domain_ops x86_vector_domain_ops = {
        .select         = x86_vector_select,
        .alloc          = x86_vector_alloc_irqs,
        .free           = x86_vector_free_irqs,
        .activate       = x86_vector_activate,
        .deactivate     = x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
        .debug_show     = x86_vector_debug_show,
#endif
};
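
/*
 * The vector domain is the root of the x86 interrupt domain hierarchy;
 * the IOAPIC, HPET, MSI and IRQ remapping domains are stacked on top of
 * it. It is installed as the default host in arch_early_irq_init().
 */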

int __init arch_probe_nr_irqs(void)
{
        int nr;

        if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
                nr_irqs = NR_VECTORS * nr_cpu_ids;

        nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
        /* Headroom for MSI and HyperTransport dynamic irqs */
        if (gsi_top <= NR_IRQS_LEGACY)
                nr += 8 * nr_cpu_ids;
        else
                nr += gsi_top * 16;
#endif
        if (nr < nr_irqs)
                nr_irqs = nr;

        /*
         * We don't know if the PIC is present at this point, so we need to
         * do probe() to get the right number of legacy IRQs.
         */
        return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
        /*
         * Use assign system here so it won't get accounted as allocated
         * and movable in the cpu hotplug check and it prevents managed
         * irq reservation from touching it.
         */
        irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_update_legacy_vectors(void)
{
        unsigned int i;

        if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
                return;

        /*
         * If the IO/APIC is disabled via config, kernel command line or
         * lack of enumeration then all legacy interrupts are routed
         * through the PIC. Make sure that they are marked as legacy
         * vectors. PIC_CASCADE_IR has already been marked in
         * lapic_assign_system_vectors().
         */
        for (i = 0; i < nr_legacy_irqs(); i++) {
                if (i != PIC_CASCADE_IR)
                        lapic_assign_legacy_vector(i, true);
        }
}

void __init lapic_assign_system_vectors(void)
{
        unsigned int i, vector;

        for_each_set_bit(vector, system_vectors, NR_VECTORS)
                irq_matrix_assign_system(vector_matrix, vector, false);

        if (nr_legacy_irqs() > 1)
                lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

        /* System vectors are reserved, online the matrix */
        irq_matrix_online(vector_matrix);

        /* Mark the preallocated legacy interrupts */
        for (i = 0; i < nr_legacy_irqs(); i++) {
                /*
                 * Skip the cascade interrupt. It is handled above as a
                 * system vector on PIC equipped systems.
                 */
                if (i != PIC_CASCADE_IR)
                        irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
        }
}

int __init arch_early_irq_init(void)
{
        struct fwnode_handle *fn;

        fn = irq_domain_alloc_named_fwnode("VECTOR");
        BUG_ON(!fn);
        x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
                                                   NULL);
        BUG_ON(x86_vector_domain == NULL);
        irq_set_default_host(x86_vector_domain);

        BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

        /*
         * Allocate the vector matrix allocator data structure and limit
         * the search area to the external vectors,
         * FIRST_EXTERNAL_VECTOR ... FIRST_SYSTEM_VECTOR - 1.
         */
        vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
                                         FIRST_SYSTEM_VECTOR);
        BUG_ON(!vector_matrix);

        return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
        int isairq = vector - ISA_IRQ_VECTOR(0);

        /* Check whether the irq is in the legacy space */
        if (isairq < 0 || isairq >= nr_legacy_irqs())
                return VECTOR_UNUSED;
        /* Check whether the irq is handled by the IOAPIC */
        if (test_bit(isairq, &io_apic_irqs))
                return VECTOR_UNUSED;
        return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
        unsigned int vector;

        lockdep_assert_held(&vector_lock);

        /* Online the vector matrix array for this CPU */
        irq_matrix_online(vector_matrix);

        /*
         * The interrupt affinity logic never targets interrupts to offline
         * CPUs. The exception are the legacy PIC interrupts. In general
         * they are only targeted to CPU0, but depending on the platform
         * they can be distributed to any online CPU in hardware. The
         * kernel has no influence on that. So all active legacy vectors
         * must be installed on all CPUs. The non legacy interrupts are
         * managed by the vector matrix which is initialized per CPU.
         */
        for (vector = 0; vector < NR_VECTORS; vector++)
                this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
        lock_vector_lock();
        irq_matrix_offline(vector_matrix);
        unlock_vector_lock();
}
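
/*
 * lapic_online() is invoked on the upcoming CPU with vector_lock already
 * held by the caller (hence the lockdep assertion above), while
 * lapic_offline() takes the lock itself on the outgoing CPU.
 */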

static int apic_set_affinity(struct irq_data *irqd,
                             const struct cpumask *dest, bool force)
{
        int err;

        if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
                return -EIO;

        raw_spin_lock(&vector_lock);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        if (irqd_affinity_is_managed(irqd))
                err = assign_managed_vector(irqd, vector_searchmask);
        else
                err = assign_vector_locked(irqd, vector_searchmask);
        raw_spin_unlock(&vector_lock);
        return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity      NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apic->send_IPI(apicd->cpu, apicd->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        return 1;
}

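/*
 * Acknowledge an interrupt: perform a pending interrupt move first, if
 * any (irq_move_irq() is a NOP otherwise), then EOI the local APIC.
 */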
void apic_ack_irq(struct irq_data *irqd)
{
        irq_move_irq(irqd);
        ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
        irq_complete_move(irqd_cfg(irqd));
        apic_ack_irq(irqd);
}

static void x86_vector_msi_compose_msg(struct irq_data *data,
                                       struct msi_msg *msg)
{
        __irq_msi_compose_msg(irqd_cfg(data), msg, false);
}

static struct irq_chip lapic_controller = {
        .name                   = "APIC",
        .irq_ack                = apic_ack_edge,
        .irq_set_affinity       = apic_set_affinity,
        .irq_compose_msi_msg    = x86_vector_msi_compose_msg,
        .irq_retrigger          = apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
        unsigned int vector = apicd->prev_vector;
        unsigned int cpu = apicd->prev_cpu;
        bool managed = apicd->is_managed;

        /*
         * Managed interrupts are usually not migrated away from an online
         * CPU, but CPU isolation 'managed_irq' can make that happen:
         * 1) Activation does not take the isolation into account to keep
         *    the code simple.
         * 2) Migration away from an isolated CPU can happen when a
         *    non-isolated CPU which is in the calculated affinity mask
         *    comes online.
         */
        trace_vector_free_moved(apicd->irq, cpu, vector, managed);
        irq_matrix_free(vector_matrix, cpu, vector, managed);
        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
        hlist_del_init(&apicd->clist);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
}

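/*
 * Vector move cleanup protocol: after apic_update_vector() retargeted an
 * interrupt, the old vector/CPU pair stays allocated until the first
 * interrupt arrives on the new vector. irq_complete_move() then sends
 * IRQ_MOVE_CLEANUP_VECTOR to the old CPU; the handler below frees the
 * stale vector once it is no longer pending in that APIC's IRR.
 */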
DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
{
        struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
        struct apic_chip_data *apicd;
        struct hlist_node *tmp;

        ack_APIC_irq();
        /* Prevent vectors vanishing under us */
        raw_spin_lock(&vector_lock);

        hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
                unsigned int irr, vector = apicd->prev_vector;

                /*
                 * Paranoia: Check if the vector that needs to be cleaned
                 * up is registered at the APICs IRR. If so, then this is
                 * not the best time to clean it up. Clean it up in the
                 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
                 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
                 * priority external vector, so on return from this
                 * interrupt the device interrupt will happen first.
                 */
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1U << (vector % 32))) {
                        apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                        continue;
                }
                free_moved_vector(apicd);
        }

        raw_spin_unlock(&vector_lock);
}

static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
        unsigned int cpu;

        raw_spin_lock(&vector_lock);
        apicd->move_in_progress = 0;
        cpu = apicd->prev_cpu;
        if (cpu_online(cpu)) {
                hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
                apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                apicd->prev_vector = 0;
        }
        raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
        struct apic_chip_data *apicd;

        apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
        if (apicd->move_in_progress)
                __send_cleanup_vector(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
        struct apic_chip_data *apicd;

        apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
        if (likely(!apicd->move_in_progress))
                return;

        /*
         * If the interrupt arrived on the new target CPU, cleanup the
         * vector on the old target CPU. A vector check is not required
         * because an interrupt can never move from one vector to another
         * on the same CPU.
         */
        if (apicd->cpu == smp_processor_id())
                __send_cleanup_vector(apicd);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        unsigned int vector;

        /*
         * The function is called for all descriptors regardless of which
         * irqdomain they belong to. For example if an IRQ is provided by
         * an irq_chip as part of a GPIO driver, the chip data for that
         * descriptor is specific to the irq_chip in question.
         *
         * Check first that the chip_data is what we expect
         * (apic_chip_data) before touching it any further.
         */
        irqd = irq_domain_get_irq_data(x86_vector_domain,
                                       irq_desc_get_irq(desc));
        if (!irqd)
                return;

        raw_spin_lock(&vector_lock);
        apicd = apic_chip_data(irqd);
        if (!apicd)
                goto unlock;

        /*
         * If prev_vector is empty, no action required.
         */
        vector = apicd->prev_vector;
        if (!vector)
                goto unlock;

        /*
         * This is tricky. If the cleanup of the old vector has not been
         * done yet, then the following might happen:
         *
         * 1) The interrupt is in move_in_progress state. That means that
         *    we have not seen an interrupt since the move was initiated,
         *    so the cleanup is not yet scheduled.
         *
         * 2) The interrupt has fired on the new vector, but the cleanup
         *    IPIs have not been processed yet on the previous target
         *    CPU(s).
         *
         * In both cases the old vector is still registered and must be
         * freed here, otherwise it would leak when the CPU goes down.
         */
        if (apicd->move_in_progress) {
                /*
                 * In theory there is a race:
                 *
                 * set_ioapic(new_vector) <-- Interrupt is raised before update
                 *                            is effective, i.e. it's raised on
                 *                            the old vector.
                 *
                 * So if the target cpu cannot handle that interrupt before
                 * the old vector is cleaned up, we get a spurious interrupt
                 * and in the worst case the ioapic irq line becomes stale.
                 *
                 * But in case of cpu hotplug this should be a non issue
                 * because if the affinity update happens right before all
                 * cpus rendezvous in stop machine, there is no way that the
                 * interrupt can be blocked on the target cpu because all cpus
                 * loop first with interrupts enabled in stop machine, so the
                 * old vector is not yet cleaned up when the interrupt fires.
                 *
                 * So the only way to run into this issue is when the delivery
                 * of the interrupt on the apic/system bus would be delayed
                 * beyond the point where the target cpu disables interrupts
                 * in stop machine. I doubt that it can happen, but at least
                 * there is a theoretical chance. Virtualization might be
                 * able to expose this, but AFAICT the IOAPIC emulation is not
                 * as stupid as the real hardware.
                 *
                 * Anyway, there is nothing we can do about that at this point
                 * w/o refactoring the whole fixup_irq() business completely.
                 * We print at least the irq number and the old vector number,
                 * so we have the necessary information when a problem in that
                 * area arises.
                 */
                pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
                        irqd->irq, vector);
        }
        free_moved_vector(apicd);
unlock:
        raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent that the actual interrupt move will run out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
        unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
        int ret = 0;

        raw_spin_lock(&vector_lock);
        tomove = irq_matrix_allocated(vector_matrix);
        avl = irq_matrix_available(vector_matrix, true);
        if (avl < tomove) {
                pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
                        cpu, tomove, avl);
                ret = -ENOSPC;
                goto out;
        }
        rsvd = irq_matrix_reserved(vector_matrix);
        if (avl < rsvd) {
                pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
                        rsvd, avl);
        }
out:
        raw_spin_unlock(&vector_lock);
        return ret;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

static void __init print_APIC_field(int base)
{
        int i;

        printk(KERN_DEBUG);

        for (i = 0; i < 8; i++)
                pr_cont("%08x", apic_read(base + i*0x10));

        pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
        unsigned int i, v, ver, maxlvt;
        u64 icr;

        pr_debug("printing local APIC contents on CPU#%d/%d:\n",
                 smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        pr_info("... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();

        v = apic_read(APIC_TASKPRI);
        pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                if (!APIC_XAPIC(ver)) {
                        v = apic_read(APIC_ARBPRI);
                        pr_debug("... APIC ARBPRI: %08x (%02x)\n",
                                 v, v & APIC_ARBPRI_MASK);
                }
                v = apic_read(APIC_PROCPRI);
                pr_debug("... APIC PROCPRI: %08x\n", v);
        }

        /*
         * Remote read supported only in the 82489DX and local APIC for
         * Pentium processors.
         */
        if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
                v = apic_read(APIC_RRR);
                pr_debug("... APIC RRR: %08x\n", v);
        }

        v = apic_read(APIC_LDR);
        pr_debug("... APIC LDR: %08x\n", v);
        if (!x2apic_enabled()) {
                v = apic_read(APIC_DFR);
                pr_debug("... APIC DFR: %08x\n", v);
        }
        v = apic_read(APIC_SPIV);
        pr_debug("... APIC SPIV: %08x\n", v);

        pr_debug("... APIC ISR field:\n");
        print_APIC_field(APIC_ISR);
        pr_debug("... APIC TMR field:\n");
        print_APIC_field(APIC_TMR);
        pr_debug("... APIC IRR field:\n");
        print_APIC_field(APIC_IRR);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                /* Due to the Pentium erratum 3AP. */
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);

                v = apic_read(APIC_ESR);
                pr_debug("... APIC ESR: %08x\n", v);
        }

        icr = apic_icr_read();
        pr_debug("... APIC ICR: %08x\n", (u32)icr);
        pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        pr_debug("... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) {
                /* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                pr_debug("... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        pr_debug("... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        pr_debug("... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) {
                /* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                pr_debug("... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        pr_debug("... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        pr_debug("... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        pr_debug("... APIC TDCR: %08x\n", v);

        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                v = apic_read(APIC_EFEAT);
                maxlvt = (v >> 16) & 0xff;
                pr_debug("... APIC EFEAT: %08x\n", v);
                v = apic_read(APIC_ECTRL);
                pr_debug("... APIC ECTRL: %08x\n", v);
                for (i = 0; i < maxlvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        pr_debug("... APIC EILVT%d: %08x\n", i, v);
                }
        }
        pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
        int cpu;

        if (!maxcpu)
                return;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu >= maxcpu)
                        break;
                smp_call_function_single(cpu, print_local_APIC, NULL, 1);
        }
        preempt_enable();
}

static void __init print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (!nr_legacy_irqs())
                return;

        pr_debug("\nprinting PIC contents\n");

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        pr_debug("... PIC IMR: %04x\n", v);

        v = inb(0xa0) << 8 | inb(0x20);
        pr_debug("... PIC IRR: %04x\n", v);

        /* OCW3: select reads of the in-service register (ISR) */
        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        /* OCW3: switch the read register back to the IRR */
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        pr_debug("... PIC ISR: %04x\n", v);

        v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1);
        pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
        int num = -1;

        if (strcmp(arg, "all") == 0) {
                show_lapic = CONFIG_NR_CPUS;
        } else {
                get_option(&arg, &num);
                if (num >= 0)
                        show_lapic = num;
        }

        return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
        if (apic_verbosity == APIC_QUIET)
                return 0;

        print_PIC();

        /* Don't print out if the APIC is not there */
        if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
                return 0;

        print_local_APICs(show_lapic);
        print_IO_APICs();

        return 0;
}

late_initcall(print_ICs);