// SPDX-License-Identifier: GPL-2.0
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

atomic_t irq_err_count;

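/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */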
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

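	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * will prevent an irq move to happen until the slot is
	 * released - so if the irq isn't handled, we have to ack it
	 * quickly.
	 */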
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))

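/*
 * /proc/interrupts printing for arch specific interrupts
 */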
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HRE");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_reenlightenment_count);
		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
	}
	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HVS");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->hyperv_stimer0_count);
		seq_puts(p, "  Hyper-V stimer0 interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

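/*
 * /proc/stat helpers
 */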
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}

static __always_inline void handle_irq(struct irq_desc *desc,
				       struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_X86_64))
		generic_handle_irq_desc(desc);
	else
		__handle_irq(desc, regs);
}

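/*
 * common_interrupt() handles all normal device IRQ's (the special SMP
 * cross-CPU interrupts have their own specific handlers).
 */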
DEFINE_IDTENTRY_IRQ(common_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

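	/* entry code tells RCU that we're not quiescent.  Check it. */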
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);
	if (likely(!IS_ERR_OR_NULL(desc))) {
		handle_irq(desc, regs);
	} else {
		ack_APIC_irq();

		if (desc == VECTOR_UNUSED) {
			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	set_irq_regs(old_regs);
}

#ifdef CONFIG_X86_LOCAL_APIC
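/* Function pointer for generic interrupt vector handling */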
void (*x86_platform_ipi_callback)(void) = NULL;

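/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */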
DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else {
		kvm_posted_intr_wakeup_handler = dummy_handler;
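		/*
		 * Make sure any wakeup IPI handler that may still be
		 * running with the old handler has finished before the
		 * caller tears it down.
		 */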
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

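/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */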
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
}

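/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */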
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
}

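/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */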
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
	ack_APIC_irq();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
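/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */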
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

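	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */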
	mdelay(1);

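	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */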
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
static void smp_thermal_vector(void)
{
	if (x86_thermal_enabled())
		intel_thermal_interrupt();
	else
		pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
		       smp_processor_id());
}

DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
	ack_APIC_irq();
}
#endif