0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/kernel_stat.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/seq_file.h>
0014 #include <linux/proc_fs.h>
0015 #include <linux/profile.h>
0016 #include <linux/export.h>
0017 #include <linux/kernel.h>
0018 #include <linux/ftrace.h>
0019 #include <linux/errno.h>
0020 #include <linux/slab.h>
0021 #include <linux/init.h>
0022 #include <linux/cpu.h>
0023 #include <linux/irq.h>
0024 #include <linux/entry-common.h>
0025 #include <asm/irq_regs.h>
0026 #include <asm/cputime.h>
0027 #include <asm/lowcore.h>
0028 #include <asm/irq.h>
0029 #include <asm/hw_irq.h>
0030 #include <asm/stacktrace.h>
0031 #include <asm/softirq_stack.h>
0032 #include "entry.h"
0033
/* Per-CPU interrupt accounting, read by show_interrupts() below. */
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
0036
/*
 * Describes one row of /proc/interrupts: the irq index into the
 * per-cpu counters, a short name, and an optional long description.
 */
struct irq_class {
	int irq;
	char *name;
	char *desc;
};
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
/*
 * The three first-level interrupt classes (external, I/O, adapter I/O)
 * counted via the generic kstat irq counters.
 */
static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
	{.irq = EXT_INTERRUPT,	.name = "EXT"},
	{.irq = IO_INTERRUPT,	.name = "I/O"},
	{.irq = THIN_INTERRUPT, .name = "AIO"},
};
0058
0059
0060
0061
0062
0063
/*
 * Second-level interrupt classes, counted in the arch-private per-cpu
 * irq_stat array.  The array size must match NR_ARCH_IRQS; this is
 * enforced by the BUILD_BUG_ON in init_IRQ().
 */
static const struct irq_class irqclass_sub_desc[] = {
	{.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
	{.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
	{.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
	{.irq = IRQEXT_TMR, .name = "TMR", .desc = "[EXT] CPU Timer"},
	{.irq = IRQEXT_TLA, .name = "TAL", .desc = "[EXT] Timing Alert"},
	{.irq = IRQEXT_PFL, .name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
	{.irq = IRQEXT_DSD, .name = "DSD", .desc = "[EXT] DASD Diag"},
	{.irq = IRQEXT_VRT, .name = "VRT", .desc = "[EXT] Virtio"},
	{.irq = IRQEXT_SCP, .name = "SCP", .desc = "[EXT] Service Call"},
	{.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
	{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
	{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
	{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
	{.irq = IRQIO_CIO,  .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
	{.irq = IRQIO_DAS,  .name = "DAS", .desc = "[I/O] DASD"},
	{.irq = IRQIO_C15,  .name = "C15", .desc = "[I/O] 3215"},
	{.irq = IRQIO_C70,  .name = "C70", .desc = "[I/O] 3270"},
	{.irq = IRQIO_TAP,  .name = "TAP", .desc = "[I/O] Tape"},
	{.irq = IRQIO_VMR,  .name = "VMR", .desc = "[I/O] Unit Record Devices"},
	{.irq = IRQIO_LCS,  .name = "LCS", .desc = "[I/O] LCS"},
	{.irq = IRQIO_CTC,  .name = "CTC", .desc = "[I/O] CTC"},
	{.irq = IRQIO_ADM,  .name = "ADM", .desc = "[I/O] EADM Subchannel"},
	{.irq = IRQIO_CSC,  .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
	{.irq = IRQIO_VIR,  .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
	{.irq = IRQIO_QAI,  .name = "QAI", .desc = "[AIO] QDIO Adapter Interrupt"},
	{.irq = IRQIO_APB,  .name = "APB", .desc = "[AIO] AP Bus"},
	{.irq = IRQIO_PCF,  .name = "PCF", .desc = "[AIO] PCI Floating Interrupt"},
	{.irq = IRQIO_PCD,  .name = "PCD", .desc = "[AIO] PCI Directed Interrupt"},
	{.irq = IRQIO_MSI,  .name = "MSI", .desc = "[AIO] MSI Interrupt"},
	{.irq = IRQIO_VAI,  .name = "VAI", .desc = "[AIO] Virtual I/O Devices AI"},
	{.irq = IRQIO_GAL,  .name = "GAL", .desc = "[AIO] GIB Alert"},
	{.irq = NMI_NMI,    .name = "NMI", .desc = "[NMI] Machine Check"},
	{.irq = CPU_RST,    .name = "RST", .desc = "[CPU] CPU Restart"},
};
0099
0100 static void do_IRQ(struct pt_regs *regs, int irq)
0101 {
0102 if (tod_after_eq(S390_lowcore.int_clock,
0103 S390_lowcore.clock_comparator))
0104
0105 clock_comparator_work();
0106 generic_handle_irq(irq);
0107 }
0108
0109 static int on_async_stack(void)
0110 {
0111 unsigned long frame = current_frame_address();
0112
0113 return ((S390_lowcore.async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0;
0114 }
0115
0116 static void do_irq_async(struct pt_regs *regs, int irq)
0117 {
0118 if (on_async_stack()) {
0119 do_IRQ(regs, irq);
0120 } else {
0121 call_on_stack(2, S390_lowcore.async_stack, void, do_IRQ,
0122 struct pt_regs *, regs, int, irq);
0123 }
0124 }
0125
/*
 * Test whether another I/O interrupt is pending, using the TPI
 * (test pending interruption) instruction and extracting the
 * resulting condition code from the PSW via IPM.
 * Returns the condition code (non-zero means an interrupt was stored).
 * NOTE(review): regs is unused here; presumably kept for signature
 * symmetry with the other helpers — confirm before removing.
 */
static int irq_pending(struct pt_regs *regs)
{
	int cc;

	asm volatile("tpi 0\n"
		     "ipm %0" : "=d" (cc) : : "cc");
	/* Condition code lives in bits 28-29 of the IPM result. */
	return cc >> 28;
}
0134
/*
 * First-level handler for I/O interrupts, called from the low-level
 * entry code with interrupts disabled.  noinstr: runs before the
 * entry state is fully established, so no instrumentation allowed.
 */
void noinstr do_io_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);
	struct pt_regs *old_regs = set_irq_regs(regs);
	int from_idle;

	irq_enter_rcu();

	if (user_mode(regs)) {
		update_timer_sys();
		if (static_branch_likely(&cpu_has_bear))
			current->thread.last_break = regs->last_break;
	}

	/* Interrupted psw_idle_exit in kernel mode means we woke from idle. */
	from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
	if (from_idle)
		account_idle_time_irq();

	/*
	 * Dispatch adapter (thin) vs. classic I/O interrupts based on the
	 * tpi_info stored in the lowcore; on LPAR keep draining pending
	 * interrupts to avoid re-entering the handler for each one.
	 */
	do {
		regs->tpi_info = S390_lowcore.tpi_info;
		if (S390_lowcore.tpi_info.adapter_IO)
			do_irq_async(regs, THIN_INTERRUPT);
		else
			do_irq_async(regs, IO_INTERRUPT);
	} while (MACHINE_IS_LPAR && irq_pending(regs));

	irq_exit_rcu();

	set_irq_regs(old_regs);
	irqentry_exit(regs, state);

	/* Returning from idle: don't resume the wait PSW. */
	if (from_idle)
		regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
}
0169
/*
 * First-level handler for external interrupts, called from the
 * low-level entry code.  Mirrors do_io_irq() except that the external
 * interrupt parameters are copied from the lowcore into pt_regs for
 * the registered handlers to consume.
 */
void noinstr do_ext_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);
	struct pt_regs *old_regs = set_irq_regs(regs);
	int from_idle;

	irq_enter_rcu();

	if (user_mode(regs)) {
		update_timer_sys();
		if (static_branch_likely(&cpu_has_bear))
			current->thread.last_break = regs->last_break;
	}

	/* Stash the ext interrupt code/parameters for do_ext_interrupt(). */
	regs->int_code = S390_lowcore.ext_int_code_addr;
	regs->int_parm = S390_lowcore.ext_params;
	regs->int_parm_long = S390_lowcore.ext_params2;

	/* Interrupted psw_idle_exit in kernel mode means we woke from idle. */
	from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
	if (from_idle)
		account_idle_time_irq();

	do_irq_async(regs, EXT_INTERRUPT);

	irq_exit_rcu();
	set_irq_regs(old_regs);
	irqentry_exit(regs, state);

	/* Returning from idle: don't resume the wait PSW. */
	if (from_idle)
		regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
}
0201
/*
 * Print one dynamically allocated (MSI) irq line for /proc/interrupts:
 * per-cpu counts, chip name, and action name.  Silently skips irqs
 * without a descriptor.
 */
static void show_msi_interrupt(struct seq_file *p, int irq)
{
	struct irq_desc *desc;
	unsigned long flags;
	int cpu;

	/* RCU protects the irq descriptor lookup against concurrent free. */
	rcu_read_lock();
	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	/* Lock the descriptor so counts, chip and action stay consistent. */
	raw_spin_lock_irqsave(&desc->lock, flags);
	seq_printf(p, "%3d: ", irq);
	for_each_online_cpu(cpu)
		seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, cpu));

	if (desc->irq_data.chip)
		seq_printf(p, " %8s", desc->irq_data.chip->name);

	if (desc->action)
		seq_printf(p, "  %s", desc->action->name);

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
out:
	rcu_read_unlock();
}
0229
0230
0231
0232
/*
 * seq_file iterator body for /proc/interrupts.  Called once per index:
 * index 0 additionally prints the CPU header; indices below
 * NR_IRQS_BASE print the main classes from kstat; indices below
 * nr_irqs print MSI lines; the final call dumps all arch-private
 * counters in one go.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int index = *(loff_t *) v;
	int cpu, irq;

	/* Keep the online cpu mask stable while printing per-cpu columns. */
	cpus_read_lock();
	if (index == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(cpu)
			seq_printf(p, "CPU%-8d", cpu);
		seq_putc(p, '\n');
	}
	if (index < NR_IRQS_BASE) {
		seq_printf(p, "%s: ", irqclass_main_desc[index].name);
		irq = irqclass_main_desc[index].irq;
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
		seq_putc(p, '\n');
		goto out;
	}
	if (index < nr_irqs) {
		show_msi_interrupt(p, index);
		goto out;
	}
	/* Last call: print every arch-private counter row. */
	for (index = 0; index < NR_ARCH_IRQS; index++) {
		seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
		irq = irqclass_sub_desc[index].irq;
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   per_cpu(irq_stat, cpu).irqs[irq]);
		if (irqclass_sub_desc[index].desc)
			seq_printf(p, "  %s", irqclass_sub_desc[index].desc);
		seq_putc(p, '\n');
	}
out:
	cpus_read_unlock();
	return 0;
}
0271
0272 unsigned int arch_dynirq_lower_bound(unsigned int from)
0273 {
0274 return from < NR_IRQS_BASE ? NR_IRQS_BASE : from;
0275 }
0276
0277
0278
0279
0280
/* Hash table of registered external interrupt handlers, keyed by code. */
static struct hlist_head ext_int_hash[32] ____cacheline_aligned;

/* One registered handler for a given external interrupt code. */
struct ext_int_info {
	ext_int_handler_t handler;
	struct hlist_node entry;
	struct rcu_head rcu;
	u16 code;
};

/* Serializes writers of ext_int_hash; readers use RCU. */
static DEFINE_SPINLOCK(ext_int_hash_lock);
0292
0293 static inline int ext_hash(u16 code)
0294 {
0295 BUILD_BUG_ON(!is_power_of_2(ARRAY_SIZE(ext_int_hash)));
0296
0297 return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1);
0298 }
0299
/*
 * Register @handler for external interrupt @code.  Multiple handlers
 * may be registered for the same code; all of them are called.
 * Returns 0 on success or -ENOMEM.  GFP_ATOMIC because callers may
 * hold spinlocks — NOTE(review): confirm against current callers.
 */
int register_external_irq(u16 code, ext_int_handler_t handler)
{
	struct ext_int_info *p;
	unsigned long flags;
	int index;

	p = kmalloc(sizeof(*p), GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	p->code = code;
	p->handler = handler;
	index = ext_hash(code);

	spin_lock_irqsave(&ext_int_hash_lock, flags);
	hlist_add_head_rcu(&p->entry, &ext_int_hash[index]);
	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
	return 0;
}
EXPORT_SYMBOL(register_external_irq);
0318 EXPORT_SYMBOL(register_external_irq);
0319
/*
 * Remove every registration matching both @code and @handler.
 * The entries are freed via kfree_rcu() so concurrent RCU readers in
 * do_ext_interrupt() can finish safely.  Always returns 0.
 */
int unregister_external_irq(u16 code, ext_int_handler_t handler)
{
	struct ext_int_info *p;
	unsigned long flags;
	int index = ext_hash(code);

	spin_lock_irqsave(&ext_int_hash_lock, flags);
	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
		if (p->code == code && p->handler == handler) {
			hlist_del_rcu(&p->entry);
			kfree_rcu(p, rcu);
		}
	}
	spin_unlock_irqrestore(&ext_int_hash_lock, flags);
	return 0;
}
EXPORT_SYMBOL(unregister_external_irq);
0336 EXPORT_SYMBOL(unregister_external_irq);
0337
/*
 * Generic-irq action for EXT_INTERRUPT: decode the external interrupt
 * code saved in pt_regs by do_ext_irq() and call every handler
 * registered for it.
 */
static irqreturn_t do_ext_interrupt(int irq, void *dummy)
{
	struct pt_regs *regs = get_irq_regs();
	struct ext_code ext_code;
	struct ext_int_info *p;
	int index;

	ext_code.int_code = regs->int_code;
	/*
	 * Any external interrupt other than the clock comparator delays
	 * nohz idle entry on this cpu.
	 */
	if (ext_code.code != EXT_IRQ_CLK_COMP)
		set_cpu_flag(CIF_NOHZ_DELAY);

	index = ext_hash(ext_code.code);
	rcu_read_lock();
	hlist_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
		/* Bucket may hold other codes that hash to the same slot. */
		if (unlikely(p->code != ext_code.code))
			continue;
		p->handler(ext_code, regs->int_parm, regs->int_parm_long);
	}
	rcu_read_unlock();
	return IRQ_HANDLED;
}
0359
/*
 * Set up the EXT_INTERRUPT first-level irq: initialize the handler
 * hash and wire do_ext_interrupt() into the generic irq layer as a
 * per-cpu irq.  Panics on failure since external interrupts are
 * essential for this architecture.
 */
static void __init init_ext_interrupts(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
		INIT_HLIST_HEAD(&ext_int_hash[idx]);

	irq_set_chip_and_handler(EXT_INTERRUPT,
				 &dummy_irq_chip, handle_percpu_irq);
	if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL))
		panic("Failed to register EXT interrupt\n");
}
0372
/* Architecture irq initialization, called once at boot. */
void __init init_IRQ(void)
{
	/* The sub-class table must cover exactly all arch irq counters. */
	BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS);
	init_cio_interrupts();
	init_airq_interrupts();
	init_ext_interrupts();
}
0380
/* Reference counts for CR0 external-interrupt subclass mask bits. */
static DEFINE_SPINLOCK(irq_subclass_lock);
static unsigned char irq_subclass_refcount[64];
0383
0384 void irq_subclass_register(enum irq_subclass subclass)
0385 {
0386 spin_lock(&irq_subclass_lock);
0387 if (!irq_subclass_refcount[subclass])
0388 ctl_set_bit(0, subclass);
0389 irq_subclass_refcount[subclass]++;
0390 spin_unlock(&irq_subclass_lock);
0391 }
0392 EXPORT_SYMBOL(irq_subclass_register);
0393
0394 void irq_subclass_unregister(enum irq_subclass subclass)
0395 {
0396 spin_lock(&irq_subclass_lock);
0397 irq_subclass_refcount[subclass]--;
0398 if (!irq_subclass_refcount[subclass])
0399 ctl_clear_bit(0, subclass);
0400 spin_unlock(&irq_subclass_lock);
0401 }
0402 EXPORT_SYMBOL(irq_subclass_unregister);