// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new reads/writes from happening and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
    AFFINITY,
    AFFINITY_LIST,
    EFFECTIVE,
    EFFECTIVE_LIST,
};

static int show_irq_affinity(int type, struct seq_file *m)
{
    struct irq_desc *desc = irq_to_desc((long)m->private);
    const struct cpumask *mask;

    switch (type) {
    case AFFINITY:
    case AFFINITY_LIST:
        mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (irqd_is_setaffinity_pending(&desc->irq_data))
            mask = desc->pending_mask;
#endif
        break;
    case EFFECTIVE:
    case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
        break;
#endif
    default:
        return -EINVAL;
    }

    switch (type) {
    case AFFINITY_LIST:
    case EFFECTIVE_LIST:
        seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
        break;
    case AFFINITY:
    case EFFECTIVE:
        seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
        break;
    }
    return 0;
}
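
/*
 * Note on the output formats above: "%*pb" prints the cpumask as a
 * hexadecimal bitmap (e.g. "ff" for CPUs 0-7), while "%*pbl" prints it
 * as a CPU range list (e.g. "0-7"). The former backs smp_affinity and
 * effective_affinity, the latter backs the *_list variants.
 */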

static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
    struct irq_desc *desc = irq_to_desc((long)m->private);
    unsigned long flags;
    cpumask_var_t mask;

    if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
        return -ENOMEM;

    raw_spin_lock_irqsave(&desc->lock, flags);
    if (desc->affinity_hint)
        cpumask_copy(mask, desc->affinity_hint);
    raw_spin_unlock_irqrestore(&desc->lock, flags);

    seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
    free_cpumask_var(mask);

    return 0;
}
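
/*
 * desc->affinity_hint is a suggestion that a driver may install for
 * user-space balancers (via the irq_set_affinity_hint() family of
 * helpers); if no hint was installed, the zero-filled mask allocated
 * above is printed instead.
 */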

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
    return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
    return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
    /*
     * If the interrupt is started up already then this fails. The
     * interrupt is already assigned to an online CPU. There is no
     * point in moving it around randomly. Tell user space that the
     * selected mask is bogus.
     *
     * If not, then any change to the affinity is pointless because the
     * startup code invokes irq_setup_affinity(), which will select
     * an online CPU anyway.
     */
    return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
    return irq_select_affinity(irq);
}
#endif

static ssize_t write_irq_affinity(int type, struct file *file,
        const char __user *buffer, size_t count, loff_t *pos)
{
    unsigned int irq = (int)(long)pde_data(file_inode(file));
    cpumask_var_t new_value;
    int err;

    if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
        return -EIO;

    if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
        return -ENOMEM;

    if (type)
        err = cpumask_parselist_user(buffer, count, new_value);
    else
        err = cpumask_parse_user(buffer, count, new_value);
    if (err)
        goto free_cpumask;

    /*
     * Do not allow disabling IRQs completely - it's too easy a way
     * to accidentally make the system unusable :-) At least one
     * online CPU still has to be targeted.
     */
    if (!cpumask_intersects(new_value, cpu_online_mask)) {
        /*
         * Special case for empty set - allow the architecture code
         * to set default SMP affinity.
         */
        err = irq_select_affinity_usr(irq) ? -EINVAL : count;
    } else {
        err = irq_set_affinity(irq, new_value);
        if (!err)
            err = count;
    }

free_cpumask:
    free_cpumask_var(new_value);
    return err;
}
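
/*
 * write_irq_affinity() backs both proc files below. Illustrative usage
 * from user space (the IRQ number is just an example):
 *
 *   echo 0f  > /proc/irq/42/smp_affinity       (hex bitmask: CPUs 0-3)
 *   echo 0-3 > /proc/irq/42/smp_affinity_list  (list form, same CPUs)
 *
 * A mask that does not intersect the online CPUs is accepted only if
 * the architecture's irq_select_affinity_usr() can pick a sane default;
 * otherwise the write fails with -EINVAL.
 */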

static ssize_t irq_affinity_proc_write(struct file *file,
        const char __user *buffer, size_t count, loff_t *pos)
{
    return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
        const char __user *buffer, size_t count, loff_t *pos)
{
    return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, irq_affinity_proc_show, pde_data(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
    .proc_open  = irq_affinity_proc_open,
    .proc_read  = seq_read,
    .proc_lseek = seq_lseek,
    .proc_release   = single_release,
    .proc_write = irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
    .proc_open  = irq_affinity_list_proc_open,
    .proc_read  = seq_read,
    .proc_lseek = seq_lseek,
    .proc_release   = single_release,
    .proc_write = irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
    return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
    return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

static int default_affinity_show(struct seq_file *m, void *v)
{
    seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
    return 0;
}

static ssize_t default_affinity_write(struct file *file,
        const char __user *buffer, size_t count, loff_t *ppos)
{
    cpumask_var_t new_value;
    int err;

    if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
        return -ENOMEM;

    err = cpumask_parse_user(buffer, count, new_value);
    if (err)
        goto out;

    /*
     * Do not allow disabling IRQs completely - it's too easy a way
     * to accidentally make the system unusable :-) At least one
     * online CPU still has to be targeted.
     */
    if (!cpumask_intersects(new_value, cpu_online_mask)) {
        err = -EINVAL;
        goto out;
    }

    cpumask_copy(irq_default_affinity, new_value);
    err = count;

out:
    free_cpumask_var(new_value);
    return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
    return single_open(file, default_affinity_show, pde_data(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
    .proc_open  = default_affinity_open,
    .proc_read  = seq_read,
    .proc_lseek = seq_lseek,
    .proc_release   = single_release,
    .proc_write = default_affinity_write,
};
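
/*
 * These ops back /proc/irq/default_smp_affinity (created below in
 * register_default_affinity_proc()), which exposes irq_default_affinity,
 * the mask that newly started interrupts begin with. For example,
 * "echo f > /proc/irq/default_smp_affinity" restricts the default to
 * CPUs 0-3 for interrupts set up afterwards.
 */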

static int irq_node_proc_show(struct seq_file *m, void *v)
{
    struct irq_desc *desc = irq_to_desc((long) m->private);

    seq_printf(m, "%d\n", irq_desc_get_node(desc));
    return 0;
}
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
    struct irq_desc *desc = irq_to_desc((long) m->private);

    seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
           desc->irq_count, desc->irqs_unhandled,
           jiffies_to_msecs(desc->last_unhandled));
    return 0;
}
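
/*
 * The three values above come from the spurious-interrupt detection
 * machinery (note_interrupt()): a running interrupt count, the number
 * of interrupts that no handler claimed, and the jiffies timestamp of
 * the most recent unhandled one converted to milliseconds.
 */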

#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irqaction *action;
    unsigned long flags;
    int ret = 1;

    raw_spin_lock_irqsave(&desc->lock, flags);
    for_each_action_of_desc(desc, action) {
        if ((action != new_action) && action->name &&
                !strcmp(new_action->name, action->name)) {
            ret = 0;
            break;
        }
    }
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
    char name[MAX_NAMELEN];
    struct irq_desc *desc = irq_to_desc(irq);

    if (!desc->dir || action->dir || !action->name ||
                    !name_unique(irq, action))
        return;

    snprintf(name, MAX_NAMELEN, "%s", action->name);

    /* create /proc/irq/1234/handler/ */
    action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
    static DEFINE_MUTEX(register_lock);
    void __maybe_unused *irqp = (void *)(unsigned long) irq;
    char name[MAX_NAMELEN];

    if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
        return;

    /*
     * irq directories are registered only when a handler is
     * added, not when the descriptor is created, so multiple
     * tasks might try to register at the same time.
     */
    mutex_lock(&register_lock);

    if (desc->dir)
        goto out_unlock;

    sprintf(name, "%d", irq);

    /* create /proc/irq/1234 */
    desc->dir = proc_mkdir(name, root_irq_dir);
    if (!desc->dir)
        goto out_unlock;

#ifdef CONFIG_SMP
    /* create /proc/irq/<irq>/smp_affinity */
    proc_create_data("smp_affinity", 0644, desc->dir,
             &irq_affinity_proc_ops, irqp);

    /* create /proc/irq/<irq>/affinity_hint */
    proc_create_single_data("affinity_hint", 0444, desc->dir,
            irq_affinity_hint_proc_show, irqp);

    /* create /proc/irq/<irq>/smp_affinity_list */
    proc_create_data("smp_affinity_list", 0644, desc->dir,
             &irq_affinity_list_proc_ops, irqp);

    proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
            irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
    proc_create_single_data("effective_affinity", 0444, desc->dir,
            irq_effective_aff_proc_show, irqp);
    proc_create_single_data("effective_affinity_list", 0444, desc->dir,
            irq_effective_aff_list_proc_show, irqp);
# endif
#endif
    proc_create_single_data("spurious", 0444, desc->dir,
            irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
    mutex_unlock(&register_lock);
}
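
/*
 * After register_irq_proc() the per-interrupt directory looks like this
 * (the SMP files only with CONFIG_SMP, the effective_affinity files only
 * with CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK):
 *
 *   /proc/irq/<irq>/smp_affinity
 *   /proc/irq/<irq>/smp_affinity_list
 *   /proc/irq/<irq>/affinity_hint
 *   /proc/irq/<irq>/node
 *   /proc/irq/<irq>/effective_affinity
 *   /proc/irq/<irq>/effective_affinity_list
 *   /proc/irq/<irq>/spurious
 *   /proc/irq/<irq>/<handler name>/      (from register_handler_proc())
 */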

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
    char name[MAX_NAMELEN];

    if (!root_irq_dir || !desc->dir)
        return;
#ifdef CONFIG_SMP
    remove_proc_entry("smp_affinity", desc->dir);
    remove_proc_entry("affinity_hint", desc->dir);
    remove_proc_entry("smp_affinity_list", desc->dir);
    remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
    remove_proc_entry("effective_affinity", desc->dir);
    remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
    remove_proc_entry("spurious", desc->dir);

    sprintf(name, "%u", irq);
    remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
    proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
    proc_create("irq/default_smp_affinity", 0644, NULL,
            &default_affinity_proc_ops);
#endif
}

void init_irq_proc(void)
{
    unsigned int irq;
    struct irq_desc *desc;

    /* create /proc/irq */
    root_irq_dir = proc_mkdir("irq", NULL);
    if (!root_irq_dir)
        return;

    register_default_affinity_proc();

    /*
     * Create entries for all existing IRQs.
     */
    for_each_irq_desc(irq, desc)
        register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
    return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

int show_interrupts(struct seq_file *p, void *v)
{
    static int prec;

    unsigned long flags, any_count = 0;
    int i = *(loff_t *) v, j;
    struct irqaction *action;
    struct irq_desc *desc;

    if (i > ACTUAL_NR_IRQS)
        return 0;

    if (i == ACTUAL_NR_IRQS)
        return arch_show_interrupts(p, prec);

    /* print header and calculate the width of the first column */
    if (i == 0) {
        for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
            j *= 10;

        seq_printf(p, "%*s", prec + 8, "");
        for_each_online_cpu(j)
            seq_printf(p, "CPU%-8d", j);
        seq_putc(p, '\n');
    }

    rcu_read_lock();
    desc = irq_to_desc(i);
    if (!desc || irq_settings_is_hidden(desc))
        goto outsparse;

    if (desc->kstat_irqs) {
        for_each_online_cpu(j)
            any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
    }

    if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
        goto outsparse;

    seq_printf(p, "%*d: ", prec, i);
    for_each_online_cpu(j)
        seq_printf(p, "%10u ", desc->kstat_irqs ?
                    *per_cpu_ptr(desc->kstat_irqs, j) : 0);

    raw_spin_lock_irqsave(&desc->lock, flags);
    if (desc->irq_data.chip) {
        if (desc->irq_data.chip->irq_print_chip)
            desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
        else if (desc->irq_data.chip->name)
            seq_printf(p, " %8s", desc->irq_data.chip->name);
        else
            seq_printf(p, " %8s", "-");
    } else {
        seq_printf(p, " %8s", "None");
    }
    if (desc->irq_data.domain)
        seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
    else
        seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
    seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
    if (desc->name)
        seq_printf(p, "-%-8s", desc->name);

    action = desc->action;
    if (action) {
        seq_printf(p, "  %s", action->name);
        while ((action = action->next) != NULL)
            seq_printf(p, ", %s", action->name);
    }

    seq_putc(p, '\n');
    raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
    rcu_read_unlock();
    return 0;
}
#endif
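
/*
 * Each /proc/interrupts row produced above is, roughly:
 *
 *   <irq>:  <count per online CPU>...  <chip name>  <hwirq>  [Level|Edge]  [-<desc name>]  <action>[, <action>...]
 *
 * The trigger-type column depends on CONFIG_GENERIC_IRQ_SHOW_LEVEL, and
 * arch_show_interrupts() appends architecture-specific rows (such as
 * NMI and LOC on x86) after the last interrupt line.
 */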