0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
0004  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
0005  *
0006  * This file contains the interrupt descriptor management code. Detailed
0007  * information is available in Documentation/core-api/genericirq.rst
0008  *
0009  */
0010 #include <linux/irq.h>
0011 #include <linux/slab.h>
0012 #include <linux/export.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/kernel_stat.h>
0015 #include <linux/radix-tree.h>
0016 #include <linux/bitmap.h>
0017 #include <linux/irqdomain.h>
0018 #include <linux/sysfs.h>
0019 
0020 #include "internals.h"
0021 
0022 /*
0023  * lockdep: we want to handle all irq_desc locks as a single lock-class:
0024  */
0025 static struct lock_class_key irq_desc_lock_class;
0026 
0027 #if defined(CONFIG_SMP)
0028 static int __init irq_affinity_setup(char *str)
0029 {
0030     alloc_bootmem_cpumask_var(&irq_default_affinity);
0031     cpulist_parse(str, irq_default_affinity);
0032     /*
0033      * Set at least the boot CPU. We don't want to end up with
0034      * bug reports caused by random command-line masks
0035      */
0036     cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
0037     return 1;
0038 }
0039 __setup("irqaffinity=", irq_affinity_setup);
0040 
0041 static void __init init_irq_default_affinity(void)
0042 {
0043     if (!cpumask_available(irq_default_affinity))
0044         zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
0045     if (cpumask_empty(irq_default_affinity))
0046         cpumask_setall(irq_default_affinity);
0047 }
0048 #else
0049 static void __init init_irq_default_affinity(void)
0050 {
0051 }
0052 #endif
0053 
0054 #ifdef CONFIG_SMP
0055 static int alloc_masks(struct irq_desc *desc, int node)
0056 {
0057     if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
0058                      GFP_KERNEL, node))
0059         return -ENOMEM;
0060 
0061 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
0062     if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
0063                      GFP_KERNEL, node)) {
0064         free_cpumask_var(desc->irq_common_data.affinity);
0065         return -ENOMEM;
0066     }
0067 #endif
0068 
0069 #ifdef CONFIG_GENERIC_PENDING_IRQ
0070     if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
0071 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
0072         free_cpumask_var(desc->irq_common_data.effective_affinity);
0073 #endif
0074         free_cpumask_var(desc->irq_common_data.affinity);
0075         return -ENOMEM;
0076     }
0077 #endif
0078     return 0;
0079 }
0080 
0081 static void desc_smp_init(struct irq_desc *desc, int node,
0082               const struct cpumask *affinity)
0083 {
0084     if (!affinity)
0085         affinity = irq_default_affinity;
0086     cpumask_copy(desc->irq_common_data.affinity, affinity);
0087 
0088 #ifdef CONFIG_GENERIC_PENDING_IRQ
0089     cpumask_clear(desc->pending_mask);
0090 #endif
0091 #ifdef CONFIG_NUMA
0092     desc->irq_common_data.node = node;
0093 #endif
0094 }
0095 
0096 #else
0097 static inline int
0098 alloc_masks(struct irq_desc *desc, int node) { return 0; }
0099 static inline void
0100 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
0101 #endif
0102 
0103 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
0104                   const struct cpumask *affinity, struct module *owner)
0105 {
0106     int cpu;
0107 
0108     desc->irq_common_data.handler_data = NULL;
0109     desc->irq_common_data.msi_desc = NULL;
0110 
0111     desc->irq_data.common = &desc->irq_common_data;
0112     desc->irq_data.irq = irq;
0113     desc->irq_data.chip = &no_irq_chip;
0114     desc->irq_data.chip_data = NULL;
0115     irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
0116     irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
0117     irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
0118     desc->handle_irq = handle_bad_irq;
0119     desc->depth = 1;
0120     desc->irq_count = 0;
0121     desc->irqs_unhandled = 0;
0122     desc->tot_count = 0;
0123     desc->name = NULL;
0124     desc->owner = owner;
0125     for_each_possible_cpu(cpu)
0126         *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
0127     desc_smp_init(desc, node, affinity);
0128 }
0129 
0130 int nr_irqs = NR_IRQS;
0131 EXPORT_SYMBOL_GPL(nr_irqs);
0132 
0133 static DEFINE_MUTEX(sparse_irq_lock);
0134 static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
0135 
0136 #ifdef CONFIG_SPARSE_IRQ
0137 
0138 static void irq_kobj_release(struct kobject *kobj);
0139 
0140 #ifdef CONFIG_SYSFS
0141 static struct kobject *irq_kobj_base;
0142 
0143 #define IRQ_ATTR_RO(_name) \
0144 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
0145 
0146 static ssize_t per_cpu_count_show(struct kobject *kobj,
0147                   struct kobj_attribute *attr, char *buf)
0148 {
0149     struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
0150     ssize_t ret = 0;
0151     char *p = "";
0152     int cpu;
0153 
0154     for_each_possible_cpu(cpu) {
0155         unsigned int c = irq_desc_kstat_cpu(desc, cpu);
0156 
0157         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
0158         p = ",";
0159     }
0160 
0161     ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
0162     return ret;
0163 }
0164 IRQ_ATTR_RO(per_cpu_count);
0165 
0166 static ssize_t chip_name_show(struct kobject *kobj,
0167                   struct kobj_attribute *attr, char *buf)
0168 {
0169     struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
0170     ssize_t ret = 0;
0171 
0172     raw_spin_lock_irq(&desc->lock);
0173     if (desc->irq_data.chip && desc->irq_data.chip->name) {
0174         ret = scnprintf(buf, PAGE_SIZE, "%s\n",
0175                 desc->irq_data.chip->name);
0176     }
0177     raw_spin_unlock_irq(&desc->lock);
0178 
0179     return ret;
0180 }
0181 IRQ_ATTR_RO(chip_name);
0182 
0183 static ssize_t hwirq_show(struct kobject *kobj,
0184               struct kobj_attribute *attr, char *buf)
0185 {
0186     struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
0187     ssize_t ret = 0;
0188 
0189     raw_spin_lock_irq(&desc->lock);
0190     if (desc->irq_data.domain)
0191         ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
0192     raw_spin_unlock_irq(&desc->lock);
0193 
0194     return ret;
0195 }
0196 IRQ_ATTR_RO(hwirq);
0197 
0198 static ssize_t type_show(struct kobject *kobj,
0199              struct kobj_attribute *attr, char *buf)
0200 {
0201     struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
0202     ssize_t ret = 0;
0203 
0204     raw_spin_lock_irq(&desc->lock);
0205     ret = sprintf(buf, "%s\n",
0206               irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
0207     raw_spin_unlock_irq(&desc->lock);
0208 
0209     return ret;
0210 
0211 }
0212 IRQ_ATTR_RO(type);
0213 
0214 static ssize_t wakeup_show(struct kobject *kobj,
0215                struct kobj_attribute *attr, char *buf)
0216 {
0217     struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
0218     ssize_t ret = 0;
0219 
0220     raw_spin_lock_irq(&desc->lock);
0221     ret = sprintf(buf, "%s\n",
0222               irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
0223     raw_spin_unlock_irq(&desc->lock);
0224 
0225     return ret;
0226 
0227 }
0228 IRQ_ATTR_RO(wakeup);
0229 
0230 static ssize_t name_show(struct kobject *kobj,
0231              struct kobj_attribute *attr, char *buf)
0232 {
0233     struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
0234     ssize_t ret = 0;
0235 
0236     raw_spin_lock_irq(&desc->lock);
0237     if (desc->name)
0238         ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
0239     raw_spin_unlock_irq(&desc->lock);
0240 
0241     return ret;
0242 }
0243 IRQ_ATTR_RO(name);
0244 
0245 static ssize_t actions_show(struct kobject *kobj,
0246                 struct kobj_attribute *attr, char *buf)
0247 {
0248     struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
0249     struct irqaction *action;
0250     ssize_t ret = 0;
0251     char *p = "";
0252 
0253     raw_spin_lock_irq(&desc->lock);
0254     for_each_action_of_desc(desc, action) {
0255         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
0256                  p, action->name);
0257         p = ",";
0258     }
0259     raw_spin_unlock_irq(&desc->lock);
0260 
0261     if (ret)
0262         ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
0263 
0264     return ret;
0265 }
0266 IRQ_ATTR_RO(actions);
0267 
0268 static struct attribute *irq_attrs[] = {
0269     &per_cpu_count_attr.attr,
0270     &chip_name_attr.attr,
0271     &hwirq_attr.attr,
0272     &type_attr.attr,
0273     &wakeup_attr.attr,
0274     &name_attr.attr,
0275     &actions_attr.attr,
0276     NULL
0277 };
0278 ATTRIBUTE_GROUPS(irq);
0279 
0280 static struct kobj_type irq_kobj_type = {
0281     .release    = irq_kobj_release,
0282     .sysfs_ops  = &kobj_sysfs_ops,
0283     .default_groups = irq_groups,
0284 };
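
The attribute group above is registered under /sys/kernel/irq/<irq>/ once irq_sysfs_init() below has run. As an illustrative sketch only (not part of this file; the interrupt number 16 is an arbitrary assumption), a userspace program could read those files like this:

#include <stdio.h>

int main(void)
{
    /* Attribute names match the irq_attrs[] array above */
    static const char * const attrs[] = {
        "per_cpu_count", "chip_name", "hwirq",
        "type", "wakeup", "name", "actions",
    };
    char path[64], line[256];

    for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
        snprintf(path, sizeof(path), "/sys/kernel/irq/16/%s", attrs[i]);
        FILE *f = fopen(path, "r");

        if (!f)
            continue;   /* attribute may be absent or unreadable */
        if (fgets(line, sizeof(line), f))
            printf("%-14s %s", attrs[i], line);
        fclose(f);
    }
    return 0;
}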
0285 
0286 static void irq_sysfs_add(int irq, struct irq_desc *desc)
0287 {
0288     if (irq_kobj_base) {
0289         /*
0290          * Continue even in case of failure, as this is not
0291          * critical.
0292          */
0293         if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
0294             pr_warn("Failed to add kobject for irq %d\n", irq);
0295     }
0296 }
0297 
0298 static void irq_sysfs_del(struct irq_desc *desc)
0299 {
0300     /*
0301      * If irq_sysfs_init() has not yet been invoked (early boot), then
0302      * irq_kobj_base is NULL and the descriptor was never added.
0303      * kobject_del() complains about an object with no parent, so make
0304      * it conditional.
0305      */
0306     if (irq_kobj_base)
0307         kobject_del(&desc->kobj);
0308 }
0309 
0310 static int __init irq_sysfs_init(void)
0311 {
0312     struct irq_desc *desc;
0313     int irq;
0314 
0315     /* Prevent concurrent irq alloc/free */
0316     irq_lock_sparse();
0317 
0318     irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
0319     if (!irq_kobj_base) {
0320         irq_unlock_sparse();
0321         return -ENOMEM;
0322     }
0323 
0324     /* Add the already allocated interrupts */
0325     for_each_irq_desc(irq, desc)
0326         irq_sysfs_add(irq, desc);
0327     irq_unlock_sparse();
0328 
0329     return 0;
0330 }
0331 postcore_initcall(irq_sysfs_init);
0332 
0333 #else /* !CONFIG_SYSFS */
0334 
0335 static struct kobj_type irq_kobj_type = {
0336     .release    = irq_kobj_release,
0337 };
0338 
0339 static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
0340 static void irq_sysfs_del(struct irq_desc *desc) {}
0341 
0342 #endif /* CONFIG_SYSFS */
0343 
0344 static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
0345 
0346 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
0347 {
0348     radix_tree_insert(&irq_desc_tree, irq, desc);
0349 }
0350 
0351 struct irq_desc *irq_to_desc(unsigned int irq)
0352 {
0353     return radix_tree_lookup(&irq_desc_tree, irq);
0354 }
0355 #ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
0356 EXPORT_SYMBOL_GPL(irq_to_desc);
0357 #endif
0358 
0359 static void delete_irq_desc(unsigned int irq)
0360 {
0361     radix_tree_delete(&irq_desc_tree, irq);
0362 }
0363 
0364 #ifdef CONFIG_SMP
0365 static void free_masks(struct irq_desc *desc)
0366 {
0367 #ifdef CONFIG_GENERIC_PENDING_IRQ
0368     free_cpumask_var(desc->pending_mask);
0369 #endif
0370     free_cpumask_var(desc->irq_common_data.affinity);
0371 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
0372     free_cpumask_var(desc->irq_common_data.effective_affinity);
0373 #endif
0374 }
0375 #else
0376 static inline void free_masks(struct irq_desc *desc) { }
0377 #endif
0378 
0379 void irq_lock_sparse(void)
0380 {
0381     mutex_lock(&sparse_irq_lock);
0382 }
0383 
0384 void irq_unlock_sparse(void)
0385 {
0386     mutex_unlock(&sparse_irq_lock);
0387 }
0388 
0389 static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
0390                    const struct cpumask *affinity,
0391                    struct module *owner)
0392 {
0393     struct irq_desc *desc;
0394 
0395     desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
0396     if (!desc)
0397         return NULL;
0398     /* allocate based on nr_cpu_ids */
0399     desc->kstat_irqs = alloc_percpu(unsigned int);
0400     if (!desc->kstat_irqs)
0401         goto err_desc;
0402 
0403     if (alloc_masks(desc, node))
0404         goto err_kstat;
0405 
0406     raw_spin_lock_init(&desc->lock);
0407     lockdep_set_class(&desc->lock, &irq_desc_lock_class);
0408     mutex_init(&desc->request_mutex);
0409     init_rcu_head(&desc->rcu);
0410     init_waitqueue_head(&desc->wait_for_threads);
0411 
0412     desc_set_defaults(irq, desc, node, affinity, owner);
0413     irqd_set(&desc->irq_data, flags);
0414     kobject_init(&desc->kobj, &irq_kobj_type);
0415 
0416     return desc;
0417 
0418 err_kstat:
0419     free_percpu(desc->kstat_irqs);
0420 err_desc:
0421     kfree(desc);
0422     return NULL;
0423 }
0424 
0425 static void irq_kobj_release(struct kobject *kobj)
0426 {
0427     struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
0428 
0429     free_masks(desc);
0430     free_percpu(desc->kstat_irqs);
0431     kfree(desc);
0432 }
0433 
0434 static void delayed_free_desc(struct rcu_head *rhp)
0435 {
0436     struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
0437 
0438     kobject_put(&desc->kobj);
0439 }
0440 
0441 static void free_desc(unsigned int irq)
0442 {
0443     struct irq_desc *desc = irq_to_desc(irq);
0444 
0445     irq_remove_debugfs_entry(desc);
0446     unregister_irq_proc(irq, desc);
0447 
0448     /*
0449      * sparse_irq_lock also protects show_interrupts() and
0450      * kstat_irqs_usr(). Once we have deleted the descriptor from the
0451      * sparse tree we can free it. Lookups from /proc will then fail to
0452      * find the descriptor.
0453      *
0454      * The sysfs entry must be serialized against a concurrent
0455      * irq_sysfs_init() as well.
0456      */
0457     irq_sysfs_del(desc);
0458     delete_irq_desc(irq);
0459 
0460     /*
0461      * We free the descriptor, masks and stat fields via RCU. That
0462      * allows demultiplexing interrupts to do RCU-based management of
0463      * their child interrupts.
0464      * This also allows us to use RCU in kstat_irqs_usr().
0465      */
0466     call_rcu(&desc->rcu, delayed_free_desc);
0467 }
0468 
0469 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
0470                const struct irq_affinity_desc *affinity,
0471                struct module *owner)
0472 {
0473     struct irq_desc *desc;
0474     int i;
0475 
0476     /* Validate affinity mask(s) */
0477     if (affinity) {
0478         for (i = 0; i < cnt; i++) {
0479             if (cpumask_empty(&affinity[i].mask))
0480                 return -EINVAL;
0481         }
0482     }
0483 
0484     for (i = 0; i < cnt; i++) {
0485         const struct cpumask *mask = NULL;
0486         unsigned int flags = 0;
0487 
0488         if (affinity) {
0489             if (affinity->is_managed) {
0490                 flags = IRQD_AFFINITY_MANAGED |
0491                     IRQD_MANAGED_SHUTDOWN;
0492             }
0493             mask = &affinity->mask;
0494             node = cpu_to_node(cpumask_first(mask));
0495             affinity++;
0496         }
0497 
0498         desc = alloc_desc(start + i, node, flags, mask, owner);
0499         if (!desc)
0500             goto err;
0501         irq_insert_desc(start + i, desc);
0502         irq_sysfs_add(start + i, desc);
0503         irq_add_debugfs_entry(start + i, desc);
0504     }
0505     bitmap_set(allocated_irqs, start, cnt);
0506     return start;
0507 
0508 err:
0509     for (i--; i >= 0; i--)
0510         free_desc(start + i);
0511     return -ENOMEM;
0512 }
0513 
0514 static int irq_expand_nr_irqs(unsigned int nr)
0515 {
0516     if (nr > IRQ_BITMAP_BITS)
0517         return -ENOMEM;
0518     nr_irqs = nr;
0519     return 0;
0520 }
0521 
0522 int __init early_irq_init(void)
0523 {
0524     int i, initcnt, node = first_online_node;
0525     struct irq_desc *desc;
0526 
0527     init_irq_default_affinity();
0528 
0529     /* Let arch update nr_irqs and return the nr of preallocated irqs */
0530     initcnt = arch_probe_nr_irqs();
0531     printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
0532            NR_IRQS, nr_irqs, initcnt);
0533 
0534     if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
0535         nr_irqs = IRQ_BITMAP_BITS;
0536 
0537     if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
0538         initcnt = IRQ_BITMAP_BITS;
0539 
0540     if (initcnt > nr_irqs)
0541         nr_irqs = initcnt;
0542 
0543     for (i = 0; i < initcnt; i++) {
0544         desc = alloc_desc(i, node, 0, NULL, NULL);
0545         set_bit(i, allocated_irqs);
0546         irq_insert_desc(i, desc);
0547     }
0548     return arch_early_irq_init();
0549 }
0550 
0551 #else /* !CONFIG_SPARSE_IRQ */
0552 
0553 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
0554     [0 ... NR_IRQS-1] = {
0555         .handle_irq = handle_bad_irq,
0556         .depth      = 1,
0557         .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
0558     }
0559 };
0560 
0561 int __init early_irq_init(void)
0562 {
0563     int count, i, node = first_online_node;
0564     struct irq_desc *desc;
0565 
0566     init_irq_default_affinity();
0567 
0568     printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
0569 
0570     desc = irq_desc;
0571     count = ARRAY_SIZE(irq_desc);
0572 
0573     for (i = 0; i < count; i++) {
0574         desc[i].kstat_irqs = alloc_percpu(unsigned int);
0575         alloc_masks(&desc[i], node);
0576         raw_spin_lock_init(&desc[i].lock);
0577         lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
0578         mutex_init(&desc[i].request_mutex);
0579         init_waitqueue_head(&desc[i].wait_for_threads);
0580         desc_set_defaults(i, &desc[i], node, NULL, NULL);
0581     }
0582     return arch_early_irq_init();
0583 }
0584 
0585 struct irq_desc *irq_to_desc(unsigned int irq)
0586 {
0587     return (irq < NR_IRQS) ? irq_desc + irq : NULL;
0588 }
0589 EXPORT_SYMBOL(irq_to_desc);
0590 
0591 static void free_desc(unsigned int irq)
0592 {
0593     struct irq_desc *desc = irq_to_desc(irq);
0594     unsigned long flags;
0595 
0596     raw_spin_lock_irqsave(&desc->lock, flags);
0597     desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
0598     raw_spin_unlock_irqrestore(&desc->lock, flags);
0599 }
0600 
0601 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
0602                   const struct irq_affinity_desc *affinity,
0603                   struct module *owner)
0604 {
0605     u32 i;
0606 
0607     for (i = 0; i < cnt; i++) {
0608         struct irq_desc *desc = irq_to_desc(start + i);
0609 
0610         desc->owner = owner;
0611     }
0612     bitmap_set(allocated_irqs, start, cnt);
0613     return start;
0614 }
0615 
0616 static int irq_expand_nr_irqs(unsigned int nr)
0617 {
0618     return -ENOMEM;
0619 }
0620 
0621 void irq_mark_irq(unsigned int irq)
0622 {
0623     mutex_lock(&sparse_irq_lock);
0624     bitmap_set(allocated_irqs, irq, 1);
0625     mutex_unlock(&sparse_irq_lock);
0626 }
0627 
0628 #ifdef CONFIG_GENERIC_IRQ_LEGACY
0629 void irq_init_desc(unsigned int irq)
0630 {
0631     free_desc(irq);
0632 }
0633 #endif
0634 
0635 #endif /* !CONFIG_SPARSE_IRQ */
0636 
0637 int handle_irq_desc(struct irq_desc *desc)
0638 {
0639     struct irq_data *data;
0640 
0641     if (!desc)
0642         return -EINVAL;
0643 
0644     data = irq_desc_get_irq_data(desc);
0645     if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data)))
0646         return -EPERM;
0647 
0648     generic_handle_irq_desc(desc);
0649     return 0;
0650 }
0651 
0652 /**
0653  * generic_handle_irq - Invoke the handler for a particular irq
0654  * @irq:    The irq number to handle
0655  *
0656  * Returns: 0 on success, or -EINVAL if conversion has failed
0657  *
0658  *      This function must be called from an IRQ context with irq regs
0659  *      initialized.
0660  */
0661 int generic_handle_irq(unsigned int irq)
0662 {
0663     return handle_irq_desc(irq_to_desc(irq));
0664 }
0665 EXPORT_SYMBOL_GPL(generic_handle_irq);
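
A minimal sketch of a typical caller: a demultiplexing device whose parent handler fans out to child Linux irqs mapped earlier. struct demux_chip, its status register and the child_irq[] mapping are hypothetical, not from this file:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/bitops.h>

struct demux_chip {
    void __iomem *status_reg;   /* hypothetical pending-bit register */
    unsigned int child_irq[32]; /* Linux irq numbers mapped at probe time */
};

/* Runs in hard interrupt context, as generic_handle_irq() requires */
static irqreturn_t demux_parent_handler(int irq, void *data)
{
    struct demux_chip *chip = data;
    unsigned long pending = readl(chip->status_reg);
    int bit;

    if (!pending)
        return IRQ_NONE;

    for_each_set_bit(bit, &pending, 32)
        generic_handle_irq(chip->child_irq[bit]);

    return IRQ_HANDLED;
}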
0666 
0667 /**
0668  * generic_handle_irq_safe - Invoke the handler for a particular irq from any
0669  *               context.
0670  * @irq:    The irq number to handle
0671  *
0672  * Returns: 0 on success, a negative value on error.
0673  *
0674  * This function can be called from any context (IRQ or process context). It
0675  * will report an error if not invoked from IRQ context and the irq has been
0676  * marked to enforce IRQ-context only.
0677  */
0678 int generic_handle_irq_safe(unsigned int irq)
0679 {
0680     unsigned long flags;
0681     int ret;
0682 
0683     local_irq_save(flags);
0684     ret = handle_irq_desc(irq_to_desc(irq));
0685     local_irq_restore(flags);
0686     return ret;
0687 }
0688 EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
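
A brief sketch of where the _safe variant fits, assuming a hypothetical expander driver whose threaded handler runs in process context (the names are illustrative):

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Threaded handler: process context, so generic_handle_irq() would be
 * unsafe here; generic_handle_irq_safe() disables local interrupts around
 * the dispatch instead. */
static irqreturn_t expander_thread_fn(int parent_irq, void *data)
{
    unsigned int child_irq = *(unsigned int *)data;    /* assumed mapping */

    return generic_handle_irq_safe(child_irq) ? IRQ_NONE : IRQ_HANDLED;
}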
0689 
0690 #ifdef CONFIG_IRQ_DOMAIN
0691 /**
0692  * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
0693  *                             to a domain.
0694  * @domain: The domain where to perform the lookup
0695  * @hwirq:  The HW irq number to convert to a logical one
0696  *
0697  * Returns: 0 on success, or -EINVAL if conversion has failed
0698  *
0699  *      This function must be called from an IRQ context with irq regs
0700  *      initialized.
0701  */
0702 int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
0703 {
0704     return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
0705 }
0706 EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
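
A sketch of the usual chained-handler pattern, assuming a hypothetical interrupt controller with a pending register and an irq_domain created at probe time (the register layout and struct are illustrative):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

#define MY_INTC_NR_IRQS     32
#define MY_INTC_PENDING     0x10    /* hypothetical register offset */

struct my_intc {
    void __iomem *base;
    struct irq_domain *domain;  /* e.g. created with irq_domain_add_linear() */
};

static void my_intc_chained_handler(struct irq_desc *desc)
{
    struct my_intc *intc = irq_desc_get_handler_data(desc);
    struct irq_chip *chip = irq_desc_get_chip(desc);
    unsigned long pending;
    unsigned int hwirq;

    chained_irq_enter(chip, desc);

    pending = readl(intc->base + MY_INTC_PENDING);
    for_each_set_bit(hwirq, &pending, MY_INTC_NR_IRQS)
        generic_handle_domain_irq(intc->domain, hwirq);

    chained_irq_exit(chip, desc);
}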
0707 
0708 /**
0709  * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
0710  *                             to a domain.
0711  * @domain: The domain where to perform the lookup
0712  * @hwirq:  The HW irq number to convert to a logical one
0713  *
0714  * Returns: 0 on success, or -EINVAL if conversion has failed
0715  *
0716  *      This function must be called from an NMI context with irq regs
0717  *      initialized.
0718  */
0719 int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
0720 {
0721     WARN_ON_ONCE(!in_nmi());
0722     return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
0723 }
0724 #endif
0725 
0726 /* Dynamic interrupt handling */
0727 
0728 /**
0729  * irq_free_descs - free irq descriptors
0730  * @from:   Start of descriptor range
0731  * @cnt:    Number of consecutive irqs to free
0732  */
0733 void irq_free_descs(unsigned int from, unsigned int cnt)
0734 {
0735     int i;
0736 
0737     if (from >= nr_irqs || (from + cnt) > nr_irqs)
0738         return;
0739 
0740     mutex_lock(&sparse_irq_lock);
0741     for (i = 0; i < cnt; i++)
0742         free_desc(from + i);
0743 
0744     bitmap_clear(allocated_irqs, from, cnt);
0745     mutex_unlock(&sparse_irq_lock);
0746 }
0747 EXPORT_SYMBOL_GPL(irq_free_descs);
0748 
0749 /**
0750  * __irq_alloc_descs - allocate and initialize a range of irq descriptors
0751  * @irq:    Allocate for specific irq number if irq >= 0
0752  * @from:   Start the search from this irq number
0753  * @cnt:    Number of consecutive irqs to allocate.
0754  * @node:   Preferred node on which the irq descriptor should be allocated
0755  * @owner:  Owning module (can be NULL)
0756  * @affinity:   Optional pointer to an affinity mask array of size @cnt which
0757  *      hints where the irq descriptors should be allocated and which
0758  *      default affinities to use
0759  *
0760  * Returns the first irq number or error code
0761  */
0762 int __ref
0763 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
0764           struct module *owner, const struct irq_affinity_desc *affinity)
0765 {
0766     int start, ret;
0767 
0768     if (!cnt)
0769         return -EINVAL;
0770 
0771     if (irq >= 0) {
0772         if (from > irq)
0773             return -EINVAL;
0774         from = irq;
0775     } else {
0776         /*
0777          * For interrupts which are freely allocated, the
0778          * architecture can enforce a lower bound on the @from
0779          * argument. x86 uses this to exclude the GSI space.
0780          */
0781         from = arch_dynirq_lower_bound(from);
0782     }
0783 
0784     mutex_lock(&sparse_irq_lock);
0785 
0786     start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
0787                        from, cnt, 0);
0788     ret = -EEXIST;
0789     if (irq >= 0 && start != irq)
0790         goto unlock;
0791 
0792     if (start + cnt > nr_irqs) {
0793         ret = irq_expand_nr_irqs(start + cnt);
0794         if (ret)
0795             goto unlock;
0796     }
0797     ret = alloc_descs(start, cnt, node, affinity, owner);
0798 unlock:
0799     mutex_unlock(&sparse_irq_lock);
0800     return ret;
0801 }
0802 EXPORT_SYMBOL_GPL(__irq_alloc_descs);
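
Callers normally go through the wrapper macros in <linux/irq.h> (irq_alloc_descs(), irq_alloc_desc(), irq_alloc_desc_at(), ...) rather than calling __irq_alloc_descs() directly. A minimal sketch, assuming a driver that simply needs four consecutive descriptors on any node (my_reserve_irq_range() is hypothetical):

#include <linux/irq.h>
#include <linux/numa.h>

static int my_reserve_irq_range(void)
{
    int base;

    /* irq < 0: let the core pick any free range of 4 consecutive irqs */
    base = irq_alloc_descs(-1, 0, 4, NUMA_NO_NODE);
    if (base < 0)
        return base;

    /* ... install irq_chip/handler for base .. base + 3 here ... */

    irq_free_descs(base, 4);
    return 0;
}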
0803 
0804 /**
0805  * irq_get_next_irq - get next allocated irq number
0806  * @offset: where to start the search
0807  *
0808  * Returns the next allocated irq number at or after @offset, or nr_irqs if none is found.
0809  */
0810 unsigned int irq_get_next_irq(unsigned int offset)
0811 {
0812     return find_next_bit(allocated_irqs, nr_irqs, offset);
0813 }
0814 
0815 struct irq_desc *
0816 __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
0817             unsigned int check)
0818 {
0819     struct irq_desc *desc = irq_to_desc(irq);
0820 
0821     if (desc) {
0822         if (check & _IRQ_DESC_CHECK) {
0823             if ((check & _IRQ_DESC_PERCPU) &&
0824                 !irq_settings_is_per_cpu_devid(desc))
0825                 return NULL;
0826 
0827             if (!(check & _IRQ_DESC_PERCPU) &&
0828                 irq_settings_is_per_cpu_devid(desc))
0829                 return NULL;
0830         }
0831 
0832         if (bus)
0833             chip_bus_lock(desc);
0834         raw_spin_lock_irqsave(&desc->lock, *flags);
0835     }
0836     return desc;
0837 }
0838 
0839 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
0840     __releases(&desc->lock)
0841 {
0842     raw_spin_unlock_irqrestore(&desc->lock, flags);
0843     if (bus)
0844         chip_bus_sync_unlock(desc);
0845 }
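
These two helpers implement the lookup-then-lock pattern used throughout kernel/irq, usually via thin wrappers in kernel/irq/internals.h. A hedged sketch of a caller, loosely modelled on the irq_set_*() helpers and not taken from this file:

/* Sketch only: update a descriptor field under desc->lock, taking the
 * optional chip bus lock as well (bus == true). */
static int my_update_desc(unsigned int irq, const char *name)
{
    unsigned long flags;
    struct irq_desc *desc;

    desc = __irq_get_desc_lock(irq, &flags, true, 0);
    if (!desc)
        return -EINVAL;

    desc->name = name;      /* protected by desc->lock */

    __irq_put_desc_unlock(desc, flags, true);
    return 0;
}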
0846 
0847 int irq_set_percpu_devid_partition(unsigned int irq,
0848                    const struct cpumask *affinity)
0849 {
0850     struct irq_desc *desc = irq_to_desc(irq);
0851 
0852     if (!desc)
0853         return -EINVAL;
0854 
0855     if (desc->percpu_enabled)
0856         return -EINVAL;
0857 
0858     desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
0859 
0860     if (!desc->percpu_enabled)
0861         return -ENOMEM;
0862 
0863     if (affinity)
0864         desc->percpu_affinity = affinity;
0865     else
0866         desc->percpu_affinity = cpu_possible_mask;
0867 
0868     irq_set_percpu_devid_flags(irq);
0869     return 0;
0870 }
0871 
0872 int irq_set_percpu_devid(unsigned int irq)
0873 {
0874     return irq_set_percpu_devid_partition(irq, NULL);
0875 }
0876 
0877 int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
0878 {
0879     struct irq_desc *desc = irq_to_desc(irq);
0880 
0881     if (!desc || !desc->percpu_enabled)
0882         return -EINVAL;
0883 
0884     if (affinity)
0885         cpumask_copy(affinity, desc->percpu_affinity);
0886 
0887     return 0;
0888 }
0889 EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
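
A sketch of how a per-CPU interrupt (a per-CPU timer, for instance) is usually wired up with these helpers. The my_* names and the per-CPU state are hypothetical; request_percpu_irq() and enable_percpu_irq() are the standard companions from the generic irq core:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct my_timer_evt {
    u64 ticks;              /* illustrative per-CPU state */
};

static DEFINE_PER_CPU(struct my_timer_evt, my_timer_evt);

static irqreturn_t my_timer_handler(int irq, void *dev_id)
{
    struct my_timer_evt *evt = dev_id; /* this CPU's instance */

    evt->ticks++;
    return IRQ_HANDLED;
}

static int my_timer_register(unsigned int irq)
{
    int err;

    irq_set_percpu_devid(irq);
    err = request_percpu_irq(irq, my_timer_handler, "my_timer",
                             &my_timer_evt);
    if (err)
        return err;

    /* Each CPU enables its own copy, typically from a hotplug callback */
    enable_percpu_irq(irq, IRQ_TYPE_NONE);
    return 0;
}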
0890 
0891 void kstat_incr_irq_this_cpu(unsigned int irq)
0892 {
0893     kstat_incr_irqs_this_cpu(irq_to_desc(irq));
0894 }
0895 
0896 /**
0897  * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
0898  * @irq:    The interrupt number
0899  * @cpu:    The cpu number
0900  *
0901  * Returns the sum of interrupt counts on @cpu since boot for
0902  * @irq. The caller must ensure that the interrupt is not removed
0903  * concurrently.
0904  */
0905 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
0906 {
0907     struct irq_desc *desc = irq_to_desc(irq);
0908 
0909     return desc && desc->kstat_irqs ?
0910             *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
0911 }
0912 
0913 static bool irq_is_nmi(struct irq_desc *desc)
0914 {
0915     return desc->istate & IRQS_NMI;
0916 }
0917 
0918 static unsigned int kstat_irqs(unsigned int irq)
0919 {
0920     struct irq_desc *desc = irq_to_desc(irq);
0921     unsigned int sum = 0;
0922     int cpu;
0923 
0924     if (!desc || !desc->kstat_irqs)
0925         return 0;
0926     if (!irq_settings_is_per_cpu_devid(desc) &&
0927         !irq_settings_is_per_cpu(desc) &&
0928         !irq_is_nmi(desc))
0929         return data_race(desc->tot_count);
0930 
0931     for_each_possible_cpu(cpu)
0932         sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
0933     return sum;
0934 }
0935 
0936 /**
0937  * kstat_irqs_usr - Get the statistics for an interrupt from thread context
0938  * @irq:    The interrupt number
0939  *
0940  * Returns the sum of interrupt counts on all cpus since boot for @irq.
0941  *
0942  * It uses rcu to protect the access since a concurrent removal of an
0943  * interrupt descriptor must observe an rcu grace period before
0944  * delayed_free_desc()/irq_kobj_release() runs.
0945  */
0946 unsigned int kstat_irqs_usr(unsigned int irq)
0947 {
0948     unsigned int sum;
0949 
0950     rcu_read_lock();
0951     sum = kstat_irqs(irq);
0952     rcu_read_unlock();
0953     return sum;
0954 }
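
A small sketch of how a thread-context consumer (a procfs-style report, for instance) might use these accessors; my_dump_irq_stats() is hypothetical, and the per-cpu loop assumes the caller keeps the descriptor alive, as show_interrupts() does under sparse_irq_lock:

#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void my_dump_irq_stats(unsigned int irq)
{
    int cpu;

    /* Safe from plain thread context: rcu protected internally */
    pr_info("irq %u: %u interrupts since boot\n", irq, kstat_irqs_usr(irq));

    /* Per-cpu breakdown; descriptor must not be freed concurrently */
    for_each_online_cpu(cpu)
        pr_info("  cpu%d: %u\n", cpu, kstat_irqs_cpu(irq, cpu));
}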
0955 
0956 #ifdef CONFIG_LOCKDEP
0957 void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
0958                  struct lock_class_key *request_class)
0959 {
0960     struct irq_desc *desc = irq_to_desc(irq);
0961 
0962     if (desc) {
0963         lockdep_set_class(&desc->lock, lock_class);
0964         lockdep_set_class(&desc->request_mutex, request_class);
0965     }
0966 }
0967 EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
0968 #endif