// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);

	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bugreports caused by random commandline masks
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);

static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;
	char *p = "";
	int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int c = irq_desc_kstat_cpu(desc, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for_each_action_of_desc(desc, action) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(irq);

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups = irq_groups,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial and failures in the late irq_sysfs_init()
		 * cannot be rolled back.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
	}
}

static void irq_sysfs_del(struct irq_desc *desc)
{
	/*
	 * Only invoke kobject_del() when kobject_add() was successfully
	 * invoked for the descriptor. This covers both early boot, where
	 * sysfs is not initialized yet, and the case of a failed
	 * kobject_add() invocation.
	 */
	if (irq_kobj_base)
		kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
	.release = irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
#endif

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;

	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);
	init_waitqueue_head(&desc->wait_for_threads);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock protects also show_interrupts() and
	 * kstat_irq_usr(). Once we deleted the descriptor from the
	 * sparse tree we can free it. Access in proc will fail to
	 * lookup the descriptor.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	irq_sysfs_del(desc);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupts to do the reverse
	 * lookup against the tree.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct irq_affinity_desc *affinity,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0; i < cnt; i++) {
			if (cpumask_empty(&affinity[i].mask))
				return -EINVAL;
		}
	}

	for (i = 0; i < cnt; i++) {
		const struct cpumask *mask = NULL;
		unsigned int flags = 0;

		if (affinity) {
			if (affinity->is_managed) {
				flags = IRQD_AFFINITY_MANAGED |
					IRQD_MANAGED_SHUTDOWN;
			}
			mask = &affinity->mask;
			node = cpu_to_node(cpumask_first(mask));
			affinity++;
		}

		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the number of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		init_waitqueue_head(&desc[i].wait_for_threads);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct irq_affinity_desc *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

int handle_irq_desc(struct irq_desc *desc)
{
	struct irq_data *data;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	if (WARN_ON_ONCE(!in_hardirq() && handle_enforce_irqctx(data)))
		return -EPERM;

	generic_handle_irq_desc(desc);
	return 0;
}
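
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 * Returns: 0 on success, -EINVAL if @irq has no descriptor, or -EPERM
 * when invoked from a context the interrupt does not allow.
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */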
int generic_handle_irq(unsigned int irq)
{
	return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
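
/**
 * generic_handle_irq_safe - Invoke the handler for a particular irq from any
 *			     context.
 * @irq:	The irq number to handle
 *
 * Returns: 0 on success, a negative value on error.
 *
 * Unlike generic_handle_irq(), this can be called from any context (IRQ
 * or process context), as it disables local interrupts around the
 * invocation of the handler.
 */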
int generic_handle_irq_safe(unsigned int irq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = handle_irq_desc(irq_to_desc(irq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
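
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * demultiplexing child interrupts from process context, e.g. from a
 * threaded handler, can use generic_handle_irq_safe() where plain
 * generic_handle_irq() would require IRQ context:
 *
 *	static irqreturn_t demux_thread_fn(int irq, void *data)
 *	{
 *		struct demux_chip *chip = data;
 *		unsigned int child = demux_chip_read_pending(chip);
 *
 *		generic_handle_irq_safe(chip->irq_base + child);
 *		return IRQ_HANDLED;
 *	}
 */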

#ifdef CONFIG_IRQ_DOMAIN
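/**
 * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
 *			       to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns: 0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */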
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
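
/**
 * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
 *			       to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW nmi number to convert to a logical one
 *
 * Returns: 0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an NMI context with irq regs
 * initialized.
 */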
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
{
	WARN_ON_ONCE(!in_nmi());
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
#endif /* CONFIG_IRQ_DOMAIN */
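
/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */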
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
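
/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */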
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct irq_affinity_desc *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
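
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */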
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
	__releases(&desc->lock)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
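
/**
 * irq_set_percpu_devid_partition - Mark an irq as per-CPU with a devid and
 *				    restrict it to a subset of CPUs
 * @irq:	The interrupt number
 * @affinity:	cpumask of CPUs which can handle the interrupt, or NULL for
 *		all possible CPUs
 *
 * Returns 0 on success, -EINVAL if the descriptor is missing or already
 * marked, or -ENOMEM on allocation failure.
 */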
int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
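
/**
 * kstat_incr_irq_this_cpu - Increment the statistics for an interrupt
 *			     on the current cpu
 * @irq:	The interrupt number
 */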
void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}
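
/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for @irq.
 * The caller must ensure that the interrupt is not removed concurrently.
 */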
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

static bool irq_is_nmi(struct irq_desc *desc)
{
	return desc->istate & IRQS_NMI;
}

static unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc) &&
	    !irq_is_nmi(desc))
		return data_race(desc->tot_count);

	for_each_possible_cpu(cpu)
		sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
	return sum;
}
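
/**
 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 *
 * It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor is observing an rcu grace period before
 * delayed_free_desc()/irq_kobj_release().
 */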
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	rcu_read_lock();
	sum = kstat_irqs(irq);
	rcu_read_unlock();
	return sum;
}

#ifdef CONFIG_LOCKDEP
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
			     struct lock_class_key *request_class)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		lockdep_set_class(&desc->lock, lock_class);
		lockdep_set_class(&desc->request_mutex, request_class);
	}
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
#endif