// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/irq/msi.c
 *
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts and
 * the generic MSI irq domain infrastructure.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include "internals.h"

static inline int msi_sysfs_create_group(struct device *dev);

/**
 * msi_alloc_desc - Allocate an initialized msi_desc
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array of size @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
				       const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

	if (!desc)
		return NULL;

	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}
	return desc;
}

static void msi_free_desc(struct msi_desc *desc)
{
	kfree(desc->affinity);
	kfree(desc);
}

static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
{
	int ret;

	desc->msi_index = index;
	ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);
	if (ret)
		msi_free_desc(desc);
	return ret;
}

/**
 * msi_add_msi_desc - Allocate and initialize an MSI descriptor
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy type specific data to the new descriptor. */
	desc->pci = init_desc->pci;
	return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
}

/**
 * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev:	Pointer to the device for which the descriptors are allocated
 * @index:	Index for the first MSI descriptor
 * @ndesc:	Number of descriptors to allocate
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
	unsigned int idx, last = index + ndesc - 1;
	struct msi_desc *desc;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	for (idx = index; idx <= last; idx++) {
		desc = msi_alloc_desc(dev, 1, NULL);
		if (!desc)
			goto fail_mem;
		ret = msi_insert_desc(dev->msi.data, desc, idx);
		if (ret)
			goto fail;
	}
	return 0;

fail_mem:
	ret = -ENOMEM;
fail:
	msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, last);
	return ret;
}

static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
	switch (filter) {
	case MSI_DESC_ALL:
		return true;
	case MSI_DESC_NOTASSOCIATED:
		return !desc->irq;
	case MSI_DESC_ASSOCIATED:
		return !!desc->irq;
	}
	WARN_ON_ONCE(1);
	return false;
}

/**
 * msi_free_msi_descs_range - Free MSI descriptors of a device
 * @dev:		Device to free the descriptors
 * @filter:		Descriptor state filter
 * @first_index:	Index to start freeing from
 * @last_index:		Last index to be freed
 */
void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
			      unsigned int first_index, unsigned int last_index)
{
	struct xarray *xa = &dev->msi.data->__store;
	struct msi_desc *desc;
	unsigned long idx;

	lockdep_assert_held(&dev->msi.data->mutex);

	xa_for_each_range(xa, idx, desc, first_index, last_index) {
		if (msi_desc_match(desc, filter)) {
			xa_erase(xa, idx);
			msi_free_desc(desc);
		}
	}
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;

	WARN_ON_ONCE(!xa_empty(&md->__store));
	xa_destroy(&md->__store);
	dev->msi.data = NULL;
}

/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;
	int ret;

	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	ret = msi_sysfs_create_group(dev);
	if (ret) {
		devres_free(md);
		return ret;
	}

	xa_init(&md->__store);
	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}

/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);

/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Invalidate the index which was cached by the iterator */
	dev->msi.data->__iter_idx = MSI_MAX_INDEX;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);
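
/*
 * Example (illustrative sketch, not part of this file): walking the
 * descriptors of a device under the descriptor mutex. msi_for_each_desc()
 * is provided by <linux/msi.h>; "handle_one" is a hypothetical helper.
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		handle_one(desc->irq);
 *	msi_unlock_descs(dev);
 */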

static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	xa_for_each_start(&md->__store, md->__iter_idx, desc, md->__iter_idx) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	md->__iter_idx = MSI_MAX_INDEX;
	return NULL;
}

/**
 * msi_first_desc - Get the first MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md))
		return NULL;

	lockdep_assert_held(&md->mutex);

	md->__iter_idx = 0;
	return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_first_desc);

/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md))
		return NULL;

	lockdep_assert_held(&md->mutex);

	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
		return NULL;

	md->__iter_idx++;
	return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);

/**
 * msi_get_virq - Return Linux interrupt number of an MSI interrupt
 * @dev:	Device to operate on
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
	struct msi_desc *desc;
	unsigned int ret = 0;
	bool pcimsi;

	if (!dev->msi.data)
		return 0;

	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

	msi_lock_descs(dev);
	desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);
	if (desc && desc->irq) {
		/*
		 * PCI-MSI has only one descriptor for multiple interrupts.
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (pcimsi) {
			if (index < desc->nvec_used)
				ret = desc->irq + index;
		} else {
			ret = desc->irq;
		}
	}
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_get_virq);
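
/*
 * Example (illustrative sketch, not part of this file): a driver that has
 * allocated MSI vectors translates a vector index into a Linux interrupt
 * number before requesting it. "my_handler" and "my_data" are hypothetical.
 *
 *	unsigned int virq = msi_get_virq(&pdev->dev, 0);
 *
 *	if (virq)
 *		ret = request_irq(virq, my_handler, 0, "my_dev", my_data);
 */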

#ifdef CONFIG_SYSFS
static struct attribute *msi_dev_attrs[] = {
	NULL
};

static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};

static inline int msi_sysfs_create_group(struct device *dev)
{
	return devm_device_add_group(dev, &msi_irqs_group);
}

static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* MSI vs. MSIX is per device not per interrupt */
	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}

static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}

static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}
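
/*
 * The result is one attribute per allocated Linux interrupt under the
 * device's "msi_irqs" group; reading it yields the mode of the vector:
 *
 *	/sys/devices/.../msi_irqs/<irq-number>	-> "msi" or "msix"
 */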

#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) which will get sysfs entries
 */
int msi_device_populate_sysfs(struct device *dev)
{
	struct msi_desc *desc;
	int ret;

	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		if (desc->sysfs_attrs)
			continue;
		ret = msi_sysfs_populate_desc(dev, desc);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) for which to remove
 *		sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	struct msi_desc *desc;

	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
#else /* CONFIG_SYSFS */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
#endif /* !CONFIG_SYSFS */

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-triggered, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			/* Unwind all interrupts initialized so far */
			if (ops->msi_free) {
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_check		= msi_domain_ops_check,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}
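
/*
 * Example (illustrative sketch, not part of this file): an interrupt
 * controller driver stacks an MSI domain on top of its parent domain.
 * "my_msi_chip", "my_msi_ops", "my_fwnode" and "parent" are hypothetical;
 * the flags request the default chip/domain op fill-ins provided above.
 *
 *	static struct msi_domain_info my_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.ops	= &my_msi_ops,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	domain = msi_create_irq_domain(my_fwnode, &my_msi_info, parent);
 */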

int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq_base, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret, virq;

	msi_lock_descs(dev);
	ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
	if (ret)
		goto unlock;

	for (virq = virq_base; virq < virq_base + nvec; virq++) {
		desc = xa_load(&dev->msi.data->__store, virq);
		desc->irq = virq;

		ops->set_desc(arg, desc);
		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
		if (ret)
			goto fail;

		irq_set_msi_desc(virq, desc);
	}
	msi_unlock_descs(dev);
	return 0;

fail:
	for (--virq; virq >= virq_base; virq--)
		irq_domain_free_irqs_common(domain, virq, 1);
	msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
unlock:
	msi_unlock_descs(dev);
	return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = msi_first_desc(dev, MSI_DESC_ALL);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}

static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}

#define VIRQ_CAN_RESERVE	0x01
#define VIRQ_ACTIVATE		0x02
#define VIRQ_NOMASK_QUIRK	0x04

static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);
		if (vflags & VIRQ_NOMASK_QUIRK)
			irqd_set_msi_nomask_quirk(irqd);

		/*
		 * If the interrupt is managed but no CPU is available to
		 * service it, shut it down until better times. Note that
		 * we only do this on the !RESERVE path as x86 (the only
		 * architecture using this flag) deals with this in a
		 * different way by using a catch-all vector.
		 */
		if ((vflags & VIRQ_ACTIVATE) &&
		    irqd_affinity_is_managed(irqd) &&
		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
					cpu_online_mask)) {
			irqd_set_managed_shutdown(irqd);
			return 0;
		}
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;

	/*
	 * In reservation mode the interrupt was only provisionally
	 * activated to program the reserved dummy vector. Clear the
	 * activated flag again so the real activation happens when the
	 * interrupt is requested.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}

int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg = { };
	unsigned int vflags = 0;
	struct msi_desc *desc;
	int allocated = 0;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev)) {
		vflags |= VIRQ_CAN_RESERVE;
		/*
		 * MSI affinity setting requires a special quirk (X86) when
		 * reservation mode is active and the device cannot mask
		 * its MSI entries.
		 */
		if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
			vflags |= VIRQ_NOMASK_QUIRK;
	}

	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		if (info->flags & MSI_FLAG_DEV_SYSFS) {
			ret = msi_sysfs_populate_desc(dev, desc);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}

static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
					   struct device *dev,
					   unsigned int num_descs)
{
	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
		return 0;

	return msi_add_simple_msi_descs(dev, 0, num_descs);
}

/**
 * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from an MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
				       int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
	if (ret)
		return ret;

	ret = ops->domain_alloc_irqs(domain, dev, nvec);
	if (ret)
		msi_domain_free_irqs_descs_locked(domain, dev);
	return ret;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from an MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
	int ret;

	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
	msi_unlock_descs(dev);
	return ret;
}
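
/*
 * Example (illustrative sketch, not part of this file): allocation and
 * teardown pair up; "nvec" is a hypothetical vector count.
 *
 *	ret = msi_domain_alloc_irqs(domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *	...
 *	msi_domain_free_irqs(domain, dev);
 */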

void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct irq_data *irqd;
	struct msi_desc *desc;
	int i;

	/* Only handle MSI entries which have an interrupt associated */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		if (info->flags & MSI_FLAG_DEV_SYSFS)
			msi_sysfs_remove_desc(dev, desc);
		desc->irq = 0;
	}
}

static void msi_domain_free_msi_descs(struct msi_domain_info *info,
				      struct device *dev)
{
	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
		msi_free_msi_descs(dev);
}

/**
 * msi_domain_free_irqs_descs_locked - Free interrupts from an MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation.
 */
void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	lockdep_assert_held(&dev->msi.data->mutex);

	ops->domain_free_irqs(domain, dev);
	msi_domain_free_msi_descs(info, dev);
}

/**
 * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_descs_locked(domain, dev);
	msi_unlock_descs(dev);
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */