// SPDX-License-Identifier: GPL-2.0
/*
 * Support of MSI, HPET and DMAR interrupts on x86.
 *
 * NOTE(review): original file header/copyright comment was lost in
 * extraction — restore from upstream history before committing.
 */
0010 #include <linux/mm.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/irq.h>
0013 #include <linux/pci.h>
0014 #include <linux/dmar.h>
0015 #include <linux/hpet.h>
0016 #include <linux/msi.h>
0017 #include <asm/irqdomain.h>
0018 #include <asm/hpet.h>
0019 #include <asm/hw_irq.h>
0020 #include <asm/apic.h>
0021 #include <asm/irq_remapping.h>
0022 #include <asm/xen/hypervisor.h>
0023
0024 struct irq_domain *x86_pci_msi_default_domain __ro_after_init;
0025
0026 static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
0027 {
0028 struct msi_msg msg[2] = { [1] = { }, };
0029
0030 __irq_msi_compose_msg(cfg, msg, false);
0031 irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
0032 }
0033
/*
 * Change the affinity of a (possibly non-maskable) MSI interrupt.
 *
 * For non-maskable, non-remapped MSI the address/data pair update is not
 * atomic from the device's point of view, so moving to a different CPU
 * and vector must be done carefully to avoid losing a stray interrupt.
 */
static int
msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
{
	struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd);
	struct irq_data *parent = irqd->parent_data;
	unsigned int cpu;
	int ret;

	/* Save the current configuration before the parent reassigns it */
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
	old_cfg = *cfg;

	/* Allocate a new target vector via the vector domain parent */
	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * Direct update of the MSI message is possible when:
	 *
	 *  - The MSI is maskable (the nomask quirk bit is not set). The
	 *    stray-interrupt problem only exists for non-maskable MSI.
	 *  - The new vector is the same as the old vector.
	 *  - The old vector was MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt is
	 *    just starting up).
	 *  - The interrupt is not yet started up.
	 *  - The new destination CPU is the same as the old one.
	 */
	if (!irqd_msi_nomask_quirk(irqd) ||
	    cfg->vector == old_cfg.vector ||
	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
	    !irqd_is_started(irqd) ||
	    cfg->dest_apicid == old_cfg.dest_apicid) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
	}

	/*
	 * Paranoia: validate that the interrupt's current target is the
	 * local CPU; the transition scheme below relies on it.
	 */
	if (WARN_ON_ONCE(cpu != smp_processor_id())) {
		irq_msi_update_msg(irqd, cfg);
		return ret;
	}

	/*
	 * Redirect the interrupt to the new vector on the current CPU
	 * first. This might cause a spurious interrupt on this vector if
	 * the device raises an interrupt right between this update and the
	 * update to the final destination CPU.
	 *
	 * If the vector is in use then the installed device handler will
	 * denote it as spurious, which is harmless as this is a rare event
	 * and handlers must cope with spurious interrupts anyway. If the
	 * vector is unused it is marked below so it won't trigger the
	 * 'No irq handler for vector' warning.
	 *
	 * Holding vector lock prevents concurrent updates of the affected
	 * vector.
	 */
	lock_vector_lock();

	/*
	 * Mark the new target vector on the local CPU if it is currently
	 * unused. VECTOR_RETRIGGERED is reused from the CPU-hotplug path
	 * for the same purpose. This cannot fire before set_affinity()
	 * completes because the current CPU has interrupts disabled.
	 */
	if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector])))
		this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED);

	/* Redirect to the new vector on the local CPU temporarily */
	old_cfg.vector = cfg->vector;
	irq_msi_update_msg(irqd, &old_cfg);

	/* Now transition it to the target CPU */
	irq_msi_update_msg(irqd, cfg);

	/*
	 * All interrupts after this point are now targeted at the new
	 * vector/CPU.
	 *
	 * Drop vector lock before testing whether the temporary assignment
	 * to the local CPU was hit by an interrupt raised in the device,
	 * because the retrigger function acquires vector lock again.
	 */
	unlock_vector_lock();

	/*
	 * Check whether the transition raced with a device interrupt that
	 * is pending in the local APIC's IRR. Safe outside vector lock:
	 * this only reads the IRR, not the vector store, and the
	 * irq_desc::lock of this interrupt is still held with interrupts
	 * disabled.
	 */
	if (lapic_vector_set_in_irr(cfg->vector))
		irq_data_get_irq_chip(irqd)->irq_retrigger(irqd);

	return ret;
}
0144
0145
0146
0147
0148
0149 static struct irq_chip pci_msi_controller = {
0150 .name = "PCI-MSI",
0151 .irq_unmask = pci_msi_unmask_irq,
0152 .irq_mask = pci_msi_mask_irq,
0153 .irq_ack = irq_chip_ack_parent,
0154 .irq_retrigger = irq_chip_retrigger_hierarchy,
0155 .irq_set_affinity = msi_set_affinity,
0156 .flags = IRQCHIP_SKIP_SET_WAKE |
0157 IRQCHIP_AFFINITY_PRE_STARTUP,
0158 };
0159
0160 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
0161 msi_alloc_info_t *arg)
0162 {
0163 init_irq_alloc_info(arg, NULL);
0164 if (to_pci_dev(dev)->msix_enabled) {
0165 arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
0166 } else {
0167 arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
0168 arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
0169 }
0170
0171 return 0;
0172 }
0173 EXPORT_SYMBOL_GPL(pci_msi_prepare);
0174
/* Domain ops shared by the plain and interrupt-remapped PCI-MSI domains */
static struct msi_domain_ops pci_msi_domain_ops = {
	.msi_prepare	= pci_msi_prepare,
};
0178
/*
 * Domain info for the non-remapped PCI-MSI domain. Note: no
 * MSI_FLAG_MULTI_PCI_MSI here, unlike the remapped variant below.
 */
static struct msi_domain_info pci_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &pci_msi_domain_ops,
	.chip		= &pci_msi_controller,
	.handler	= handle_edge_irq,
	.handler_name	= "edge",
};
0187
0188 struct irq_domain * __init native_create_pci_msi_domain(void)
0189 {
0190 struct fwnode_handle *fn;
0191 struct irq_domain *d;
0192
0193 if (disable_apic)
0194 return NULL;
0195
0196 fn = irq_domain_alloc_named_fwnode("PCI-MSI");
0197 if (!fn)
0198 return NULL;
0199
0200 d = pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
0201 x86_vector_domain);
0202 if (!d) {
0203 irq_domain_free_fwnode(fn);
0204 pr_warn("Failed to initialize PCI-MSI irqdomain.\n");
0205 } else {
0206 d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
0207 }
0208 return d;
0209 }
0210
/* Install the platform's PCI-MSI parent domain (native or hypervisor) */
void __init x86_create_pci_msi_domain(void)
{
	x86_pci_msi_default_domain = x86_init.irqs.create_pci_msi_domain();
}
0215
0216 #ifdef CONFIG_IRQ_REMAP
/*
 * IRQ chip for interrupt-remapped PCI MSI/MSI-X. No .irq_set_affinity:
 * with remapping the default hierarchy affinity handling suffices.
 */
static struct irq_chip pci_msi_ir_controller = {
	.name			= "IR-PCI-MSI",
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_mask		= pci_msi_mask_irq,
	.irq_ack		= irq_chip_ack_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_AFFINITY_PRE_STARTUP,
};
0226
/* Remapped PCI-MSI domain info; multi-MSI is supported here */
static struct msi_domain_info pci_msi_ir_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
	.ops		= &pci_msi_domain_ops,
	.chip		= &pci_msi_ir_controller,
	.handler	= handle_edge_irq,
	.handler_name	= "edge",
};
0235
0236 struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
0237 const char *name, int id)
0238 {
0239 struct fwnode_handle *fn;
0240 struct irq_domain *d;
0241
0242 fn = irq_domain_alloc_named_id_fwnode(name, id);
0243 if (!fn)
0244 return NULL;
0245 d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
0246 if (!d)
0247 irq_domain_free_fwnode(fn);
0248 return d;
0249 }
0250 #endif
0251
0252 #ifdef CONFIG_DMAR_TABLE
0253
0254
0255
0256
0257
0258
0259 static void dmar_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
0260 {
0261 __irq_msi_compose_msg(irqd_cfg(data), msg, true);
0262 }
0263
/* Write the composed message into the DMAR unit's registers */
static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	dmar_msi_write(data->irq, msg);
}
0268
/* IRQ chip for the DMAR unit's event interrupt */
static struct irq_chip dmar_msi_controller = {
	.name			= "DMAR-MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= irq_chip_ack_parent,
	.irq_set_affinity	= msi_domain_set_affinity,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg	= dmar_msi_compose_msg,
	.irq_write_msi_msg	= dmar_msi_write_msg,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_AFFINITY_PRE_STARTUP,
};
0281
/* Per-interrupt init: install chip, handler and per-unit data for @virq */
static int dmar_msi_init(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq,
			 irq_hw_number_t hwirq, msi_alloc_info_t *arg)
{
	irq_domain_set_info(domain, virq, arg->devid, info->chip, NULL,
			    handle_edge_irq, arg->data, "edge");

	return 0;
}
0291
/* Domain ops for the DMAR-MSI domain */
static struct msi_domain_ops dmar_msi_domain_ops = {
	.msi_init	= dmar_msi_init,
};
0295
/* Domain info for the DMAR-MSI domain */
static struct msi_domain_info dmar_msi_domain_info = {
	.ops		= &dmar_msi_domain_ops,
	.chip		= &dmar_msi_controller,
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS,
};
0301
0302 static struct irq_domain *dmar_get_irq_domain(void)
0303 {
0304 static struct irq_domain *dmar_domain;
0305 static DEFINE_MUTEX(dmar_lock);
0306 struct fwnode_handle *fn;
0307
0308 mutex_lock(&dmar_lock);
0309 if (dmar_domain)
0310 goto out;
0311
0312 fn = irq_domain_alloc_named_fwnode("DMAR-MSI");
0313 if (fn) {
0314 dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
0315 x86_vector_domain);
0316 if (!dmar_domain)
0317 irq_domain_free_fwnode(fn);
0318 }
0319 out:
0320 mutex_unlock(&dmar_lock);
0321 return dmar_domain;
0322 }
0323
0324 int dmar_alloc_hwirq(int id, int node, void *arg)
0325 {
0326 struct irq_domain *domain = dmar_get_irq_domain();
0327 struct irq_alloc_info info;
0328
0329 if (!domain)
0330 return -1;
0331
0332 init_irq_alloc_info(&info, NULL);
0333 info.type = X86_IRQ_ALLOC_TYPE_DMAR;
0334 info.devid = id;
0335 info.hwirq = id;
0336 info.data = arg;
0337
0338 return irq_domain_alloc_irqs(domain, 1, node, &info);
0339 }
0340
/* Free an interrupt allocated with dmar_alloc_hwirq() */
void dmar_free_hwirq(int irq)
{
	irq_domain_free_irqs(irq, 1);
}
0345 #endif
0346
/*
 * Arch hook for restoring MSI state after e.g. resume. Only the Xen
 * initial domain needs special handling; xen_initdom_restore_msi()
 * reports whether it took care of @dev.
 */
bool arch_restore_msi_irqs(struct pci_dev *dev)
{
	return xen_initdom_restore_msi(dev);
}