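// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments' K3 Interrupt Aggregator irqchip driver
 * (license, author and description per the MODULE_* declarations below)
 *
 * Author: Lokesh Vutla <lokeshvutla@ti.com>
 */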
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <asm-generic/msi.h>

#define TI_SCI_DEV_ID_MASK        0xffff
#define TI_SCI_DEV_ID_SHIFT       16
#define TI_SCI_IRQ_ID_MASK        0xffff
#define TI_SCI_IRQ_ID_SHIFT       0
#define HWIRQ_TO_DEVID(hwirq)     (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
                                   (TI_SCI_DEV_ID_MASK))
#define HWIRQ_TO_IRQID(hwirq)     ((hwirq) & (TI_SCI_IRQ_ID_MASK))
#define TO_HWIRQ(dev, index)      ((((dev) & TI_SCI_DEV_ID_MASK) << \
                                     TI_SCI_DEV_ID_SHIFT) | \
                                    ((index) & TI_SCI_IRQ_ID_MASK))

#define MAX_EVENTS_PER_VINT       64
#define VINT_ENABLE_SET_OFFSET    0x0
#define VINT_ENABLE_CLR_OFFSET    0x8
#define VINT_STATUS_OFFSET        0x18
#define VINT_STATUS_MASKED_OFFSET 0x20
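/**
 * struct ti_sci_inta_event_desc - Description of an event coming to the
 *                                 Interrupt Aggregator. This serves as a
 *                                 mapping table for global event, hwirq and
 *                                 vint bit.
 * @global_event:  Global event number corresponding to this event
 * @hwirq:         Hwirq of the incoming interrupt
 * @vint_bit:      Corresponding vint bit to which this event is attached
 */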
struct ti_sci_inta_event_desc {
        u16 global_event;
        u32 hwirq;
        u8 vint_bit;
};
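/**
 * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming
 *                                out of the Interrupt Aggregator.
 * @domain:       Pointer to the IRQ domain to which this vint belongs
 * @list:         List entry for the vint list
 * @event_map:    Bitmap managing the allocation of events to this vint
 * @events:       Array of event descriptors assigned to this vint
 * @parent_virq:  Linux IRQ number that gets attached to the parent
 * @vint_id:      TISCI vint ID
 */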
struct ti_sci_inta_vint_desc {
        struct irq_domain *domain;
        struct list_head list;
        DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT);
        struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT];
        unsigned int parent_virq;
        u16 vint_id;
};
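/**
 * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
 *                                 Interrupt Aggregator IRQ domain.
 * @sci:               Pointer to the TISCI handle
 * @vint:              TISCI resource pointer representing IA interrupts
 * @global_event:      TISCI resource pointer representing global events
 * @vint_list:         List of the vints active in the system
 * @vint_mutex:        Mutex protecting @vint_list
 * @base:              Base address of the memory mapped IO registers
 * @pdev:              Pointer to the platform device
 * @ti_sci_id:         TI-SCI device identifier
 * @unmapped_cnt:      Number of @unmapped_dev_ids entries
 * @unmapped_dev_ids:  Array of TI-SCI device identifiers of unmapped event
 *                     sources. Unmapped events are not part of the global
 *                     event map; they are converted to global events within
 *                     INTA, so the INTA device ID is used for the upper
 *                     16 bits of the event passed to TISCI.
 */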
struct ti_sci_inta_irq_domain {
        const struct ti_sci_handle *sci;
        struct ti_sci_resource *vint;
        struct ti_sci_resource *global_event;
        struct list_head vint_list;
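        /* Mutex to protect vint_list */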
        struct mutex vint_mutex;
        void __iomem *base;
        struct platform_device *pdev;
        u32 ti_sci_id;

        int unmapped_cnt;
        u16 *unmapped_dev_ids;
};

#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
                                        events[i])

static u16 ti_sci_inta_get_dev_id(struct ti_sci_inta_irq_domain *inta, u32 hwirq)
{
        u16 dev_id = HWIRQ_TO_DEVID(hwirq);
        int i;

        if (inta->unmapped_cnt == 0)
                return dev_id;
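        /*
         * For devices sending unmapped events use the INTA's own TI-SCI
         * device identifier, so that [dev_id, irq_id] pairs can be
         * converted to the equivalent global event number.
         */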
        for (i = 0; i < inta->unmapped_cnt; i++) {
                if (dev_id == inta->unmapped_dev_ids[i]) {
                        dev_id = inta->ti_sci_id;
                        break;
                }
        }

        return dev_id;
}
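/**
 * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
 * @desc:  Pointer to irq_desc corresponding to the irq
 */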
static void ti_sci_inta_irq_handler(struct irq_desc *desc)
{
        struct ti_sci_inta_vint_desc *vint_desc;
        struct ti_sci_inta_irq_domain *inta;
        struct irq_domain *domain;
        unsigned int bit;
        unsigned long val;

        vint_desc = irq_desc_get_handler_data(desc);
        domain = vint_desc->domain;
        inta = domain->host_data;

        chained_irq_enter(irq_desc_get_chip(desc), desc);

        val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
                            VINT_STATUS_MASKED_OFFSET);

        for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT)
                generic_handle_domain_irq(domain, vint_desc->events[bit].hwirq);

        chained_irq_exit(irq_desc_get_chip(desc), desc);
}
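/**
 * ti_sci_inta_xlate_irq() - Translate hwirq to parent's hwirq.
 * @inta:     IRQ domain corresponding to the Interrupt Aggregator
 * @vint_id:  Hardware irq corresponding to the above irq domain
 *
 * Return parent irq number if translation is available else -ENOENT.
 */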
static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta,
                                 u16 vint_id)
{
        struct device_node *np = dev_of_node(&inta->pdev->dev);
        u32 base, parent_base, size;
        const __be32 *range;
        int len;

        range = of_get_property(np, "ti,interrupt-ranges", &len);
        if (!range)
                return vint_id;

        for (len /= sizeof(*range); len >= 3; len -= 3) {
                base = be32_to_cpu(*range++);
                parent_base = be32_to_cpu(*range++);
                size = be32_to_cpu(*range++);

                if (base <= vint_id && vint_id < base + size)
                        return vint_id - base + parent_base;
        }

        return -ENOENT;
}
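/**
 * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator
 * @domain:  IRQ domain corresponding to the Interrupt Aggregator
 *
 * Return pointer to the allocated vint_desc if all went well, else a
 * corresponding ERR_PTR value.
 */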
static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
{
        struct ti_sci_inta_irq_domain *inta = domain->host_data;
        struct ti_sci_inta_vint_desc *vint_desc;
        struct irq_fwspec parent_fwspec;
        struct device_node *parent_node;
        unsigned int parent_virq;
        int p_hwirq, ret;
        u16 vint_id;

        vint_id = ti_sci_get_free_resource(inta->vint);
        if (vint_id == TI_SCI_RESOURCE_NULL)
                return ERR_PTR(-EINVAL);

        p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id);
        if (p_hwirq < 0) {
                ret = p_hwirq;
                goto free_vint;
        }

        vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
        if (!vint_desc) {
                ret = -ENOMEM;
                goto free_vint;
        }

        vint_desc->domain = domain;
        vint_desc->vint_id = vint_id;
        INIT_LIST_HEAD(&vint_desc->list);

        parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev));
        parent_fwspec.fwnode = of_node_to_fwnode(parent_node);

        if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
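                /* Parent is GIC */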
                parent_fwspec.param_count = 3;
                parent_fwspec.param[0] = 0;
                parent_fwspec.param[1] = p_hwirq - 32;
                parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
        } else {
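                /* Parent is Interrupt Router */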
                parent_fwspec.param_count = 1;
                parent_fwspec.param[0] = p_hwirq;
        }

        parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
        if (parent_virq == 0) {
                dev_err(&inta->pdev->dev, "Parent IRQ allocation failed\n");
                ret = -EINVAL;
                goto free_vint_desc;
        }
        vint_desc->parent_virq = parent_virq;

        list_add_tail(&vint_desc->list, &inta->vint_list);
        irq_set_chained_handler_and_data(vint_desc->parent_virq,
                                         ti_sci_inta_irq_handler, vint_desc);

        return vint_desc;
free_vint_desc:
        kfree(vint_desc);
free_vint:
        ti_sci_release_resource(inta->vint, vint_id);
        return ERR_PTR(ret);
}
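/**
 * ti_sci_inta_alloc_event() - Attach an event to an IA vint.
 * @vint_desc:  Pointer to the vint_desc to which the event gets attached
 * @free_bit:   Bit inside the vint to which the event gets attached
 * @hwirq:      hwirq of the input event
 *
 * Return event_desc pointer if all went ok, else an appropriate ERR_PTR.
 */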
static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc,
                                                              u16 free_bit,
                                                              u32 hwirq)
{
        struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data;
        struct ti_sci_inta_event_desc *event_desc;
        u16 dev_id, dev_index;
        int err;

        dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
        dev_index = HWIRQ_TO_IRQID(hwirq);

        event_desc = &vint_desc->events[free_bit];
        event_desc->hwirq = hwirq;
        event_desc->vint_bit = free_bit;
        event_desc->global_event = ti_sci_get_free_resource(inta->global_event);
        if (event_desc->global_event == TI_SCI_RESOURCE_NULL)
                return ERR_PTR(-EINVAL);

        err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
                                                      dev_id, dev_index,
                                                      inta->ti_sci_id,
                                                      vint_desc->vint_id,
                                                      event_desc->global_event,
                                                      free_bit);
        if (err)
                goto free_global_event;

        return event_desc;
free_global_event:
        ti_sci_release_resource(inta->global_event, event_desc->global_event);
        return ERR_PTR(err);
}
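/**
 * ti_sci_inta_alloc_irq() - Allocate an irq within the INTA domain
 * @domain:  irq_domain pointer corresponding to INTA
 * @hwirq:   hwirq of the input event
 *
 * Note: Allocation happens in the following manner:
 *   - Find a free bit in any of the vints available in the list.
 *   - If none is found, allocate a vint from the vint pool.
 *   - Attach the free bit to the event in hwirq.
 * Return event_desc if all went ok, else an appropriate ERR_PTR.
 */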
static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain,
                                                            u32 hwirq)
{
        struct ti_sci_inta_irq_domain *inta = domain->host_data;
        struct ti_sci_inta_vint_desc *vint_desc = NULL;
        struct ti_sci_inta_event_desc *event_desc;
        u16 free_bit;

        mutex_lock(&inta->vint_mutex);
        list_for_each_entry(vint_desc, &inta->vint_list, list) {
                free_bit = find_first_zero_bit(vint_desc->event_map,
                                               MAX_EVENTS_PER_VINT);
                if (free_bit != MAX_EVENTS_PER_VINT) {
                        set_bit(free_bit, vint_desc->event_map);
                        goto alloc_event;
                }
        }
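        /* No free bits available. Allocate a new vint */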
        vint_desc = ti_sci_inta_alloc_parent_irq(domain);
        if (IS_ERR(vint_desc)) {
                event_desc = ERR_CAST(vint_desc);
                goto unlock;
        }

        free_bit = find_first_zero_bit(vint_desc->event_map,
                                       MAX_EVENTS_PER_VINT);
        set_bit(free_bit, vint_desc->event_map);

alloc_event:
        event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
        if (IS_ERR(event_desc))
                clear_bit(free_bit, vint_desc->event_map);

unlock:
        mutex_unlock(&inta->vint_mutex);
        return event_desc;
}
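/**
 * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA
 * @inta:       Pointer to the inta domain
 * @vint_desc:  Pointer to the vint_desc that needs to be freed
 *
 * The vint is released only once all of its events have been freed.
 */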
static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta,
                                        struct ti_sci_inta_vint_desc *vint_desc)
{
        if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) {
                list_del(&vint_desc->list);
                ti_sci_release_resource(inta->vint, vint_desc->vint_id);
                irq_dispose_mapping(vint_desc->parent_virq);
                kfree(vint_desc);
        }
}
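/**
 * ti_sci_inta_free_irq() - Free an IRQ within the INTA domain
 * @event_desc:  Pointer to the event_desc that needs to be freed
 * @hwirq:       Hwirq of the IRQ
 */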
static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
                                 u32 hwirq)
{
        struct ti_sci_inta_vint_desc *vint_desc;
        struct ti_sci_inta_irq_domain *inta;
        u16 dev_id;

        vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
        inta = vint_desc->domain->host_data;
        dev_id = ti_sci_inta_get_dev_id(inta, hwirq);

        mutex_lock(&inta->vint_mutex);
        inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
                                                 dev_id, HWIRQ_TO_IRQID(hwirq),
                                                 inta->ti_sci_id,
                                                 vint_desc->vint_id,
                                                 event_desc->global_event,
                                                 event_desc->vint_bit);

        clear_bit(event_desc->vint_bit, vint_desc->event_map);
        ti_sci_release_resource(inta->global_event, event_desc->global_event);
        event_desc->global_event = TI_SCI_RESOURCE_NULL;
        event_desc->hwirq = 0;

        ti_sci_inta_free_parent_irq(inta, vint_desc);
        mutex_unlock(&inta->vint_mutex);
}
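/**
 * ti_sci_inta_request_resources() - Allocate resources for the input irq
 * @data:  Pointer to the corresponding irq_data
 *
 * Note: This is where the actual allocation happens for the input hwirq,
 *       including creating a parent irq for the vint. Doing this in
 *       irq_domain_ops.alloc() would deadlock the allocation path, so it
 *       is done in request_resources() instead.
 *
 * Return: 0 if all went well, else the corresponding error.
 */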
static int ti_sci_inta_request_resources(struct irq_data *data)
{
        struct ti_sci_inta_event_desc *event_desc;

        event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq);
        if (IS_ERR(event_desc))
                return PTR_ERR(event_desc);

        data->chip_data = event_desc;

        return 0;
}
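/**
 * ti_sci_inta_release_resources() - Release resources for the input irq
 * @data:  Pointer to the corresponding irq_data
 *
 * Note: Mirroring request_resources(), all unmapping and deletion of
 *       parent vint irqs happens here.
 */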
static void ti_sci_inta_release_resources(struct irq_data *data)
{
        struct ti_sci_inta_event_desc *event_desc;

        event_desc = irq_data_get_irq_chip_data(data);
        ti_sci_inta_free_irq(event_desc, data->hwirq);
}
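/**
 * ti_sci_inta_manage_event() - Control the event based on the offset
 * @data:    Pointer to the corresponding irq_data
 * @offset:  Register offset using which the event is controlled
 */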
static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset)
{
        struct ti_sci_inta_event_desc *event_desc;
        struct ti_sci_inta_vint_desc *vint_desc;
        struct ti_sci_inta_irq_domain *inta;

        event_desc = irq_data_get_irq_chip_data(data);
        vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
        inta = data->domain->host_data;

        writeq_relaxed(BIT(event_desc->vint_bit),
                       inta->base + vint_desc->vint_id * 0x1000 + offset);
}
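/**
 * ti_sci_inta_mask_irq() - Mask an event
 * @data:  Pointer to the corresponding irq_data
 */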
static void ti_sci_inta_mask_irq(struct irq_data *data)
{
        ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET);
}
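/**
 * ti_sci_inta_unmask_irq() - Unmask an event
 * @data:  Pointer to the corresponding irq_data
 */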
static void ti_sci_inta_unmask_irq(struct irq_data *data)
{
        ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET);
}
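/**
 * ti_sci_inta_ack_irq() - Ack an event
 * @data:  Pointer to the corresponding irq_data
 */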
static void ti_sci_inta_ack_irq(struct irq_data *data)
{
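        /*
         * Do not clear the event if the hardware is capable of sending
         * a down event.
         */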
        if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH)
                ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET);
}

static int ti_sci_inta_set_affinity(struct irq_data *d,
                                    const struct cpumask *mask_val, bool force)
{
        return -EINVAL;
}
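/**
 * ti_sci_inta_set_type() - Update the trigger type of the irq.
 * @data:  Pointer to the corresponding irq_data
 * @type:  Trigger type as specified by the user
 *
 * Note: This updates the handle_irq callback for level msi.
 *
 * Return 0 if all went well, else the appropriate error.
 */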
static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type)
{
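        /*
         * .alloc defaults to handle_edge_irq. If the user specifies that
         * the IRQ is a level MSI, switch the handler to handle_level_irq.
         */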
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQF_TRIGGER_HIGH:
                irq_set_handler_locked(data, handle_level_irq);
                return 0;
        case IRQF_TRIGGER_RISING:
                return 0;
        default:
                return -EINVAL;
        }
}

static struct irq_chip ti_sci_inta_irq_chip = {
        .name                   = "INTA",
        .irq_ack                = ti_sci_inta_ack_irq,
        .irq_mask               = ti_sci_inta_mask_irq,
        .irq_set_type           = ti_sci_inta_set_type,
        .irq_unmask             = ti_sci_inta_unmask_irq,
        .irq_set_affinity       = ti_sci_inta_set_affinity,
        .irq_request_resources  = ti_sci_inta_request_resources,
        .irq_release_resources  = ti_sci_inta_release_resources,
};
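/**
 * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain
 * @domain:   Domain to which the irqs belong
 * @virq:     Base linux virtual IRQ to be freed
 * @nr_irqs:  Number of continuous irqs to be freed
 */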
static void ti_sci_inta_irq_domain_free(struct irq_domain *domain,
                                        unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);

        irq_domain_reset_irq_data(data);
}
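/**
 * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt aggregator IRQs
 * @domain:   Pointer to the interrupt aggregator IRQ domain
 * @virq:     Corresponding Linux virtual IRQ number
 * @nr_irqs:  Continuous irqs to be allocated
 * @data:     Pointer to the firmware specifier
 *
 * No actual allocation happens here; see request_resources().
 *
 * Return 0 if all went well, else the appropriate error value.
 */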
static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain,
                                        unsigned int virq, unsigned int nr_irqs,
                                        void *data)
{
        msi_alloc_info_t *arg = data;

        irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip,
                            NULL, handle_edge_irq, NULL, NULL);

        return 0;
}

static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = {
        .free   = ti_sci_inta_irq_domain_free,
        .alloc  = ti_sci_inta_irq_domain_alloc,
};

static struct irq_chip ti_sci_inta_msi_irq_chip = {
        .name   = "MSI-INTA",
        .flags  = IRQCHIP_SUPPORTS_LEVEL_MSI,
};

static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
                                     struct msi_desc *desc)
{
        struct platform_device *pdev = to_platform_device(desc->dev);

        arg->desc = desc;
        arg->hwirq = TO_HWIRQ(pdev->id, desc->msi_index);
}

static struct msi_domain_ops ti_sci_inta_msi_ops = {
        .set_desc       = ti_sci_inta_msi_set_desc,
};

static struct msi_domain_info ti_sci_inta_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_LEVEL_CAPABLE),
        .ops    = &ti_sci_inta_msi_ops,
        .chip   = &ti_sci_inta_msi_irq_chip,
};

static int ti_sci_inta_get_unmapped_sources(struct ti_sci_inta_irq_domain *inta)
{
        struct device *dev = &inta->pdev->dev;
        struct device_node *node = dev_of_node(dev);
        struct of_phandle_iterator it;
        int count, err, ret, i;

        count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL);
        if (count <= 0)
                return 0;

        inta->unmapped_dev_ids = devm_kcalloc(dev, count,
                                              sizeof(*inta->unmapped_dev_ids),
                                              GFP_KERNEL);
        if (!inta->unmapped_dev_ids)
                return -ENOMEM;

        i = 0;
        of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) {
                u32 dev_id;

                ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id);
                if (ret) {
                        dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node);
                        of_node_put(it.node);
                        return ret;
                }
                inta->unmapped_dev_ids[i++] = dev_id;
        }

        inta->unmapped_cnt = count;

        return 0;
}

static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
{
        struct irq_domain *parent_domain, *domain, *msi_domain;
        struct device_node *parent_node, *node;
        struct ti_sci_inta_irq_domain *inta;
        struct device *dev = &pdev->dev;
        int ret;

        node = dev_of_node(dev);
        parent_node = of_irq_find_parent(node);
        if (!parent_node) {
                dev_err(dev, "Failed to get IRQ parent node\n");
                return -ENODEV;
        }

        parent_domain = irq_find_host(parent_node);
        if (!parent_domain)
                return -EPROBE_DEFER;

        inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL);
        if (!inta)
                return -ENOMEM;

        inta->pdev = pdev;
        inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
        if (IS_ERR(inta->sci))
                return dev_err_probe(dev, PTR_ERR(inta->sci),
                                     "ti,sci read fail\n");

        ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id);
        if (ret) {
                dev_err(dev, "missing 'ti,sci-dev-id' property\n");
                return -EINVAL;
        }

        inta->vint = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
                                              TI_SCI_RESASG_SUBTYPE_IA_VINT);
        if (IS_ERR(inta->vint)) {
                dev_err(dev, "VINT resource allocation failed\n");
                return PTR_ERR(inta->vint);
        }

        inta->global_event = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
                                                      TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT);
        if (IS_ERR(inta->global_event)) {
                dev_err(dev, "Global event resource allocation failed\n");
                return PTR_ERR(inta->global_event);
        }

        inta->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(inta->base))
                return PTR_ERR(inta->base);

        ret = ti_sci_inta_get_unmapped_sources(inta);
        if (ret)
                return ret;

        domain = irq_domain_add_linear(dev_of_node(dev),
                                       ti_sci_get_num_resources(inta->vint),
                                       &ti_sci_inta_irq_domain_ops, inta);
        if (!domain) {
                dev_err(dev, "Failed to allocate IRQ domain\n");
                return -ENOMEM;
        }

        msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node),
                                                       &ti_sci_inta_msi_domain_info,
                                                       domain);
        if (!msi_domain) {
                irq_domain_remove(domain);
                dev_err(dev, "Failed to allocate msi domain\n");
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&inta->vint_list);
        mutex_init(&inta->vint_mutex);

        dev_info(dev, "Interrupt Aggregator domain %d created\n", inta->ti_sci_id);

        return 0;
}

static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = {
        { .compatible = "ti,sci-inta", },
        { },
};
MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match);

static struct platform_driver ti_sci_inta_irq_domain_driver = {
        .probe = ti_sci_inta_irq_domain_probe,
        .driver = {
                .name = "ti-sci-inta",
                .of_match_table = ti_sci_inta_irq_domain_of_match,
        },
};
module_platform_driver(ti_sci_inta_irq_domain_driver);

MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
MODULE_LICENSE("GPL v2");