0001
0002
0003
0004
0005
0006 #include <linux/of_address.h>
0007 #include <linux/of_irq.h>
0008 #include <linux/slab.h>
0009 #include <linux/irqchip.h>
0010 #include <linux/syscore_ops.h>
0011
0012 #define IMR_NUM 4
0013 #define GPC_MAX_IRQS (IMR_NUM * 32)
0014
0015 #define GPC_IMR1_CORE0 0x30
0016 #define GPC_IMR1_CORE1 0x40
0017 #define GPC_IMR1_CORE2 0x1c0
0018 #define GPC_IMR1_CORE3 0x1d0
0019
0020
/*
 * Per-controller state for the i.MX GPCv2 wakeup interrupt controller.
 * A single instance is allocated at init time (see imx_gpcv2_instance).
 */
struct gpcv2_irqchip_data {
	struct raw_spinlock	rlock;		/* serializes IMR read-modify-write and wakeup_sources updates */
	void __iomem		*gpc_base;	/* mapped GPC register block */
	u32			wakeup_sources[IMR_NUM]; /* IMR image applied on suspend (cleared bit = wakeup source) */
	u32			saved_irq_mask[IMR_NUM]; /* run-time IMR values saved across suspend/resume */
	u32			cpu2wakeup;	/* byte offset of the IMR bank used for wakeup handling */
};
0028
/* Singleton, written once during init; read by the syscore suspend/resume ops. */
static struct gpcv2_irqchip_data *imx_gpcv2_instance __ro_after_init;
0030
0031 static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
0032 {
0033 return cd->gpc_base + cd->cpu2wakeup + i * 4;
0034 }
0035
0036 static int gpcv2_wakeup_source_save(void)
0037 {
0038 struct gpcv2_irqchip_data *cd;
0039 void __iomem *reg;
0040 int i;
0041
0042 cd = imx_gpcv2_instance;
0043 if (!cd)
0044 return 0;
0045
0046 for (i = 0; i < IMR_NUM; i++) {
0047 reg = gpcv2_idx_to_reg(cd, i);
0048 cd->saved_irq_mask[i] = readl_relaxed(reg);
0049 writel_relaxed(cd->wakeup_sources[i], reg);
0050 }
0051
0052 return 0;
0053 }
0054
0055 static void gpcv2_wakeup_source_restore(void)
0056 {
0057 struct gpcv2_irqchip_data *cd;
0058 int i;
0059
0060 cd = imx_gpcv2_instance;
0061 if (!cd)
0062 return;
0063
0064 for (i = 0; i < IMR_NUM; i++)
0065 writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i));
0066 }
0067
/* Swap in the wakeup mask on suspend; restore the run-time mask on resume. */
static struct syscore_ops imx_gpcv2_syscore_ops = {
	.suspend	= gpcv2_wakeup_source_save,
	.resume		= gpcv2_wakeup_source_restore,
};
0072
0073 static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
0074 {
0075 struct gpcv2_irqchip_data *cd = d->chip_data;
0076 unsigned int idx = d->hwirq / 32;
0077 unsigned long flags;
0078 u32 mask, val;
0079
0080 raw_spin_lock_irqsave(&cd->rlock, flags);
0081 mask = BIT(d->hwirq % 32);
0082 val = cd->wakeup_sources[idx];
0083
0084 cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
0085 raw_spin_unlock_irqrestore(&cd->rlock, flags);
0086
0087
0088
0089
0090
0091
0092 return 0;
0093 }
0094
0095 static void imx_gpcv2_irq_unmask(struct irq_data *d)
0096 {
0097 struct gpcv2_irqchip_data *cd = d->chip_data;
0098 void __iomem *reg;
0099 u32 val;
0100
0101 raw_spin_lock(&cd->rlock);
0102 reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
0103 val = readl_relaxed(reg);
0104 val &= ~BIT(d->hwirq % 32);
0105 writel_relaxed(val, reg);
0106 raw_spin_unlock(&cd->rlock);
0107
0108 irq_chip_unmask_parent(d);
0109 }
0110
0111 static void imx_gpcv2_irq_mask(struct irq_data *d)
0112 {
0113 struct gpcv2_irqchip_data *cd = d->chip_data;
0114 void __iomem *reg;
0115 u32 val;
0116
0117 raw_spin_lock(&cd->rlock);
0118 reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
0119 val = readl_relaxed(reg);
0120 val |= BIT(d->hwirq % 32);
0121 writel_relaxed(val, reg);
0122 raw_spin_unlock(&cd->rlock);
0123
0124 irq_chip_mask_parent(d);
0125 }
0126
/*
 * irq_chip for GPC-routed interrupts: mask/unmask additionally touch the
 * GPC IMR registers before forwarding to the parent chip; eoi, retrigger,
 * type and affinity are delegated straight to the parent.
 */
static struct irq_chip gpcv2_irqchip_data_chip = {
	.name			= "GPCv2",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= imx_gpcv2_irq_mask,
	.irq_unmask		= imx_gpcv2_irq_unmask,
	.irq_set_wake		= imx_gpcv2_irq_set_wake,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};
0139
0140 static int imx_gpcv2_domain_translate(struct irq_domain *d,
0141 struct irq_fwspec *fwspec,
0142 unsigned long *hwirq,
0143 unsigned int *type)
0144 {
0145 if (is_of_node(fwspec->fwnode)) {
0146 if (fwspec->param_count != 3)
0147 return -EINVAL;
0148
0149
0150 if (fwspec->param[0] != 0)
0151 return -EINVAL;
0152
0153 *hwirq = fwspec->param[1];
0154 *type = fwspec->param[2];
0155 return 0;
0156 }
0157
0158 return -EINVAL;
0159 }
0160
0161 static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
0162 unsigned int irq, unsigned int nr_irqs,
0163 void *data)
0164 {
0165 struct irq_fwspec *fwspec = data;
0166 struct irq_fwspec parent_fwspec;
0167 irq_hw_number_t hwirq;
0168 unsigned int type;
0169 int err;
0170 int i;
0171
0172 err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type);
0173 if (err)
0174 return err;
0175
0176 if (hwirq >= GPC_MAX_IRQS)
0177 return -EINVAL;
0178
0179 for (i = 0; i < nr_irqs; i++) {
0180 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
0181 &gpcv2_irqchip_data_chip, domain->host_data);
0182 }
0183
0184 parent_fwspec = *fwspec;
0185 parent_fwspec.fwnode = domain->parent->fwnode;
0186 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
0187 &parent_fwspec);
0188 }
0189
/* Hierarchical domain ops; freeing is handled by the generic helper. */
static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
	.translate	= imx_gpcv2_domain_translate,
	.alloc		= imx_gpcv2_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};
0195
/* .data encodes the number of CPU cores (IMR banks to initialize). */
static const struct of_device_id gpcv2_of_match[] = {
	{ .compatible = "fsl,imx7d-gpc",  .data = (const void *) 2 },
	{ .compatible = "fsl,imx8mq-gpc", .data = (const void *) 4 },
	{ /* END */ }
0201
/*
 * Probe the GPCv2 interrupt controller: map its registers, create an irq
 * domain stacked on the parent (interrupt-parent) domain, mask all
 * interrupts for every core's IMR bank, and register syscore ops for the
 * suspend-time wakeup-mask swap.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
					 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct gpcv2_irqchip_data *cd;
	const struct of_device_id *id;
	unsigned long core_num;
	int i;

	/* The GPC is a secondary controller; it must stack on a parent. */
	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	id = of_match_node(gpcv2_of_match, node);
	if (!id) {
		pr_err("%pOF: unknown compatibility string\n", node);
		return -ENODEV;
	}

	/* Match data carries the number of cores (2 or 4, see gpcv2_of_match). */
	core_num = (unsigned long)id->data;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to get parent domain\n", node);
		return -ENXIO;
	}

	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	raw_spin_lock_init(&cd->rlock);

	cd->gpc_base = of_iomap(node, 0);
	if (!cd->gpc_base) {
		pr_err("%pOF: unable to map gpc registers\n", node);
		kfree(cd);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
				node, &gpcv2_irqchip_data_domain_ops, cd);
	if (!domain) {
		iounmap(cd->gpc_base);
		kfree(cd);
		return -ENOMEM;
	}
	irq_set_default_host(domain);

	/* Start with every interrupt masked in each core's IMR bank. */
	for (i = 0; i < IMR_NUM; i++) {
		void __iomem *reg = cd->gpc_base + i * 4;

		switch (core_num) {
		case 4:
			writel_relaxed(~0, reg + GPC_IMR1_CORE2);
			writel_relaxed(~0, reg + GPC_IMR1_CORE3);
			fallthrough;
		case 2:
			writel_relaxed(~0, reg + GPC_IMR1_CORE0);
			writel_relaxed(~0, reg + GPC_IMR1_CORE1);
		}
		/* Default wakeup image: all bits set = no wakeup sources. */
		cd->wakeup_sources[i] = ~0;
	}

	/* Suspend/resume save-restore operates on core 0's IMR bank. */
	cd->cpu2wakeup = GPC_IMR1_CORE0;

	/*
	 * Unmask only bit 0 of core 0's IMR1 (all other bits stay masked).
	 * NOTE(review): presumably this hardware requires the first GPC
	 * interrupt to remain unmasked in run mode — confirm against the
	 * SoC reference manual.
	 */
	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);

	imx_gpcv2_instance = cd;
	register_syscore_ops(&imx_gpcv2_syscore_ops);

	/*
	 * Clear OF_POPULATED so this node can still be bound by a platform
	 * driver — presumably the GPC power-domain side; verify against
	 * the matching platform driver.
	 */
	of_node_clear_flag(node, OF_POPULATED);
	return 0;
}
0288
0289 IRQCHIP_DECLARE(imx_gpcv2_imx7d, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
0290 IRQCHIP_DECLARE(imx_gpcv2_imx8mq, "fsl,imx8mq-gpc", imx_gpcv2_irqchip_init);