0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/err.h>
0009 #include <linux/export.h>
0010 #include <linux/init.h>
0011 #include <linux/io.h>
0012 #include <linux/slab.h>
0013 #include <linux/syscore_ops.h>
0014 #include <linux/irqdomain.h>
0015 #include <linux/irqchip.h>
0016 #include <linux/irqchip/chained_irq.h>
0017 #include <linux/interrupt.h>
0018 #include <linux/of_address.h>
0019 #include <linux/of_irq.h>
0020
0021 #define COMBINER_ENABLE_SET 0x0
0022 #define COMBINER_ENABLE_CLEAR 0x4
0023 #define COMBINER_INT_STATUS 0xC
0024
0025 #define IRQ_IN_COMBINER 8
0026
0027 static DEFINE_SPINLOCK(irq_controller_lock);
0028
/*
 * Per-combiner state.
 * @hwirq_offset: first hwirq number in the domain served by this combiner
 * @irq_mask: bits of the shared status register owned by this combiner
 * @base: base of this combiner group's memory-mapped registers
 * @parent_irq: upstream (GIC) interrupt this combiner cascades from
 * @pm_save: COMBINER_ENABLE_SET contents saved across suspend/resume
 */
struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};
0038
0039 static struct combiner_chip_data *combiner_data;
0040 static struct irq_domain *combiner_irq_domain;
0041 static unsigned int max_nr = 20;
0042
0043 static inline void __iomem *combiner_base(struct irq_data *data)
0044 {
0045 struct combiner_chip_data *combiner_data =
0046 irq_data_get_irq_chip_data(data);
0047
0048 return combiner_data->base;
0049 }
0050
0051 static void combiner_mask_irq(struct irq_data *data)
0052 {
0053 u32 mask = 1 << (data->hwirq % 32);
0054
0055 writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
0056 }
0057
0058 static void combiner_unmask_irq(struct irq_data *data)
0059 {
0060 u32 mask = 1 << (data->hwirq % 32);
0061
0062 writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
0063 }
0064
/*
 * Chained handler for a combiner's parent interrupt: read the shared status
 * register, keep only the bits owned by this combiner, and forward the
 * lowest pending source into the combiner irq domain.
 */
static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int combiner_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	/* The status register is shared between groups; serialize the read. */
	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	/* Map the lowest pending bit to its linear hwirq in the domain. */
	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	ret = generic_handle_domain_irq(combiner_irq_domain, combiner_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);

 out:
	chained_irq_exit(chip, desc);
}
0091
#ifdef CONFIG_SMP
/* Affinity is controlled at the parent interrupt; forward the request there. */
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *parent_chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *parent_data = irq_get_irq_data(chip_data->parent_irq);

	if (!parent_chip || !parent_chip->irq_set_affinity)
		return -EINVAL;

	return parent_chip->irq_set_affinity(parent_data, mask_val, force);
}
#endif
0106
/* irq_chip for the per-source combiner interrupts in the linear domain. */
static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};
0115
/*
 * Install the demultiplexing handler on parent interrupt @irq, with this
 * combiner's state as handler data.
 */
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}
0122
0123 static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
0124 unsigned int combiner_nr,
0125 void __iomem *base, unsigned int irq)
0126 {
0127 combiner_data->base = base;
0128 combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
0129 combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
0130 combiner_data->parent_irq = irq;
0131
0132
0133 writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
0134 }
0135
0136 static int combiner_irq_domain_xlate(struct irq_domain *d,
0137 struct device_node *controller,
0138 const u32 *intspec, unsigned int intsize,
0139 unsigned long *out_hwirq,
0140 unsigned int *out_type)
0141 {
0142 if (irq_domain_get_of_node(d) != controller)
0143 return -EINVAL;
0144
0145 if (intsize < 2)
0146 return -EINVAL;
0147
0148 *out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
0149 *out_type = 0;
0150
0151 return 0;
0152 }
0153
/*
 * Map one hwirq to a Linux irq: install the combiner chip and point the
 * chip data at the owning combiner (hw >> 3, since each combiner owns
 * IRQ_IN_COMBINER == 8 consecutive hwirqs).
 */
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}
0165
/* Domain callbacks: DT specifier translation and per-irq mapping. */
static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};
0170
0171 static void __init combiner_init(void __iomem *combiner_base,
0172 struct device_node *np)
0173 {
0174 int i, irq;
0175 unsigned int nr_irq;
0176
0177 nr_irq = max_nr * IRQ_IN_COMBINER;
0178
0179 combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
0180 if (!combiner_data)
0181 return;
0182
0183 combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
0184 &combiner_irq_domain_ops, combiner_data);
0185 if (WARN_ON(!combiner_irq_domain)) {
0186 pr_warn("%s: irq domain init failed\n", __func__);
0187 return;
0188 }
0189
0190 for (i = 0; i < max_nr; i++) {
0191 irq = irq_of_parse_and_map(np, i);
0192
0193 combiner_init_one(&combiner_data[i], i,
0194 combiner_base + (i >> 2) * 0x10, irq);
0195 combiner_cascade_irq(&combiner_data[i], irq);
0196 }
0197 }
0198
0199 #ifdef CONFIG_PM
0200
0201
0202
0203
0204
0205
0206
0207
/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups.
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}
0218
0219
0220
0221
0222
0223
0224
0225
/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups.
 * Each group is first fully masked so only the saved enables are set.
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		writel_relaxed(combiner_data[i].irq_mask,
			       combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		writel_relaxed(combiner_data[i].pm_save,
			       combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}
0237
0238 #else
0239 #define combiner_suspend NULL
0240 #define combiner_resume NULL
0241 #endif
0242
/* Save/restore combiner enables across system suspend (no-ops if !CONFIG_PM). */
static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};
0247
0248 static int __init combiner_of_init(struct device_node *np,
0249 struct device_node *parent)
0250 {
0251 void __iomem *combiner_base;
0252
0253 combiner_base = of_iomap(np, 0);
0254 if (!combiner_base) {
0255 pr_err("%s: failed to map combiner registers\n", __func__);
0256 return -ENXIO;
0257 }
0258
0259 if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
0260 pr_info("%s: number of combiners not specified, "
0261 "setting default as %d.\n",
0262 __func__, max_nr);
0263 }
0264
0265 combiner_init(combiner_base, np);
0266
0267 register_syscore_ops(&combiner_syscore_ops);
0268
0269 return 0;
0270 }
0271 IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
0272 combiner_of_init);