// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell Dove PMU support
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/dove/pmu.h>
#include <linux/spinlock.h>

#define NR_PMU_IRQS		7

#define PMC_SW_RST		0x30
#define PMC_IRQ_CAUSE		0x50
#define PMC_IRQ_MASK		0x54

#define PMU_PWR			0x10
#define PMU_ISO			0x58

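/*
 * Driver state shared by the reset-controller, power-domain and
 * interrupt-controller parts of the PMU.  @lock serialises the
 * read-modify-write sequences on the shared PMC and PMU registers.
 */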
struct pmu_data {
	spinlock_t lock;
	struct device_node *of_node;
	void __iomem *pmc_base;
	void __iomem *pmu_base;
	struct irq_chip_generic *irq_gc;
	struct irq_domain *irq_domain;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev reset;
#endif
};

/*
 * The PMU contains a register to reset various subsystems within the
 * SoC.  Export this as a reset controller.
 */
#ifdef CONFIG_RESET_CONTROLLER
#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)

static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val;

	/* Pulse the line: assert (clear the bit) then de-assert (set it) */
	spin_lock_irqsave(&pmu->lock, flags);
	val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = ~BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static const struct reset_control_ops pmu_reset_ops = {
	.reset = pmu_reset_reset,
	.assert = pmu_reset_assert,
	.deassert = pmu_reset_deassert,
};

static struct reset_controller_dev pmu_reset __initdata = {
	.ops = &pmu_reset_ops,
	.owner = THIS_MODULE,
	.nr_resets = 32,
};

static void __init pmu_reset_init(struct pmu_data *pmu)
{
	int ret;

	pmu->reset = pmu_reset;
	pmu->reset.of_node = pmu->of_node;

	ret = reset_controller_register(&pmu->reset);
	if (ret)
		pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
}
#else
static void __init pmu_reset_init(struct pmu_data *pmu)
{
}
#endif
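
/*
 * Illustrative sketch: a peripheral driver whose DT node carries
 * "resets = <&pmu N>;" would exercise the controller above through the
 * generic reset API.  The function name and the device argument are
 * hypothetical, not part of this driver.
 */
static inline int example_pulse_block_reset(struct device *dev)
{
	struct reset_control *rst;

	/* Resolves the consumer's "resets" phandle to this controller */
	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Ends up in pmu_reset_reset(): clear then set the PMC_SW_RST bit */
	return reset_control_reset(rst);
}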

struct pmu_domain {
	struct pmu_data *pmu;
	u32 pwr_mask;
	u32 rst_mask;
	u32 iso_mask;
	struct generic_pm_domain base;
};

#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)

/*
 * Power domain sequencing.  Powering a domain down: isolate it (clear
 * its PMU_ISO bit), assert the subsystem reset (clear its PMC_SW_RST
 * bit), then remove power (set its PMU_PWR bit).  Powering it up is the
 * reverse: apply power, release the reset, then lift the isolation.
 */
static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Isolate the domain */
	if (pmu_dom->iso_mask) {
		val = ~pmu_dom->iso_mask;
		val &= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	/* Assert the subsystem reset */
	if (pmu_dom->rst_mask) {
		val = ~pmu_dom->rst_mask;
		val &= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Power down */
	val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
	writel_relaxed(val, pmu_base + PMU_PWR);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Power on */
	val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
	writel_relaxed(val, pmu_base + PMU_PWR);

	/* Release the subsystem reset */
	if (pmu_dom->rst_mask) {
		val = pmu_dom->rst_mask;
		val |= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Lift the isolation */
	if (pmu_dom->iso_mask) {
		val = pmu_dom->iso_mask;
		val |= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static void __pmu_domain_register(struct pmu_domain *domain,
	struct device_node *np)
{
	unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);

	domain->base.power_off = pmu_domain_power_off;
	domain->base.power_on = pmu_domain_power_on;

	pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));

	if (np)
		of_genpd_add_provider_simple(np, &domain->base);
}
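
/*
 * Illustrative sketch: once a domain has been registered as a provider,
 * a consumer selects it with a power-domains phandle and the genpd core
 * invokes the power_on/power_off callbacks above around runtime PM.
 * The node name and phandle label here are hypothetical:
 *
 *	video-decoder@b0000 {
 *		...
 *		power-domains = <&vpu_domain>;
 *	};
 */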

/* PMC IRQ controller */
static void pmu_irq_handler(struct irq_desc *desc)
{
	struct pmu_data *pmu = irq_desc_get_handler_data(desc);
	struct irq_chip_generic *gc = pmu->irq_gc;
	struct irq_domain *domain = pmu->irq_domain;
	void __iomem *base = gc->reg_base;
	u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
	u32 done = ~0;

	if (stat == 0) {
		handle_bad_irq(desc);
		return;
	}

	/* Dispatch pending sources, highest bit first, tracking them in 'done' */
	while (stat) {
		u32 hwirq = fls(stat) - 1;

		stat &= ~(1 << hwirq);
		done &= ~(1 << hwirq);

		generic_handle_irq(irq_find_mapping(domain, hwirq));
	}

	/*
	 * The PMC_IRQ_CAUSE register is plain read/write rather than
	 * write-one-to-clear, so there is no race-free way to acknowledge
	 * these interrupts.  Keep the window as small as possible: re-read
	 * the cause register and write back only the still-pending bits,
	 * with the ones we have just handled cleared.
	 */
	irq_gc_lock(gc);
	done &= readl_relaxed(base + PMC_IRQ_CAUSE);
	writel_relaxed(done, base + PMC_IRQ_CAUSE);
	irq_gc_unlock(gc);
}

static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
{
	const char *name = "pmu_irq";
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	/* Mask and clear all interrupts */
	writel(0, pmu->pmc_base + PMC_IRQ_MASK);
	writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);

	domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
		irq_domain_remove(domain);
		return ret;
	}

	/* PMC_IRQ_MASK is an enable register: set to unmask, clear to mask */
	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = pmu->pmc_base;
	gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	pmu->irq_domain = domain;
	pmu->irq_gc = gc;

	irq_set_handler_data(irq, pmu);
	irq_set_chained_handler(irq, pmu_irq_handler);

	return 0;
}
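
/*
 * Illustrative sketch: the generic chips are allocated with
 * IRQ_NOREQUEST/IRQ_NOPROBE in the "clear" position, so the NR_PMU_IRQS
 * lines are ordinary requestable interrupts dispatched by the chained
 * handler above.  A consumer (hypothetical names) would do roughly:
 *
 *	unsigned int virq = irq_find_mapping(pmu->irq_domain, hwirq);
 *
 *	request_irq(virq, my_handler, 0, "my-pmu-consumer", my_cookie);
 *
 * Unmasking then sets the corresponding PMC_IRQ_MASK bit via
 * irq_gc_mask_set_bit().
 */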

int __init dove_init_pmu_legacy(const struct dove_pmu_initdata *initdata)
{
	const struct dove_pmu_domain_initdata *domain_initdata;
	struct pmu_data *pmu;
	int ret;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->pmc_base = initdata->pmc_base;
	pmu->pmu_base = initdata->pmu_base;

	pmu_reset_init(pmu);
	for (domain_initdata = initdata->domains; domain_initdata->name;
	     domain_initdata++) {
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (domain) {
			domain->pmu = pmu;
			domain->pwr_mask = domain_initdata->pwr_mask;
			domain->rst_mask = domain_initdata->rst_mask;
			domain->iso_mask = domain_initdata->iso_mask;
			domain->base.name = domain_initdata->name;

			__pmu_domain_register(domain, NULL);
		}
	}

	ret = dove_init_pmu_irq(pmu, initdata->irq);
	if (ret)
		pr_err("dove_init_pmu_irq() failed: %d\n", ret);

	if (pmu->irq_domain)
		irq_domain_associate_many(pmu->irq_domain,
					  initdata->irq_domain_start,
					  0, NR_PMU_IRQS);

	return 0;
}
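
/*
 * Illustrative sketch: how a legacy (non-DT) board file might feed
 * dove_init_pmu_legacy().  Every name, mask and number below is a
 * hypothetical placeholder.
 */
static const struct dove_pmu_domain_initdata example_domains[] __maybe_unused = {
	{
		.pwr_mask = BIT(3),	/* PMU_PWR bit for the block */
		.rst_mask = BIT(16),	/* PMC_SW_RST bit, 0 if unused */
		.iso_mask = BIT(0),	/* PMU_ISO bit, 0 if unused */
		.name = "example-vpu",
	},
	{ },	/* sentinel: the init loop stops at name == NULL */
};

static struct dove_pmu_initdata example_initdata __maybe_unused = {
	.irq = 33,			/* chained parent interrupt */
	.irq_domain_start = 64,		/* first Linux IRQ to associate */
	.domains = example_domains,
	/* .pmc_base and .pmu_base must point at the ioremapped registers */
};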

/*
 * Device Tree shape consumed by dove_init_pmu() below; the node names,
 * addresses and mask values are illustrative:
 *
 * pmu: power-management@d0000 {
 *	compatible = "marvell,dove-pmu";
 *	reg = <0xd0000 0x8000>, <0xd8000 0x8000>;
 *	interrupts = <33>;
 *	interrupt-controller;
 *	#reset-cells = <1>;
 *	domains {
 *		vpu_domain: vpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000008>;
 *			marvell,pmu_iso_mask = <0x00000001>;
 *			resets = <&pmu 16>;
 *		};
 *	};
 * };
 */
int __init dove_init_pmu(void)
{
	struct device_node *np_pmu, *domains_node, *np;
	struct pmu_data *pmu;
	int ret, parent_irq;

	/* Lookup the PMU node */
	np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
	if (!np_pmu)
		return 0;

	domains_node = of_get_child_by_name(np_pmu, "domains");
	if (!domains_node) {
		pr_err("%pOFn: failed to find domains sub-node\n", np_pmu);
		return 0;
	}

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->of_node = np_pmu;
	pmu->pmc_base = of_iomap(pmu->of_node, 0);
	pmu->pmu_base = of_iomap(pmu->of_node, 1);
	if (!pmu->pmc_base || !pmu->pmu_base) {
		pr_err("%pOFn: failed to map PMU\n", np_pmu);
		iounmap(pmu->pmu_base);
		iounmap(pmu->pmc_base);
		kfree(pmu);
		return -ENOMEM;
	}

	pmu_reset_init(pmu);

	for_each_available_child_of_node(domains_node, np) {
		struct of_phandle_args args;
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (!domain)
			break;

		domain->pmu = pmu;
		domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
		if (!domain->base.name) {
			kfree(domain);
			break;
		}

		of_property_read_u32(np, "marvell,pmu_pwr_mask",
				     &domain->pwr_mask);
		of_property_read_u32(np, "marvell,pmu_iso_mask",
				     &domain->iso_mask);

		/*
		 * Parse the reset controller property directly here so
		 * that this works even when reset controller support is
		 * not configured into the kernel.
		 */
		ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
						 0, &args);
		if (ret == 0) {
			if (args.np == pmu->of_node)
				domain->rst_mask = BIT(args.args[0]);
			of_node_put(args.np);
		}

		__pmu_domain_register(domain, np);
	}

	/* Failure to set up the PMU interrupt is not fatal */
	parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
	if (!parent_irq) {
		pr_err("%pOFn: no interrupt specified\n", np_pmu);
	} else {
		ret = dove_init_pmu_irq(pmu, parent_irq);
		if (ret)
			pr_err("dove_init_pmu_irq() failed: %d\n", ret);
	}

	return 0;
}
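
/*
 * Illustrative sketch: dove_init_pmu() takes no arguments and probes the
 * device tree itself, so platform code is expected to call it once during
 * boot, e.g. from its init_machine hook (hypothetical name):
 *
 *	static void __init example_dove_init(void)
 *	{
 *		...
 *		dove_init_pmu();
 *	}
 */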