// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct sbi_cpuidle_data {
	u32 *states;
	struct device *dev;
};

struct sbi_domain_state {
	bool available;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;

static inline void sbi_set_domain_state(u32 state)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = true;
	data->state = state;
}

static inline u32 sbi_get_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->state;
}

static inline void sbi_clear_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = false;
}

static inline bool sbi_is_domain_state_available(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->available;
}

static int sbi_suspend_finisher(unsigned long suspend_type,
				unsigned long resume_addr,
				unsigned long opaque)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
			suspend_type, resume_addr, opaque, 0, 0, 0);

	return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}

static int sbi_suspend(u32 state)
{
	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return cpu_suspend(state, sbi_suspend_finisher);
	else
		return sbi_suspend_finisher(state, 0, 0);
}

static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int idx)
{
	u32 *states = __this_cpu_read(sbi_cpuidle_data.states);

	return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
}

static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
					 struct cpuidle_driver *drv, int idx,
					 bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;
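	/* Do runtime PM to manage a hierarchical CPU topology. */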
	ct_irq_enter_irqson();
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);
	ct_irq_exit_irqson();

	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = sbi_suspend(state) ? -1 : idx;

	ct_irq_enter_irqson();
	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);
	ct_irq_exit_irqson();

	cpu_pm_exit();
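	/* Clear the domain state to start fresh when back from idle. */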
	sbi_clear_domain_state();
	return ret;
}

static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}

static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}

static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
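		/* Clear domain state to start fresh at next online. */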
		sbi_clear_domain_state();
	}

	return 0;
}

static void sbi_idle_init_cpuhp(void)
{
	int err;

	if (!sbi_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
	if (err)
		pr_warn("Failed %d while setting up cpuhp state\n", err);
}

static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};

static bool sbi_suspend_state_is_valid(u32 state)
{
	if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_RET_PLATFORM)
		return false;
	if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
		return false;
	return true;
}

static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
		return err;
	}

	if (!sbi_suspend_state_is_valid(*state)) {
		pr_warn("Invalid SBI suspend state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}

static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				    struct sbi_cpuidle_data *data,
				    unsigned int state_count, int cpu)
{
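	/*
	 * Without an OSI mode, all CPUs need to be managed in PC mode, which
	 * enforces default cpuidle behaviour.
	 */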
	if (!sbi_cpuidle_use_osi)
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);
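	/*
	 * Using the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain, assumes the domain states are all
	 * deeper states.
	 */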
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
		sbi_enter_s2idle_domain_idle_state;
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}

static int sbi_cpuidle_dt_init_states(struct device *dev,
				      struct cpuidle_driver *drv,
				      unsigned int cpu,
				      unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	struct device_node *cpu_node;
	u32 *states;
	int i, ret;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states) {
		ret = -ENOMEM;
		goto fail;
	}
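	/* Parse SBI specific details from state DT nodes */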
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			goto fail;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	if (i != state_count) {
		ret = -ENODEV;
		goto fail;
	}
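	/* Initialize optional data, used for the hierarchical topology. */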
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		goto fail;
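	/* Store states in the per-cpu struct. */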
	data->states = states;

fail:
	of_node_put(cpu_node);

	return ret;
}

static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	sbi_cpuidle_use_cpuhp = false;
}

static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);
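	/* RISC-V architectural WFI to be represented as state index 0. */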
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");
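	/*
	 * If no DT idle states are detected (ret == 0) let the driver
	 * initialization fail accordingly since there is no reason to
	 * initialize the idle driver if only wfi is supported, the
	 * default architectural back-end already executes wfi
	 * on idle entry.
	 */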
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1;
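	/* Initialize idle states from DT. */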
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}

static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
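	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */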
	sbi_cpuidle_pd_allow_domain_state = true;
}

#ifdef CONFIG_DT_IDLE_GENPD

static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!sbi_cpuidle_pd_allow_domain_state)
		return -EBUSY;
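	/* OSI mode is enabled, set the corresponding domain state. */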
	pd_state = state->data;
	sbi_set_domain_state(*pd_state);

	return 0;
}

struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);

static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
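	/* Allow power off when OSI is available. */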
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;
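	/* Use governor for CPU PM domains if it has some states to manage. */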
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static int sbi_genpd_probe(struct device_node *np)
{
	struct device_node *node;
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;
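	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */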
	for_each_child_of_node(np, node) {
		if (!of_find_property(node, "#power-domain-cells", NULL))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto put_node;

		pd_count++;
	}
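	/* Bail out if not using the hierarchical CPU topology. */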
	if (!pd_count)
		goto no_pd;
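	/* Link genpd masters/subdomains to model the CPU topology. */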
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

put_node:
	of_node_put(node);
remove_pd:
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}

#else

static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}

#endif

static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *np, *pds_node;
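	/* Detect OSI support based on CPU DT nodes */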
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		if (np &&
		    of_find_property(np, "power-domains", NULL) &&
		    of_find_property(np, "power-domain-names", NULL)) {
			of_node_put(np);
			continue;
		} else {
			of_node_put(np);
			sbi_cpuidle_use_osi = false;
			break;
		}
	}
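	/* Populate generic power domains from DT nodes */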
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}
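	/* Initialize CPU idle driver for each CPU */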
	for_each_possible_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}
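	/* Setup CPU hotplug notifiers */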
	sbi_idle_init_cpuhp();

	pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}

static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
		.sync_state = sbi_cpuidle_domain_sync_state,
	},
};

static int __init sbi_cpuidle_init(void)
{
	int ret;
	struct platform_device *pdev;
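	/*
	 * The SBI HSM suspend function is only available when:
	 * 1) SBI version is 0.3 or higher
	 * 2) SBI HSM extension is available
	 */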
	if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
	    sbi_probe_extension(SBI_EXT_HSM) <= 0) {
		pr_info("HSM suspend not available\n");
		return 0;
	}

	ret = platform_driver_register(&sbi_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("sbi-cpuidle",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(sbi_cpuidle_init);