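/*
 * PSCI checker: exercises the firmware's PSCI implementation by cycling every
 * CPU through hotplug (CPU_ON/CPU_OFF) and cpuidle suspend (CPU_SUSPEND)
 * operations, checking the reported return codes.
 */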
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
	COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
	COMPLETION_INITIALIZER(suspend_threads_done);
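
/*
 * Assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, where the decision depends on each CPU's
 * "enable-method" property in the device tree, but there is no arch-specific
 * way to check it from here, so trust that the DT is sensible.
 */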
static int psci_ops_check(void)
{
	int migrate_type = -1;
	int cpu;

	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
		pr_warn("Missing PSCI operations, aborting tests\n");
		return -EOPNOTSUPP;
	}

	if (psci_ops.migrate_info_type)
		migrate_type = psci_ops.migrate_info_type();

	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
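		/* There is a UP Trusted OS: find which online core it resides on. */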
		for_each_online_cpu(cpu)
			if (psci_tos_resident_on(cpu)) {
				tos_resident_cpu = cpu;
				break;
			}
		if (tos_resident_cpu == -1)
			pr_warn("UP Trusted OS resides on no online CPU\n");
	}

	return 0;
}
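
/*
 * offlined_cpus is a temporary cpumask owned by the caller; taking it as an
 * argument avoids re-allocating it on every invocation.
 */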
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
				     struct cpumask *offlined_cpus)
{
	int cpu;
	int err = 0;

	cpumask_clear(offlined_cpus);
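
	/* Try to power down all CPUs in the mask. */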
	for_each_cpu(cpu, cpus) {
		int ret = remove_cpu(cpu);
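
		/*
		 * cpu_down() checks the number of online CPUs before checking
		 * for the TOS resident CPU, so the "last CPU" error case must
		 * be tested before the TOS resident case.
		 */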
		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
			if (ret != -EBUSY) {
				pr_err("Unexpected return code %d while trying "
				       "to power down last online CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (cpu == tos_resident_cpu) {
			if (ret != -EPERM) {
				pr_err("Unexpected return code %d while trying "
				       "to power down TOS resident CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power down CPU %d\n", ret, cpu);
			++err;
		}

		if (ret == 0)
			cpumask_set_cpu(cpu, offlined_cpus);
	}
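
	/* Try to power up all the CPUs that have been offlined. */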
	for_each_cpu(cpu, offlined_cpus) {
		int ret = add_cpu(cpu);

		if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power up CPU %d\n", ret, cpu);
			++err;
		} else {
			cpumask_clear_cpu(cpu, offlined_cpus);
		}
	}
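
	/*
	 * Something went bad at some point and some CPUs could not be turned
	 * back on.
	 */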
	WARN_ON(!cpumask_empty(offlined_cpus) ||
		num_online_cpus() != nb_available_cpus);

	return err;
}

static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
{
	int i;
	cpumask_var_t *cpu_groups = *pcpu_groups;

	for (i = 0; i < num; ++i)
		free_cpumask_var(cpu_groups[i]);
	kfree(cpu_groups);
}

static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
{
	int num_groups = 0;
	cpumask_var_t tmp, *cpu_groups;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
			     GFP_KERNEL);
	if (!cpu_groups) {
		free_cpumask_var(tmp);
		return -ENOMEM;
	}

	cpumask_copy(tmp, cpu_online_mask);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
			free_cpumask_var(tmp);
			free_cpu_groups(num_groups, &cpu_groups);
			return -ENOMEM;
		}
		cpumask_copy(cpu_groups[num_groups++], cpu_group);
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	free_cpumask_var(tmp);
	*pcpu_groups = cpu_groups;

	return num_groups;
}

static int hotplug_tests(void)
{
	int i, nb_cpu_group, err = -ENOMEM;
	cpumask_var_t offlined_cpus, *cpu_groups;
	char *page_buf;

	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
		return err;

	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
	if (nb_cpu_group < 0)
		goto out_free_cpus;
	page_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!page_buf)
		goto out_free_cpu_groups;
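
	/*
	 * The last online CPU cannot be powered down; remove_cpu() should
	 * refuse to do so, and down_and_up_cpus() checks for that (-EBUSY).
	 */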
	pr_info("Trying to turn off and on again all CPUs\n");
	err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
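
	/*
	 * Take down CPUs by cpu group this time. When the last CPU of a group
	 * is turned off, the whole group can be powered down.
	 */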
	for (i = 0; i < nb_cpu_group; ++i) {
		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
						      cpu_groups[i]);
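
		/* Remove the trailing newline added by cpumap_print_to_pagebuf(). */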
		page_buf[len - 1] = '\0';
		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
			i, page_buf);
		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
	}

	free_page((unsigned long)page_buf);
out_free_cpu_groups:
	free_cpu_groups(nb_cpu_group, &cpu_groups);
out_free_cpus:
	free_cpumask_var(offlined_cpus);
	return err;
}
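
/*
 * The wakeup timer is only there to generate an interrupt that wakes the CPU
 * from suspend; its callback has nothing to do.
 */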
static void dummy_callback(struct timer_list *unused) {}

static int suspend_cpu(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *state = &drv->states[index];
	bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
	int ret;

	arch_cpu_idle_enter();

	if (broadcast) {
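		/*
		 * The local timer will be shut down in this state; switch to
		 * the tick broadcast timer so that the CPU can still be woken
		 * up.
		 */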
		ret = tick_broadcast_enter();
		if (ret) {
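			/*
			 * In the absence of a hardware broadcast mechanism,
			 * this CPU might be used to broadcast wakeups to the
			 * others, which may be why tick_broadcast_enter()
			 * failed. There is little the checker can do about
			 * it, so fall back to a plain WFI instead of the
			 * requested state.
			 */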
			cpu_do_idle();
			ret = 0;
			goto out_arch_exit;
		}
	}

	ret = state->enter(dev, drv, index);

	if (broadcast)
		tick_broadcast_exit();

out_arch_exit:
	arch_cpu_idle_exit();

	return ret;
}

static int suspend_test_thread(void *arg)
{
	int cpu = (long)arg;
	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv;
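	/* No initialization needed: the timer is armed from scratch below. */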
	struct timer_list wakeup_timer;
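
	/* Wait for the main thread to give the start signal. */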
	wait_for_completion(&suspend_threads_started);
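
	/*
	 * Run as a FIFO real-time thread so the test loop is not preempted by
	 * ordinary tasks on this CPU.
	 */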
	sched_set_fifo(current);

	dev = this_cpu_read(cpuidle_devices);
	drv = cpuidle_get_cpu_driver(dev);

	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
		cpu, drv->state_count - 1);

	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
		int index;
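		/*
		 * Test all suspend states, except state 0 which is usually
		 * plain WFI and does not go through PSCI.
		 */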
		for (index = 1; index < drv->state_count; ++index) {
			int ret;
			struct cpuidle_state *state = &drv->states[index];
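
			/*
			 * Arm the timer to wake this CPU after roughly the
			 * state's target residency, which leaves ample time
			 * to enter suspend. If the local tick must be stopped
			 * for this state, suspend_cpu() switches to the
			 * broadcast timer.
			 */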
			mod_timer(&wakeup_timer, jiffies +
				  usecs_to_jiffies(state->target_residency));
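
			/* IRQs must be disabled around the suspend operation. */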
			local_irq_disable();

			ret = suspend_cpu(dev, drv, index);
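
			/*
			 * We have woken up; re-enable IRQs right away so that
			 * any pending interrupt is handled before the next
			 * cycle.
			 */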
			local_irq_enable();

			if (ret == index) {
				++nb_suspend;
			} else if (ret >= 0) {
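				/* The CPU entered a shallower state than requested. */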
				++nb_shallow_sleep;
			} else {
				pr_err("Failed to suspend CPU %d: error %d "
				       "(requested state %d, cycle %d)\n",
				       cpu, ret, index, i);
				++nb_err;
			}
		}
	}
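
	/* Make sure the wakeup timer cannot fire after this point. */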
	del_timer(&wakeup_timer);
	destroy_timer_on_stack(&wakeup_timer);

	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
		complete(&suspend_threads_done);

	for (;;) {
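		/* Must be set before the park check to avoid missing a wakeup. */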
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park())
			break;
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	kthread_parkme();

	return nb_err;
}

static int suspend_tests(void)
{
	int i, cpu, err = 0;
	struct task_struct **threads;
	int nb_threads = 0;

	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
				GFP_KERNEL);
	if (!threads)
		return -ENOMEM;
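
	/*
	 * Pause cpuidle so that the idle tasks cannot enter deep sleep states
	 * and interfere with the suspend threads on the other CPUs; only the
	 * idle tasks check the pause status, so the suspend threads can still
	 * use cpuidle. Holding the cpuidle lock also makes the driver and
	 * device look-ups below safe.
	 */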
	cpuidle_pause_and_lock();

	for_each_online_cpu(cpu) {
		struct task_struct *thread;
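		/* Check that cpuidle is available on this CPU. */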
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv) {
			pr_warn("cpuidle not available on CPU %d, ignoring\n",
				cpu);
			continue;
		}

		thread = kthread_create_on_cpu(suspend_test_thread,
					       (void *)(long)cpu, cpu,
					       "psci_suspend_test");
		if (IS_ERR(thread))
			pr_err("Failed to create kthread on CPU %d\n", cpu);
		else
			threads[nb_threads++] = thread;
	}

	if (nb_threads < 1) {
		err = -ENODEV;
		goto out;
	}

	atomic_set(&nb_active_threads, nb_threads);
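
	/*
	 * Wake up the suspend threads. To avoid the main thread being
	 * preempted before all of them are running, each suspend thread first
	 * waits on suspend_threads_started.
	 */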
	for (i = 0; i < nb_threads; ++i)
		wake_up_process(threads[i]);
	complete_all(&suspend_threads_started);

	wait_for_completion(&suspend_threads_done);
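
	/* Park and stop all threads, accumulating their error counts. */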
	for (i = 0; i < nb_threads; ++i) {
		err += kthread_park(threads[i]);
		err += kthread_stop(threads[i]);
	}
out:
	cpuidle_resume_and_unlock();
	kfree(threads);
	return err;
}

static int __init psci_checker(void)
{
	int ret;
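
	/*
	 * Since this runs from an initcall, assume that every CPU that can be
	 * onlined is already online. The tests also assume exclusive use of
	 * CPU hotplug: there is no userspace yet to race with, so only a
	 * concurrently running torture test could interfere (see Kconfig).
	 */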
	nb_available_cpus = num_online_cpus();
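
	/* Check the PSCI operations exposed by the firmware. */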
	ret = psci_ops_check();
	if (ret)
		return ret;

	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

	pr_info("Starting hotplug tests\n");
	ret = hotplug_tests();
	if (ret == 0)
		pr_info("Hotplug tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in hotplug tests\n", ret);
	else {
		pr_err("Out of memory\n");
		return ret;
	}

	pr_info("Starting suspend tests (%d cycles per state)\n",
		NUM_SUSPEND_CYCLE);
	ret = suspend_tests();
	if (ret == 0)
		pr_info("Suspend tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in suspend tests\n", ret);
	else {
		switch (ret) {
		case -ENOMEM:
			pr_err("Out of memory\n");
			break;
		case -ENODEV:
			pr_warn("Could not start suspend tests on any CPU\n");
			break;
		}
	}

	pr_info("PSCI checker completed\n");
	return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);