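// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:	Nicolas Pitre, March 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 */
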
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>
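
/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */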
static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}
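
/*
 * bL switcher core code.
 *
 * bL_do_switch() runs on the outbound CPU, on an isolated stack, once its
 * state has been saved: it releases the inbound CPU, waits for the inbound
 * side to signal that the switch is complete, then powers itself down.
 */
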
static void bL_do_switch(void *_arg)
{
	unsigned ib_mpidr, ib_cpu, ib_cluster;
	long volatile handshake, **handshake_ptr = _arg;

	pr_debug("%s\n", __func__);

	ib_mpidr = cpu_logical_map(smp_processor_id());
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	/* Advertise our handshake location */
	if (handshake_ptr) {
		handshake = 0;
		*handshake_ptr = &handshake;
	} else
		handshake = -1;

	/*
	 * Our state has been saved at this point.  Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
	sev();

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend().  It is therefore important to
	 * be very careful not to make any change the other guy is not
	 * expecting.  This is why we need stack isolation.
	 */

	/* Wait until our inbound is alive. */
	while (!handshake) {
		wfe();
		smp_mb();
	}

	/* Let's put ourself down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}
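
/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */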
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	void *stack = current_thread_info() + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += clusterid * STACK_SIZE + STACK_SIZE;
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}
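
/*
 * Generic switcher interface
 */
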
static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];
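
/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */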
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, this_cpu, that_cpu;
	unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
	struct completion inbound_alive;
	long volatile *handshake_ptr;
	int ipi_nr, ret;

	this_cpu = smp_processor_id();
	ob_mpidr = read_mpidr();
	ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
	ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
	BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

	if (new_cluster_id == ob_cluster)
		return 0;

	that_cpu = bL_switcher_cpu_pairing[this_cpu];
	ib_mpidr = cpu_logical_map(that_cpu);
	ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
	ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

	pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
		 this_cpu, ob_mpidr, ib_mpidr);

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
	mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

	/* Install our "inbound alive" notifier. */
	init_completion(&inbound_alive);
	ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
	ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
	mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * Raise a SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in bL_power_down().
	 */
	gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

	/*
	 * Wait for the inbound to come up.  This allows for other
	 * tasks to be scheduled in the mean time.
	 */
	wait_for_completion(&inbound_alive);
	mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();
	trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);

	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

	tick_suspend_local();

	ret = cpu_pm_enter();

	/* we can not tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Swap the physical CPUs in the logical map for this logical CPU. */
	cpu_logical_map(this_cpu) = ib_mpidr;
	cpu_logical_map(that_cpu) = ob_mpidr;

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
	BUG_ON(mpidr != ib_mpidr);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	tick_resume_local();

	trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
	local_fiq_enable();
	local_irq_enable();

	*handshake_ptr = 1;
	dsb_sev();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}

struct bL_thread {
	spinlock_t lock;
	struct task_struct *task;
	wait_queue_head_t wq;
	int wanted_cluster;
	struct completion started;
	bL_switch_completion_handler completer;
	void *completer_cookie;
};

static struct bL_thread bL_threads[NR_CPUS];
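
/*
 * One switcher thread is bound to each logical CPU taking part in the
 * switch.  It sleeps until a cluster is wanted, performs the switch on
 * its own CPU, then invokes the completion callback if one was given.
 */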
static int bL_switcher_thread(void *arg)
{
	struct bL_thread *t = arg;
	int cluster;
	bL_switch_completion_handler completer;
	void *completer_cookie;

	sched_set_fifo_low(current);
	complete(&t->started);

	do {
		if (signal_pending(current))
			flush_signals(current);
		wait_event_interruptible(t->wq,
				t->wanted_cluster != -1 ||
				kthread_should_stop());

		spin_lock(&t->lock);
		cluster = t->wanted_cluster;
		completer = t->completer;
		completer_cookie = t->completer_cookie;
		t->wanted_cluster = -1;
		t->completer = NULL;
		spin_unlock(&t->lock);

		if (cluster != -1) {
			bL_switch_to(cluster);

			if (completer)
				completer(completer_cookie);
		}
	} while (!kthread_should_stop());

	return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
	struct task_struct *task;

	task = kthread_create_on_node(bL_switcher_thread, arg,
				      cpu_to_node(cpu), "kswitcher_%d", cpu);
	if (!IS_ERR(task)) {
		kthread_bind(task, cpu);
		wake_up_process(task);
	} else
		pr_err("%s failed for CPU %d\n", __func__, cpu);
	return task;
}
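
/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *	with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback.  if non-NULL,
 *	@completer(@completer_cookie) will be called on completion of
 *	the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete.  This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns.  When @completer is supplied, no new switch request can be
 * accepted for the affected CPU until the completer function starts
 * executing.
 */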
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
			 bL_switch_completion_handler completer,
			 void *completer_cookie)
{
	struct bL_thread *t;

	if (cpu >= ARRAY_SIZE(bL_threads)) {
		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
		return -EINVAL;
	}

	t = &bL_threads[cpu];

	if (IS_ERR(t->task))
		return PTR_ERR(t->task);
	if (!t->task)
		return -ESRCH;

	spin_lock(&t->lock);
	if (t->completer) {
		spin_unlock(&t->lock);
		return -EBUSY;
	}
	t->completer = completer;
	t->completer_cookie = completer_cookie;
	t->wanted_cluster = new_cluster_id;
	spin_unlock(&t->lock);
	wake_up(&t->wq);
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
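
/*
 * Activation and configuration code.
 */
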
static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);

static int bL_activation_notify(unsigned long val)
{
	int ret;

	ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
	if (ret & NOTIFY_STOP_MASK)
		pr_err("%s: notifier chain failed with status 0x%x\n",
			__func__, ret);
	return notifier_to_errno(ret);
}
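
/*
 * Bring the logical CPUs we removed at switcher activation back online.
 */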
static void bL_switcher_restore_cpus(void)
{
	int i;

	for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
		struct device *cpu_dev = get_cpu_device(i);
		int ret = device_online(cpu_dev);
		if (ret)
			dev_err(cpu_dev, "switcher: unable to restore CPU\n");
	}
}
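
/*
 * Pair each CPU on one cluster with a counterpart on the other cluster
 * and take the remaining CPUs offline, so that only one logical CPU per
 * pair stays visible to the scheduler.
 */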
static int bL_switcher_halve_cpus(void)
{
	int i, j, cluster_0, gic_id, ret;
	unsigned int cpu, cluster, mask;
	cpumask_t available_cpus;

	/* First pass to validate what we have */
	mask = 0;
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster >= 2) {
			pr_err("%s: only dual cluster systems are supported\n", __func__);
			return -EINVAL;
		}
		if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
			return -EINVAL;
		mask |= (1 << cluster);
	}
	if (mask != 3) {
		pr_err("%s: no CPU pairing possible\n", __func__);
		return -EINVAL;
	}

	/*
	 * Now let's do the pairing.  We match each CPU with another CPU
	 * from a different cluster.  To get a uniform scheduling behavior
	 * without fiddling with CPU topology and compute capacity data,
	 * we'll use logical CPUs initially belonging to the same cluster.
	 */
	memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
	cpumask_copy(&available_cpus, cpu_online_mask);
	cluster_0 = -1;
	for_each_cpu(i, &available_cpus) {
		int match = -1;
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
		if (cluster_0 == -1)
			cluster_0 = cluster;
		if (cluster != cluster_0)
			continue;
		cpumask_clear_cpu(i, &available_cpus);
		for_each_cpu(j, &available_cpus) {
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
			/*
			 * Let's remember the last match to create "odd"
			 * pairings on purpose in order for other code not
			 * to assume any relation between physical and
			 * logical CPU numbers.
			 */
			if (cluster != cluster_0)
				match = j;
		}
		if (match != -1) {
			bL_switcher_cpu_pairing[i] = match;
			cpumask_clear_cpu(match, &available_cpus);
			pr_info("CPU%d paired with CPU%d\n", i, match);
		}
	}

	/*
	 * Now we disable the unwanted CPUs i.e. everything that has no
	 * pairing information (that includes the pairing counterparts).
	 */
	cpumask_clear(&bL_switcher_removed_logical_cpus);
	for_each_online_cpu(i) {
		cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

		/* Let's take note of the GIC ID for this CPU */
		gic_id = gic_get_cpu_id(i);
		if (gic_id < 0) {
			pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
			bL_switcher_restore_cpus();
			return -EINVAL;
		}
		bL_gic_id[cpu][cluster] = gic_id;
		pr_info("GIC ID for CPU %u cluster %u is %u\n",
			cpu, cluster, gic_id);

		if (bL_switcher_cpu_pairing[i] != -1) {
			bL_switcher_cpu_original_cluster[i] = cluster;
			continue;
		}

		ret = device_offline(get_cpu_device(i));
		if (ret) {
			bL_switcher_restore_cpus();
			return ret;
		}
		cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
	}

	return 0;
}
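
/*
 * Determine the logical CPU a given physical CPU is grouped on.
 */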
int bL_switcher_get_logical_index(u32 mpidr)
{
	int cpu;

	if (!bL_switcher_active)
		return -EUNATCH;

	mpidr &= MPIDR_HWID_BITMASK;
	for_each_online_cpu(cpu) {
		int pairing = bL_switcher_cpu_pairing[cpu];
		if (pairing == -1)
			continue;
		if ((mpidr == cpu_logical_map(cpu)) ||
		    (mpidr == cpu_logical_map(pairing)))
			return cpu;
	}
	return -EINVAL;
}

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
	trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}

int bL_switcher_trace_trigger(void)
{
	preempt_disable();

	bL_switcher_trace_trigger_cpu(NULL);
	smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

	preempt_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
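
/*
 * Enable the switcher: pair up the CPUs, take the redundant ones offline
 * and start one switcher thread per remaining logical CPU.
 */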
static int bL_switcher_enable(void)
{
	int cpu, ret;

	mutex_lock(&bL_switcher_activation_lock);
	lock_device_hotplug();
	if (bL_switcher_active) {
		unlock_device_hotplug();
		mutex_unlock(&bL_switcher_activation_lock);
		return 0;
	}

	pr_info("big.LITTLE switcher initializing\n");

	ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
	if (ret)
		goto error;

	ret = bL_switcher_halve_cpus();
	if (ret)
		goto error;

	bL_switcher_trace_trigger();

	for_each_online_cpu(cpu) {
		struct bL_thread *t = &bL_threads[cpu];
		spin_lock_init(&t->lock);
		init_waitqueue_head(&t->wq);
		init_completion(&t->started);
		t->wanted_cluster = -1;
		t->task = bL_switcher_thread_create(cpu, t);
	}

	bL_switcher_active = 1;
	bL_activation_notify(BL_NOTIFY_POST_ENABLE);
	pr_info("big.LITTLE switcher initialized\n");
	goto out;

error:
	pr_warn("big.LITTLE switcher initialization failed\n");
	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	unlock_device_hotplug();
	mutex_unlock(&bL_switcher_activation_lock);
	return ret;
}

#ifdef CONFIG_SYSFS

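/*
 * Disable the switcher: stop the switcher threads, switch each CPU pair
 * back to its original cluster, then bring the removed CPUs back online.
 */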
static void bL_switcher_disable(void)
{
	unsigned int cpu, cluster;
	struct bL_thread *t;
	struct task_struct *task;

	mutex_lock(&bL_switcher_activation_lock);
	lock_device_hotplug();

	if (!bL_switcher_active)
		goto out;

	if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
		bL_activation_notify(BL_NOTIFY_POST_ENABLE);
		goto out;
	}

	bL_switcher_active = 0;

	/*
	 * To deactivate the switcher, we must shut down the switcher
	 * threads to prevent any other requests from being accepted.
	 * Then, if the final cluster for given logical CPU is not the
	 * same as the original one, we'll recreate a switcher thread
	 * just for the purpose of switching it back.  This could take
	 * quite some time, however, as we need to wait for the switcher
	 * threads to complete their current work and be shut down when
	 * they are done.
	 */
	for_each_online_cpu(cpu) {
		t = &bL_threads[cpu];
		task = t->task;
		t->task = NULL;
		if (!task || IS_ERR(task))
			continue;
		kthread_stop(task);
		/* no more switch may happen on this CPU at this point */
		cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
		if (cluster == bL_switcher_cpu_original_cluster[cpu])
			continue;
		init_completion(&t->started);
		t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
		task = bL_switcher_thread_create(cpu, t);
		if (!IS_ERR(task)) {
			wait_for_completion(&t->started);
			kthread_stop(task);
			cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
			if (cluster == bL_switcher_cpu_original_cluster[cpu])
				continue;
		}
		/* If execution gets here, we're in trouble. */
		pr_crit("%s: unable to restore original cluster for CPU %d\n",
			__func__, cpu);
		pr_crit("%s: CPU %d can't be restored\n",
			__func__, bL_switcher_cpu_pairing[cpu]);
		cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
				  &bL_switcher_removed_logical_cpus);
	}

	bL_switcher_restore_cpus();
	bL_switcher_trace_trigger();

	bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
	unlock_device_hotplug();
	mutex_unlock(&bL_switcher_activation_lock);
}
0668
0669 static ssize_t bL_switcher_active_show(struct kobject *kobj,
0670 struct kobj_attribute *attr, char *buf)
0671 {
0672 return sprintf(buf, "%u\n", bL_switcher_active);
0673 }
0674
0675 static ssize_t bL_switcher_active_store(struct kobject *kobj,
0676 struct kobj_attribute *attr, const char *buf, size_t count)
0677 {
0678 int ret;
0679
0680 switch (buf[0]) {
0681 case '0':
0682 bL_switcher_disable();
0683 ret = 0;
0684 break;
0685 case '1':
0686 ret = bL_switcher_enable();
0687 break;
0688 default:
0689 ret = -EINVAL;
0690 }
0691
0692 return (ret >= 0) ? count : ret;
0693 }
0694
0695 static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
0696 struct kobj_attribute *attr, const char *buf, size_t count)
0697 {
0698 int ret = bL_switcher_trace_trigger();
0699
0700 return ret ? ret : count;
0701 }
0702
0703 static struct kobj_attribute bL_switcher_active_attr =
0704 __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
0705
0706 static struct kobj_attribute bL_switcher_trace_trigger_attr =
0707 __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
0708
0709 static struct attribute *bL_switcher_attrs[] = {
0710 &bL_switcher_active_attr.attr,
0711 &bL_switcher_trace_trigger_attr.attr,
0712 NULL,
0713 };
0714
0715 static struct attribute_group bL_switcher_attr_group = {
0716 .attrs = bL_switcher_attrs,
0717 };
0718
0719 static struct kobject *bL_switcher_kobj;
0720
0721 static int __init bL_switcher_sysfs_init(void)
0722 {
0723 int ret;
0724
0725 bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
0726 if (!bL_switcher_kobj)
0727 return -ENOMEM;
0728 ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
0729 if (ret)
0730 kobject_put(bL_switcher_kobj);
0731 return ret;
0732 }
0733
0734 #endif
0735
bool bL_switcher_get_enabled(void)
{
	mutex_lock(&bL_switcher_activation_lock);

	return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
	mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
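
/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */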
static int bL_switcher_cpu_pre(unsigned int cpu)
{
	int pairing;

	if (!bL_switcher_active)
		return 0;

	pairing = bL_switcher_cpu_pairing[cpu];

	if (pairing == -1)
		return -EINVAL;
	return 0;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);

static int __init bL_switcher_init(void)
{
	int ret;

	if (!mcpm_is_available())
		return -ENODEV;

	cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
				  bL_switcher_cpu_pre, NULL);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
					NULL, bL_switcher_cpu_pre);
	if (ret < 0) {
		cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
		pr_err("bL_switcher: Failed to allocate a hotplug state\n");
		return ret;
	}
	if (!no_bL_switcher) {
		ret = bL_switcher_enable();
		if (ret)
			return ret;
	}

#ifdef CONFIG_SYSFS
	ret = bL_switcher_sysfs_init();
	if (ret)
		pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

	return 0;
}

late_initcall(bL_switcher_init);