// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/tick.h>

#include <asm/cpuidle.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "soc.h"
#include "clockdomain.h"

#define MAX_CPUS    2

/* Machine specific information */
struct idle_statedata {
    u32 cpu_state;
    u32 mpu_logic_state;
    u32 mpu_state;
    u32 mpu_state_vote;
};

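/*
 * Per-SoC idle state tables. Entries map 1:1, by index, onto the C-states
 * declared in omap4_idle_driver and omap5_idle_driver below: the OMAP4
 * table describes C1 (CPUx ON, MPUSS ON), C2 (CPUx OFF, MPUSS CSWR) and
 * C3 (CPUx OFF, MPUSS OSWR); the OMAP5 table describes C1 (CPUx WFI,
 * MPUSS ON) and C2 (CPUx CSWR, MPUSS CSWR).
 */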
static struct idle_statedata omap4_idle_data[] = {
    {
        .cpu_state = PWRDM_POWER_ON,
        .mpu_state = PWRDM_POWER_ON,
        .mpu_logic_state = PWRDM_POWER_RET,
    },
    {
        .cpu_state = PWRDM_POWER_OFF,
        .mpu_state = PWRDM_POWER_RET,
        .mpu_logic_state = PWRDM_POWER_RET,
    },
    {
        .cpu_state = PWRDM_POWER_OFF,
        .mpu_state = PWRDM_POWER_RET,
        .mpu_logic_state = PWRDM_POWER_OFF,
    },
};

static struct idle_statedata omap5_idle_data[] = {
    {
        .cpu_state = PWRDM_POWER_ON,
        .mpu_state = PWRDM_POWER_ON,
        .mpu_logic_state = PWRDM_POWER_ON,
    },
    {
        .cpu_state = PWRDM_POWER_RET,
        .mpu_state = PWRDM_POWER_RET,
        .mpu_logic_state = PWRDM_POWER_RET,
    },
};

static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

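/*
 * Book-keeping for the coupled and SMP idle paths: cpu_done[] records
 * which CPUs have completed their low-power entry, abort_barrier
 * synchronises the CPUs at the end of an (aborted or completed) coupled
 * attempt, state_ptr selects the OMAP4 or OMAP5 table at init time, and
 * mpu_lock serialises the MPU power-state voting in omap_enter_idle_smp().
 */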
static atomic_t abort_barrier;
static bool cpu_done[MAX_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
static DEFINE_RAW_SPINLOCK(mpu_lock);

/* Private functions */

/**
 * omap_enter_idle_[simple/smp/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state entered.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
            struct cpuidle_driver *drv,
            int index)
{
    omap_do_wfi();
    return index;
}

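/*
 * omap_enter_idle_smp() implements a simple voting scheme: each online
 * CPU entering this state takes mpu_lock and bumps mpu_state_vote, and
 * the last CPU to vote programs the MPU power domain for the target
 * state. On wakeup, the first CPU to run (while the vote count is still
 * at num_online_cpus()) restores the MPU power domain to ON before
 * dropping its vote.
 */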
static int omap_enter_idle_smp(struct cpuidle_device *dev,
                   struct cpuidle_driver *drv,
                   int index)
{
    struct idle_statedata *cx = state_ptr + index;
    unsigned long flag;

    raw_spin_lock_irqsave(&mpu_lock, flag);
    cx->mpu_state_vote++;
    if (cx->mpu_state_vote == num_online_cpus()) {
        pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
        omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
    }
    raw_spin_unlock_irqrestore(&mpu_lock, flag);

    omap4_enter_lowpower(dev->cpu, cx->cpu_state);

    raw_spin_lock_irqsave(&mpu_lock, flag);
    if (cx->mpu_state_vote == num_online_cpus())
        omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
    cx->mpu_state_vote--;
    raw_spin_unlock_irqrestore(&mpu_lock, flag);

    return index;
}

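/*
 * omap_enter_idle_coupled() runs on both CPUs via the coupled-cpuidle
 * framework. CPU0 acts as the master: it waits for CPU1 to reach OFF,
 * programs the MPUSS power domain, and wakes CPU1 back up afterwards,
 * falling back to the C1 (index 0) settings if the cluster PM notifier
 * chain refuses the transition.
 */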
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
            struct cpuidle_driver *drv,
            int index)
{
    struct idle_statedata *cx = state_ptr + index;
    u32 mpuss_can_lose_context = 0;
    int error;

    /*
     * CPU0 has to wait and stay ON until CPU1 is in OFF state.
     * This is necessary to honour the hardware recommendation
     * of triggering all the possible low power modes once CPU1 is
     * out of coherency and in OFF mode.
     */
    if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
        while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
            cpu_relax();

            /*
             * CPU1 could have already entered & exited idle
             * without hitting off because of a wakeup
             * or a failed attempt to hit off mode.  Check for
             * that here, otherwise we could spin forever
             * waiting for CPU1 off.
             */
            if (cpu_done[1])
                goto fail;

        }
    }

    mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
                 (cx->mpu_logic_state == PWRDM_POWER_OFF);

    /* Enter broadcast mode for periodic timers */
    RCU_NONIDLE(tick_broadcast_enable());

    /* Enter broadcast mode for one-shot timers */
    RCU_NONIDLE(tick_broadcast_enter());

    /*
     * Call idle CPU PM enter notifier chain so that
     * VFP and per CPU interrupt context is saved.
     */
    error = cpu_pm_enter();
    if (error)
        goto cpu_pm_out;

    if (dev->cpu == 0) {
        pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
        RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));

        /*
         * Call idle CPU cluster PM enter notifier chain
         * to save GIC and wakeupgen context.
         */
        if (mpuss_can_lose_context) {
            error = cpu_cluster_pm_enter();
            if (error) {
                index = 0;
                cx = state_ptr + index;
                pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
                RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
                mpuss_can_lose_context = 0;
            }
        }
    }

    omap4_enter_lowpower(dev->cpu, cx->cpu_state);
    cpu_done[dev->cpu] = true;

    /* Wake up CPU1 only if it is not offline */
    if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

        if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
            mpuss_can_lose_context)
            gic_dist_disable();

        RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
        RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
        RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));

        if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
            mpuss_can_lose_context) {
            while (gic_dist_disabled()) {
                udelay(1);
                cpu_relax();
            }
            gic_timer_retrigger();
        }
    }

    /*
     * Call idle CPU cluster PM exit notifier chain
     * to restore GIC and wakeupgen context.
     */
    if (dev->cpu == 0 && mpuss_can_lose_context)
        cpu_cluster_pm_exit();

    /*
     * Call idle CPU PM exit notifier chain to restore
     * VFP and per CPU IRQ context.
     */
    cpu_pm_exit();

cpu_pm_out:
    RCU_NONIDLE(tick_broadcast_exit());

fail:
    cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
    cpu_done[dev->cpu] = false;

    return index;
}

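/*
 * Note: the exit_latency and target_residency values below are in
 * microseconds, as the cpuidle framework expects. The two-term
 * exit_latency sums appear to account for the CPU and MPUSS wakeup
 * costs separately, though that split is not documented here.
 */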
static struct cpuidle_driver omap4_idle_driver = {
    .name               = "omap4_idle",
    .owner              = THIS_MODULE,
    .states = {
        {
            /* C1 - CPU0 ON + CPU1 ON + MPU ON */
            .exit_latency = 2 + 2,
            .target_residency = 5,
            .enter = omap_enter_idle_simple,
            .name = "C1",
            .desc = "CPUx ON, MPUSS ON"
        },
        {
            /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
            .exit_latency = 328 + 440,
            .target_residency = 960,
            .flags = CPUIDLE_FLAG_COUPLED,
            .enter = omap_enter_idle_coupled,
            .name = "C2",
            .desc = "CPUx OFF, MPUSS CSWR",
        },
        {
            /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
            .exit_latency = 460 + 518,
            .target_residency = 1100,
            .flags = CPUIDLE_FLAG_COUPLED,
            .enter = omap_enter_idle_coupled,
            .name = "C3",
            .desc = "CPUx OFF, MPUSS OSWR",
        },
    },
    .state_count = ARRAY_SIZE(omap4_idle_data),
    .safe_state_index = 0,
};

static struct cpuidle_driver omap5_idle_driver = {
    .name               = "omap5_idle",
    .owner              = THIS_MODULE,
    .states = {
        {
            /* C1 - CPU0 ON + CPU1 ON + MPU ON */
            .exit_latency = 2 + 2,
            .target_residency = 5,
            .enter = omap_enter_idle_simple,
            .name = "C1",
            .desc = "CPUx WFI, MPUSS ON"
        },
        {
            /* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
            .exit_latency = 48 + 60,
            .target_residency = 100,
            .flags = CPUIDLE_FLAG_TIMER_STOP,
            .enter = omap_enter_idle_smp,
            .name = "C2",
            .desc = "CPUx CSWR, MPUSS CSWR",
        },
    },
    .state_count = ARRAY_SIZE(omap5_idle_data),
    .safe_state_index = 0,
};

/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4+ idle
 *
 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
    struct cpuidle_driver *idle_driver;

    if (soc_is_omap54xx()) {
        state_ptr = &omap5_idle_data[0];
        idle_driver = &omap5_idle_driver;
    } else {
        state_ptr = &omap4_idle_data[0];
        idle_driver = &omap4_idle_driver;
    }

    mpu_pd = pwrdm_lookup("mpu_pwrdm");
    cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
    cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
    if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
        return -ENODEV;

    cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
    cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
    if (!cpu_clkdm[0] || !cpu_clkdm[1])
        return -ENODEV;

    return cpuidle_register(idle_driver, cpu_online_mask);
}