/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *  Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"

#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

    .arch armv7-a

#if defined(CONFIG_SMP) && defined(CONFIG_PM)

    .arch_extension sec
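/*
 * Issue a secure monitor call. The DSBs before and after the SMC
 * ensure that all outstanding memory accesses complete before the
 * monitor is entered and before execution continues afterwards.
 */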
.macro  DO_SMC
    dsb
    smc #0
    dsb
.endm

#ifdef CONFIG_ARCH_OMAP4

/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU
 * power-down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *  0 - No context lost
 *  1 - CPUx L1 and logic lost: MPUSS CSWR
 *  2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *  3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for the CPU OFF and DORMANT power
 * states. Post WFI, the CPU transitions to DORMANT or OFF power state and
 * on wake-up follows a full CPU reset path via ROM code to the CPU restore
 * code. The restore function pointer is stored at
 * CPUx_WAKEUP_NS_PA_ADDR_OFFSET. The function returns to the caller for the
 * CPU INACTIVE and ON power states, or if the CPU failed to transition to
 * the targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
 * the stack frame and expects the caller to take care of it. Hence the
 * entire register frame is saved here to avoid possible stack corruption.
 */
ENTRY(omap4_finish_suspend)
    stmfd   sp!, {r4-r12, lr}
    cmp r0, #0x0
    beq do_WFI              @ No low-power state, jump to WFI

    /*
     * Flush all data from the L1 data cache before disabling
     * the SCTLR.C bit.
     */
    bl  omap4_get_sar_ram_base
    ldr r9, [r0, #OMAP_TYPE_OFFSET]
    cmp r9, #0x1            @ Check for HS device
    bne skip_secure_l1_clean
    mov r0, #SCU_PM_NORMAL
    mov r1, #0xFF           @ Clean secure L1
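    /*
     * The secure monitor may clobber r4-r12 and lr, so preserve
     * them across the SMC.
     */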
    stmfd   r13!, {r4-r12, r14}
    ldr r12, =OMAP4_MON_SCU_PWR_INDEX
    DO_SMC
    ldmfd   r13!, {r4-r12, r14}
skip_secure_l1_clean:
    bl  v7_flush_dcache_all

    /*
     * Clear the SCTLR.C bit to prevent further data cache
     * allocation. With SCTLR.C cleared, all data accesses are
     * strongly ordered and no longer hit the cache.
     */
    mrc p15, 0, r0, c1, c0, 0
    bic r0, r0, #(1 << 2)       @ Disable the C bit
    mcr p15, 0, r0, c1, c0, 0
    isb

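    /*
     * Invalidate the L1 data cache; the flush above left it clean,
     * and invalidating ensures no valid lines remain while the CPU
     * is out of coherency.
     */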
    bl  v7_invalidate_l1

    /*
     * Switch the CPU from Symmetric Multiprocessing (SMP) mode
     * to Asymmetric Multiprocessing (AMP) mode by programming
     * the SCU power status to DORMANT or OFF mode.
     * This takes the CPU out of coherency by preventing it from
     * receiving cache, TLB, or BTB maintenance operations
     * broadcast by other CPUs in the cluster.
     */
    bl  omap4_get_sar_ram_base
    mov r8, r0
    ldr r9, [r8, #OMAP_TYPE_OFFSET]
    cmp r9, #0x1            @ Check for HS device
    bne scu_gp_set
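    /*
     * HS device: program the per-CPU SCU power state saved in SAR
     * RAM through the OMAP4_MON_SCU_PWR_INDEX monitor service.
     */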
    mrc p15, 0, r0, c0, c0, 5       @ Read MPIDR
    ands    r0, r0, #0x0f
    ldreq   r0, [r8, #SCU_OFFSET0]
    ldrne   r0, [r8, #SCU_OFFSET1]
    mov r1, #0x00
    stmfd   r13!, {r4-r12, r14}
    ldr r12, =OMAP4_MON_SCU_PWR_INDEX
    DO_SMC
    ldmfd   r13!, {r4-r12, r14}
    b   skip_scu_gp_set
scu_gp_set:
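    /*
     * GP device: pick this CPU's saved target state (CPU0 uses
     * SCU_OFFSET0, CPU1 uses SCU_OFFSET1) and program the SCU
     * power mode register directly via scu_power_mode().
     */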
    mrc p15, 0, r0, c0, c0, 5       @ Read MPIDR
    ands    r0, r0, #0x0f
    ldreq   r1, [r8, #SCU_OFFSET0]
    ldrne   r1, [r8, #SCU_OFFSET1]
    bl  omap4_get_scu_base
    bl  scu_power_mode
skip_scu_gp_set:
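    /*
     * Clear the ACTLR.SMP bit to exit coherency, but only when
     * NSACR bit 18 (NS_SMP) grants non-secure writes to it.
     */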
    mrc p15, 0, r0, c1, c1, 2       @ Read NSACR data
    tst r0, #(1 << 18)
    mrcne   p15, 0, r0, c1, c0, 1
    bicne   r0, r0, #(1 << 6)       @ Disable SMP bit
    mcrne   p15, 0, r0, c1, c0, 1
    isb
    dsb
#ifdef CONFIG_CACHE_L2X0
    /*
     * Clean and invalidate the L2 cache.
     * The common cache-l2x0.c functions can't be used here since
     * they take spinlocks. We are out of coherency here with the
     * data cache disabled. The spinlock implementation uses
     * exclusive load/store instructions, which can fail when the
     * data cache is disabled. OMAP4 hardware has no external
     * exclusive monitor that could overcome this exclusive-access
     * issue, so using the spinlocks could deadlock the CPU.
     */
    bl  omap4_get_sar_ram_base
    mov r8, r0
    mrc p15, 0, r5, c0, c0, 5       @ Read MPIDR
    ands    r5, r5, #0x0f
    ldreq   r0, [r8, #L2X0_SAVE_OFFSET0]    @ Retrieve L2 state from SAR
    ldrne   r0, [r8, #L2X0_SAVE_OFFSET1]    @ memory.
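    /*
     * Only the MPUSS OFF state (3) loses the L2 contents; skip the
     * L2 clean and invalidate for all other target states.
     */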
    cmp r0, #3
    bne do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
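    /*
     * Erratum 727915 workaround: set the PL310 debug control
     * register through the secure monitor for the duration of the
     * clean and invalidate by way (it is cleared again below).
     */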
    mov r0, #0x03
    mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
    DO_SMC
#endif
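    /*
     * Clean and invalidate all 16 ways of the L2 cache, then poll
     * until the way bits clear to signal completion.
     */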
    bl  omap4_get_l2cache_base
    mov r2, r0
    ldr r0, =0xffff
    str r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
    ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
    ldr r1, =0xffff
    ands    r0, r0, r1
    bne wait
#ifdef CONFIG_PL310_ERRATA_727915
    mov r0, #0x00
    mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
    DO_SMC
#endif
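    /*
     * Drain the PL310 buffers with a cache sync operation and wait
     * for it to complete.
     */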
l2x_sync:
    bl  omap4_get_l2cache_base
    mov r2, r0
    mov r0, #0x0
    str r0, [r2, #L2X0_CACHE_SYNC]
sync:
    ldr r0, [r2, #L2X0_CACHE_SYNC]
    ands    r0, r0, #0x1
    bne sync
#endif

do_WFI:
    bl  omap_do_wfi

    /*
     * The CPU ends up here when it failed to enter OFF/DORMANT or
     * when no low-power state was attempted.
     */
    mrc p15, 0, r0, c1, c0, 0
    tst r0, #(1 << 2)           @ Check C bit enabled?
    orreq   r0, r0, #(1 << 2)       @ Enable the C bit
    mcreq   p15, 0, r0, c1, c0, 0
    isb

    /*
     * Ensure the CPU power state is set back to NORMAL in the SCU
     * so that the CPU rejoins coherency. In non-coherent mode the
     * CPU can lock up and lead to a system deadlock.
     */
    mrc p15, 0, r0, c1, c0, 1
    tst r0, #(1 << 6)           @ Check SMP bit enabled?
    orreq   r0, r0, #(1 << 6)
    mcreq   p15, 0, r0, c1, c0, 1
    isb
    bl  omap4_get_sar_ram_base
    mov r8, r0
    ldr r9, [r8, #OMAP_TYPE_OFFSET]
    cmp r9, #0x1            @ Check for HS device
    bne scu_gp_clear
    mov r0, #SCU_PM_NORMAL
    mov r1, #0x00
    stmfd   r13!, {r4-r12, r14}
    ldr r12, =OMAP4_MON_SCU_PWR_INDEX
    DO_SMC
    ldmfd   r13!, {r4-r12, r14}
    b   skip_scu_gp_clear
scu_gp_clear:
    bl  omap4_get_scu_base
    mov r1, #SCU_PM_NORMAL
    bl  scu_power_mode
skip_scu_gp_clear:
    isb
    dsb
    ldmfd   sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)

/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of this function is
 * stored in the SAR RAM when entering OFF or DORMANT mode; the
 * restore function pointer lives at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
ENTRY(omap4_cpu_resume)
    /*
     * Configure ACTLR and enable NS SMP bit access on CPU1 on HS
     * devices. On OMAP44XX EMU/HS devices, CPU0 SMP bit access is
     * enabled in PPA init, and for CPU1 a secure PPA API is
     * provided. CPU0 must be ON while executing the NS_SMP API on
     * CPU1, and the PPA version must be 1.4.0 or later.
     * OMAP443X GP devices - SMP bit isn't accessible.
     * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
     */
    ldr r8, =OMAP44XX_SAR_RAM_BASE
    ldr r9, [r8, #OMAP_TYPE_OFFSET]
    cmp r9, #0x1            @ Skip if GP device
    bne skip_ns_smp_enable
    mrc     p15, 0, r0, c0, c0, 5
    ands    r0, r0, #0x0f
    beq skip_ns_smp_enable
ppa_actrl_retry:
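    /*
     * Invoke the secure PPA service that enables ACTLR.SMP for
     * this CPU; the API returns 0 on success, so retry until it
     * does.
     */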
    mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
    adr r1, ppa_zero_params_offset
    ldr r3, [r1]
    add r3, r3, r1          @ Pointer to ppa_zero_params
    mov r1, #0x0            @ Process ID
    mov r2, #0x4            @ Flag
    mov r6, #0xff
    mov r12, #0x00          @ Secure Service ID
    DO_SMC
    cmp r0, #0x0            @ API returns 0 on success.
    beq enable_smp_bit
    b   ppa_actrl_retry
enable_smp_bit:
    mrc p15, 0, r0, c1, c0, 1
    tst r0, #(1 << 6)           @ Check SMP bit enabled?
    orreq   r0, r0, #(1 << 6)
    mcreq   p15, 0, r0, c1, c0, 1
    isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
    /*
     * Restore the L2 AUXCTRL and enable the L2 cache.
     * OMAP4_MON_L2X0_AUXCTRL_INDEX = program the L2X0 AUXCTRL
     * OMAP4_MON_L2X0_CTRL_INDEX = enable the L2 using the L2X0
     * CTRL register; r0 contains the value to be programmed.
     * The L2 cache is already invalidated by ROM code as part
     * of the MPUSS OFF wakeup path.
     */
    ldr r2, =OMAP44XX_L2CACHE_BASE
    ldr r0, [r2, #L2X0_CTRL]
    and r0, #0x0f
    cmp r0, #1
    beq skip_l2en           @ Skip if already enabled
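    /*
     * Restore the L2 prefetch control value saved in SAR RAM:
     * through the PPA L2 POR service on HS devices, or through the
     * OMAP4_MON_L2X0_PREFETCH_INDEX monitor call on GP devices.
     */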
    ldr r3, =OMAP44XX_SAR_RAM_BASE
    ldr r1, [r3, #OMAP_TYPE_OFFSET]
    cmp r1, #0x1            @ Check for HS device
    bne     set_gp_por
    ldr     r0, =OMAP4_PPA_L2_POR_INDEX
    ldr     r1, =OMAP44XX_SAR_RAM_BASE
    ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
    adr     r1, ppa_por_params_offset
    ldr r3, [r1]
    add r3, r3, r1          @ Pointer to ppa_por_params
    str     r4, [r3, #0x04]
    mov r1, #0x0            @ Process ID
    mov r2, #0x4            @ Flag
    mov r6, #0xff
    mov r12, #0x00          @ Secure Service ID
    DO_SMC
    b   set_aux_ctrl
set_gp_por:
    ldr     r1, =OMAP44XX_SAR_RAM_BASE
    ldr     r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
    ldr r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH
    DO_SMC
set_aux_ctrl:
    ldr     r1, =OMAP44XX_SAR_RAM_BASE
    ldr r0, [r1, #L2X0_AUXCTRL_OFFSET]
    ldr r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX  @ Setup L2 AUXCTRL
    DO_SMC
    mov r0, #0x1
    ldr r12, =OMAP4_MON_L2X0_CTRL_INDEX     @ Enable L2 cache
    DO_SMC
skip_l2en:
#endif

    b   cpu_resume          @ Jump to generic resume
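/*
 * PC-relative offset to ppa_por_params in .data; the code above
 * resolves it at run time with adr/ldr/add, keeping the reference
 * position independent.
 */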
ppa_por_params_offset:
    .long   ppa_por_params - .
ENDPROC(omap4_cpu_resume)
#endif  /* CONFIG_ARCH_OMAP4 */

#endif  /* defined(CONFIG_SMP) && defined(CONFIG_PM) */

ENTRY(omap_do_wfi)
    stmfd   sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
    /* Drain interconnect write buffers. */
    bl  omap_interconnect_sync
#endif

    /*
     * Execute an ISB instruction to ensure that all of the
     * CP15 register changes have been committed.
     */
    isb

    /*
     * Execute a barrier instruction to ensure that all cache,
     * TLB and branch predictor maintenance operations issued
     * by any CPU in the cluster have completed.
     */
    dsb
    dmb

    /*
     * Execute a WFI instruction and wait until the STANDBYWFI
     * output is asserted to indicate that the CPU is in the idle
     * and low-power state. The CPU can speculatively prefetch
     * instructions, so add NOPs after the WFI: sixteen of them,
     * matching the Cortex-A9 pipeline.
     */
    wfi                 @ Wait For Interrupt
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop

    ldmfd   sp!, {pc}
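/*
 * PC-relative offset to ppa_zero_params in .data, placed here so
 * the adr in ppa_actrl_retry can reach it and resolve the address
 * at run time.
 */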
ppa_zero_params_offset:
    .long   ppa_zero_params - .
ENDPROC(omap_do_wfi)

    .data
    .align  2
ppa_zero_params:
    .word       0

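/*
 * The second word is filled in at run time with the L2 prefetch
 * control value read back from SAR RAM (see the str to
 * [r3, #0x04] above).
 */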
ppa_por_params:
    .word       1, 0