/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level suspend code for AM33XX SoCs
 *
 * Copyright (C) 2012-2018 Texas Instruments Incorporated - https://www.ti.com/
 *  Dave Gerlach, Vaibhav Bedia
 */

#include <linux/linkage.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>

#include "iomap.h"
#include "cm33xx.h"
#include "pm-asm-offsets.h"

#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED          0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE            0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE             0x0002
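
/*
 * Field layout (per the AM335x TRM): each CM_*_CLKCTRL register has
 * MODULEMODE in bits [1:0] (0x0 = disabled, 0x2 = enabled) and IDLEST
 * in bits [17:16], where 0x3 means the module is fully disabled; the
 * values above encode those two fields.
 */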

/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr)         (1 << (nr))

    .arm
    .arch armv7-a
    .align 3

ENTRY(am33xx_do_wfi)
    stmfd   sp!, {r4 - r11, lr} @ save registers on stack

    /* Save wfi_flags arg to data space */
    mov r4, r0
    adr r3, am33xx_pm_ro_sram_data
    ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
    str r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
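
    /*
     * The flags are stashed in SRAM rather than only kept in r4
     * because the cache flush helper called below clobbers most low
     * registers; they are reloaded from this slot after the flush.
     */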

    /* Only flush the cache if we know we are losing MPU context */
    tst r4, #WFI_FLAG_FLUSH_CACHE
    beq cache_skip_flush

    /*
     * Flush all data from the L1 and L2 data cache before disabling
     * the SCTLR.C bit.
     */
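    /*
     * kernel_flush is a literal pool entry holding the virtual address
     * of v7_flush_dcache_all; the call is indirect because this code
     * executes from a copy in SRAM, out of PC-relative branch range of
     * the kernel image.
     */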
    ldr r1, kernel_flush
    blx r1

    /*
     * Clear the SCTLR.C bit to prevent further data cache
     * allocation. Clearing SCTLR.C makes all data accesses
     * strongly ordered, so they no longer hit the cache.
     */
    mrc p15, 0, r0, c1, c0, 0
    bic r0, r0, #(1 << 2)   @ Disable the C bit
    mcr p15, 0, r0, c1, c0, 0
    isb

    /*
     * Clean and invalidate the L1 and L2 data caches again to evict
     * any lines allocated before the C bit was cleared.
     */
    ldr r1, kernel_flush
    blx r1

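    /*
     * v7_flush_dcache_all corrupts r0-r7 and r9-r11, so reload the
     * wfi_flags argument from the SRAM slot it was saved to on entry.
     */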
    adr r3, am33xx_pm_ro_sram_data
    ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
    ldr r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

cache_skip_flush:
    /* Check if we want self refresh */
    tst r4, #WFI_FLAG_SELF_REFRESH
    beq emif_skip_enter_sr

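    /*
     * am33xx_emif_sram_table is filled in at runtime by the C-side PM
     * code with the entry points of the SRAM-resident EMIF PM helpers;
     * r9 keeps its base so each helper can be called through its
     * EMIF_PM_*_OFFSET slot.
     */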
    adr r9, am33xx_emif_sram_table

    ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
    blx r3

emif_skip_enter_sr:
    /* Only necessary if PER is losing context */
    tst r4, #WFI_FLAG_SAVE_EMIF
    beq emif_skip_save

    ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
    blx r3

emif_skip_save:
    /* We can only disable the EMIF if we have entered self refresh */
    tst     r4, #WFI_FLAG_SELF_REFRESH
    beq     emif_skip_disable

    /* Disable EMIF */
    ldr     r1, virt_emif_clkctrl
    ldr     r2, [r1]
    bic     r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
    str     r2, [r1]

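    /*
     * Poll until the register reads back exactly 0x00030000, i.e.
     * IDLEST = 0x3 (module disabled) with MODULEMODE cleared.
     */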
    ldr r1, virt_emif_clkctrl
wait_emif_disable:
    ldr r2, [r1]
    mov r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
    cmp r2, r3
    bne wait_emif_disable

emif_skip_disable:
    tst r4, #WFI_FLAG_WAKE_M3
    beq wkup_m3_skip

    /*
     * For the MPU WFI to be registered as an interrupt
     * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
     * to DISABLED
     */
    ldr r1, virt_mpu_clkctrl
    ldr r2, [r1]
    bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
    str r2, [r1]

wkup_m3_skip:
    /*
     * Execute an ISB instruction to ensure that all of the
     * CP15 register changes have been committed.
     */
    isb

    /*
     * Execute barrier instructions to ensure that all cache,
     * TLB and branch predictor maintenance operations issued
     * above have completed.
     */
    dsb
    dmb

    /*
     * Execute a WFI instruction and wait until the
     * STANDBYWFI output is asserted to indicate that the
     * CPU is in an idle and low power state. The CPU can
     * speculatively prefetch instructions, so add NOPs after the
     * WFI: thirteen of them, as per the Cortex-A8 pipeline.
     */
    wfi

    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop

    /* We come here in case of an abort due to a late interrupt */

    /* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
    ldr r1, virt_mpu_clkctrl
    mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
    str r2, [r1]

    /* Re-enable EMIF */
    ldr r1, virt_emif_clkctrl
    mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
    str r2, [r1]
wait_emif_enable:
    ldr r3, [r1]
    cmp r2, r3
    bne wait_emif_enable

    /* Only necessary if we actually entered self refresh */
    tst r4, #WFI_FLAG_SELF_REFRESH
    beq emif_skip_exit_sr_abt

    adr r9, am33xx_emif_sram_table
    ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
    blx r1

emif_skip_exit_sr_abt:
    tst r4, #WFI_FLAG_FLUSH_CACHE
    beq cache_skip_restore

    /*
     * Set SCTLR.C bit to allow data cache allocation
     */
    mrc p15, 0, r0, c1, c0, 0
    orr r0, r0, #(1 << 2)   @ Enable the C bit
    mcr p15, 0, r0, c1, c0, 0
    isb

cache_skip_restore:
    /* Let the suspend code know about the abort */
    mov r0, #1
    ldmfd   sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(am33xx_do_wfi)

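/*
 * am33xx_resume_offset records how far the deep-sleep resume entry
 * point sits from the start of this blob, so the C-side PM code can
 * derive the resume address within the SRAM copy.
 */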
    .align
ENTRY(am33xx_resume_offset)
    .word . - am33xx_do_wfi

ENTRY(am33xx_resume_from_deep_sleep)
    /* Re-enable EMIF */
    ldr r0, phys_emif_clkctrl
    mov r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
    str r1, [r0]
wait_emif_enable1:
    ldr r2, [r0]
    cmp r1, r2
    bne wait_emif_enable1

    adr r9, am33xx_emif_sram_table

    ldr r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
    blx r1

    ldr r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
    blx r1

resume_to_ddr:
    /* We are back. Branch to the common CPU resume routine */
    mov r0, #0
    ldr pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)

/*
 * Local variables
 */
    .align
kernel_flush:
    .word   v7_flush_dcache_all
virt_mpu_clkctrl:
    .word   AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
    .word   AM33XX_CM_PER_EMIF_CLKCTRL
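/*
 * The WFI path above runs with the MMU on and uses the virtual CLKCTRL
 * mappings; the deep-sleep resume path runs before the MMU is enabled
 * and therefore needs the physical address of the same EMIF register.
 */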
phys_emif_clkctrl:
    .word   (AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
        AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)

.align 3
/* DDR related defines */
am33xx_emif_sram_table:
    .space EMIF_PM_FUNCTIONS_SIZE

ENTRY(am33xx_pm_sram)
    .word am33xx_do_wfi
    .word am33xx_do_wfi_sz
    .word am33xx_resume_offset
    .word am33xx_emif_sram_table
    .word am33xx_pm_ro_sram_data

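/*
 * cpu_resume is entered with the MMU still off, so its kernel virtual
 * address is converted to the matching physical address in DDR (which
 * the AM33XX maps at 0x80000000).
 */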
resume_addr:
.word  cpu_resume - PAGE_OFFSET + 0x80000000

.align 3
ENTRY(am33xx_pm_ro_sram_data)
    .space AMX3_PM_RO_SRAM_DATA_SIZE

ENTRY(am33xx_do_wfi_sz)
    .word   . - am33xx_do_wfi
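
/*
 * Usage note: these entry points are consumed from C. The PM driver
 * (drivers/soc/ti/pm33xx.c) copies the am33xx_do_wfi blob into on-chip
 * SRAM and branches to the copy. The sketch below illustrates only the
 * calling contract visible in the assembly above -- r0 carries wfi_flags
 * in, and r0 comes back 1 when a late interrupt aborts the WFI and 0
 * when execution returns through am33xx_resume_from_deep_sleep. It is
 * not the real driver code; the am33xx_do_wfi_sram pointer and the
 * am33xx_suspend_to_mem() helper are assumed names for illustration.
 *
 *	#include <linux/errno.h>
 *	#include <linux/platform_data/pm33xx.h>
 *
 *	// Assumed to point at the copy of am33xx_do_wfi already placed
 *	// in executable SRAM; takes wfi_flags in r0, returns in r0.
 *	static int (*am33xx_do_wfi_sram)(unsigned long wfi_flags);
 *
 *	static int am33xx_suspend_to_mem(void)
 *	{
 *		// Deep sleep: lose MPU context, put DDR in self-refresh,
 *		// save EMIF context and signal the wakeup M3.
 *		unsigned long flags = WFI_FLAG_FLUSH_CACHE |
 *				      WFI_FLAG_SELF_REFRESH |
 *				      WFI_FLAG_SAVE_EMIF |
 *				      WFI_FLAG_WAKE_M3;
 *
 *		// Nonzero means a late interrupt aborted the WFI and the
 *		// abort path re-enabled the MPU and EMIF clocks itself.
 *		return am33xx_do_wfi_sram(flags) ? -EBUSY : 0;
 *	}
 */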