/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
	.text

/*
 * Implementation of the MPIDR hash algorithm through shifting
 * and OR'ing.
 *
 * @dst: register containing hash result
 * @rs0: register containing affinity level 0 bit shift
 * @rs1: register containing affinity level 1 bit shift
 * @rs2: register containing affinity level 2 bit shift
 * @mpidr: register containing MPIDR value
 * @mask: register containing MPIDR mask
 */
.macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask
	and	\mpidr, \mpidr, \mask		@ mask out MPIDR bits
	and	\dst, \mpidr, #0xff		@ mask = aff0
 ARM(	mov	\dst, \dst, lsr \rs0	)	@ dst = aff0 >> rs0
 THUMB(	lsr	\dst, \dst, \rs0	)
	and	\mask, \mpidr, #0xff00		@ mask = aff1
 ARM(	orr	\dst, \dst, \mask, lsr \rs1	)	@ dst |= (aff1 >> rs1)
 THUMB(	lsr	\mask, \mask, \rs1	)
 THUMB(	orr	\dst, \dst, \mask	)
	and	\mask, \mpidr, #0xff0000	@ mask = aff2
 ARM(	orr	\dst, \dst, \mask, lsr \rs2	)	@ dst |= (aff2 >> rs2)
 THUMB(	lsr	\mask, \mask, \rs2	)
 THUMB(	orr	\dst, \dst, \mask	)
.endm
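
/*
 * Pseudo-C sketch of what the macro above computes, with the same
 * roles as the macro parameters (illustration only, nothing below is
 * built):
 *
 *	u32 compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2,
 *			       u32 mpidr, u32 mask)
 *	{
 *		u32 m = mpidr & mask;		// mask out MPIDR bits
 *		u32 aff0 = m & 0xff;		// affinity level 0 field
 *		u32 aff1 = m & 0xff00;		// affinity level 1 field
 *		u32 aff2 = m & 0xff0000;	// affinity level 2 field
 *		return (aff0 >> rs0) | (aff1 >> rs1) | (aff2 >> rs2);
 *	}
 */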

/*
 * Save CPU state for a suspend.  This saves the CPU general purpose
 * registers, and allocates space on the kernel stack to save the CPU
 * specific registers and some other data for resume.
 *  r0 = suspend function arg0
 *  r1 = suspend function
 *  r2 = MPIDR value the resuming CPU will use
 */
ENTRY(__cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}
#ifdef MULTI_CPU
	ldr	r10, =processor
	ldr	r4, [r10, #CPU_SLEEP_SIZE]	@ size of CPU sleep state
#else
	ldr	r4, =cpu_suspend_size
#endif
	mov	r5, sp				@ current virtual SP
#ifdef CONFIG_VMAP_STACK
	@ Run the suspend code from the overflow stack so we don't have to rely
	@ on vmalloc-to-phys conversions anywhere in the arch suspend code.
	@ The original SP value captured in R5 will be restored on the way out.
	ldr_this_cpu sp, overflow_stack_ptr, r6, r7
#endif
	add	r4, r4, #12			@ Space for pgd, virt sp, phys resume fn
	sub	sp, sp, r4			@ allocate CPU state on stack
	ldr	r3, =sleep_save_sp
	stmfd	sp!, {r0, r1}			@ save suspend func arg and pointer
	ldr	r3, [r3, #SLEEP_SAVE_SP_VIRT]
	ALT_SMP(W(nop))				@ don't use adr_l inside ALT_SMP()
	ALT_UP_B(1f)
	adr_l	r0, mpidr_hash
	/* This ldmia relies on the memory layout of struct mpidr_hash */
	ldmia	r0, {r1, r6-r8}		@ r1 = mpidr mask, (r6,r7,r8) = l[0,1,2] shifts
	compute_mpidr_hash	r0, r6, r7, r8, r2, r1
	add	r3, r3, r0, lsl #2	@ r3 = this CPU's sleep_save_sp slot
1:	mov	r2, r5			@ virtual SP
	mov	r1, r4			@ size of save block
	add	r0, sp, #8		@ pointer to save block
	bl	__cpu_suspend_save
	badr	lr, cpu_suspend_abort	@ return to the abort path if the
					@ suspend fn ever returns
	ldmfd	sp!, {r0, pc}		@ call suspend fn
ENDPROC(__cpu_suspend)
	.ltorg
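
/*
 * For orientation, the save block handed to __cpu_suspend_save can be
 * pictured as the hypothetical struct below (illustration only; the
 * real layout is fixed by the code above and by __cpu_suspend_save in
 * arch/arm/kernel/suspend.c):
 *
 *	struct suspend_save_block {	// not a real kernel type
 *		u32 phys_pgd;		// physical pgd for the resume path
 *		u32 virt_sp;		// virtual SP captured in r5
 *		u32 phys_resume_fn;	// physical address of the resume fn
 *		u8  cpu_state[];	// CPU_SLEEP_SIZE bytes of
 *					// processor-specific state
 *	};
 *
 * The two words pushed by the stmfd above (suspend fn arg and pointer)
 * sit below this block on the stack, which is why r0 is set to sp + 8.
 */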
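/*
 * Landing pad if the suspend fn returns instead of powering the CPU
 * down: discard the block reserved above, restore the original SP, and
 * make sure r0 (the suspend fn's return value) is non-zero so the
 * caller can tell the suspend was aborted.
 */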
cpu_suspend_abort:
	ldmia	sp!, {r1 - r3}		@ pop phys pgd, virt SP, phys resume fn
	teq	r0, #0
	moveq	r0, #1			@ force non-zero value
	mov	sp, r2			@ restore the original virtual SP
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_suspend_abort)

/*
 * r0 = control register value
 */
	.align	5
	.pushsection	.idmap.text,"ax"
ENTRY(cpu_resume_mmu)
	ldr	r3, =cpu_resume_after_mmu
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
	instr_sync
	mov	r0, r0			@ two harmless nops to let the
	mov	r0, r0			@ pipeline settle after the MMU switch
	ret	r3			@ jump to virtual address
ENDPROC(cpu_resume_mmu)
	.popsection
cpu_resume_after_mmu:
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
	@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
	@ as the ID map does not cover the vmalloc region.
	mrc	p15, 0, ip, c2, c0, 1	@ read TTBR1
	mcr	p15, 0, ip, c2, c0, 0	@ set TTBR0
	instr_sync
#endif
	bl	cpu_init		@ restore the und/abt/irq banked regs
	mov	r0, #0			@ return zero on success
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)

	.text
	.align

#ifdef CONFIG_MCPM
	.arm
THUMB(	.thumb		)
ENTRY(cpu_resume_no_hyp)
ARM_BE8(setend be)			@ ensure we are in BE mode
	b	no_hyp
#endif

#ifdef CONFIG_MMU
	.arm
ENTRY(cpu_resume_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb		)		@ switch to Thumb now.
 THUMB(1:		)
#endif

ENTRY(cpu_resume)
ARM_BE8(setend be)			@ ensure we are in BE mode
#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r1
no_hyp:
	mov	r1, #0
	ALT_SMP(mrc	p15, 0, r0, c0, c0, 5)	@ read MPIDR
	ALT_UP_B(1f)
	adr_l	r2, mpidr_hash		@ r2 = struct mpidr_hash phys address
	/*
	 * This ldmia relies on the memory layout of struct mpidr_hash.
	 */
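	/*
	 * Layout assumed for that load (struct mpidr_hash from
	 * <asm/smp_plat.h>, correct at the time of writing; verify
	 * against the header if the struct changes):
	 *
	 *	struct mpidr_hash {
	 *		u32 mask;
	 *		u32 shift_aff[3];
	 *		u32 bits;
	 *	};
	 *
	 * so one ldmia fetches the mask and the three affinity shifts.
	 */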
	ldmia	r2, { r3-r6 }		@ r3 = mpidr mask, (r4,r5,r6) = l[0,1,2] shifts
	compute_mpidr_hash	r1, r4, r5, r6, r0, r3
1:
	ldr_l	r0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
	ldr	r0, [r0, r1, lsl #2]	@ r0 = phys addr of this CPU's save block

	@ load phys pgd, stack, resume fn
 ARM(	ldmia	r0!, {r1, sp, pc}	)
THUMB(	ldmia	r0!, {r1, r2, r3}	)
THUMB(	mov	sp, r2		)
THUMB(	bx	r3		)
ENDPROC(cpu_resume)

#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
#ifdef CONFIG_MCPM
ENDPROC(cpu_resume_no_hyp)
#endif
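
/*
 * Pseudo-C sketch of the lookup performed by cpu_resume above
 * (illustration only; the helper names other than sleep_save_sp and
 * mpidr_hash are made up):
 *
 *	u32 hash = is_smp() ? mpidr_hash_of(read_mpidr()) : 0;
 *	u32 **stash = (u32 **)sleep_save_sp.save_ptr_stash_phys;
 *	u32 *block = stash[hash];	// this CPU's save block (phys)
 *	// block[0] = phys pgd, block[1] = SP, block[2] = phys resume fn
 *	set_sp(block[1]);
 *	jump_to(block[2]);		// r1 carries block[0] (the pgd)
 */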

	.data
	.align	2
	.type	sleep_save_sp, #object
ENTRY(sleep_save_sp)
	.space	SLEEP_SAVE_SP_SZ	@ struct sleep_save_sp
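
/*
 * For reference, struct sleep_save_sp comes from <asm/suspend.h>
 * (correct at the time of writing; check the header before relying
 * on it):
 *
 *	struct sleep_save_sp {
 *		u32 *save_ptr_stash;		// virt: array of save-block ptrs
 *		u32 save_ptr_stash_phys;	// phys address of that array
 *	};
 *
 * SLEEP_SAVE_SP_VIRT and SLEEP_SAVE_SP_PHYS used above are the
 * asm-offsets of these two members, and SLEEP_SAVE_SP_SZ its size.
 */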