/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hibernate low-level support
 *
 * Copyright (C) 2016 ARM Ltd.
 * Author: James Morse <james.morse@arm.com>
 */
#include <linux/linkage.h>
#include <linux/errno.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/virt.h>

/*
 * Resume from hibernate
 *
 * Loads temporary page tables, then restores the memory image.
 * Finally branches to cpu_resume() to restore the state saved by
 * swsusp_arch_suspend().
 *
 * Because this code has to be copied to a 'safe' page, it can't call out to
 * other functions by PC-relative address. Also remember that it may be
 * mid-way through over-writing other functions. For this reason it contains
 * code from caches_clean_inval_pou() and uses the copy_page() macro.
 *
 * This 'safe' page is mapped via ttbr0, and executed from there. This function
 * switches to a copy of the linear map in ttbr1, performs the restore, then
 * switches ttbr1 to the original kernel's swapper_pg_dir.
 *
 * All of memory gets written to, including code. We need to clean the kernel
 * text to the Point of Coherence (PoC) before secondary cores can be booted.
 * Because the kernel modules and executable pages mapped to user space are
 * also written as data, we clean all pages we touch to the Point of
 * Unification (PoU).
 *
 * x0: physical address of temporary page tables
 * x1: physical address of swapper page tables
 * x2: address of cpu_resume
 * x3: linear map address of restore_pblist in the current kernel
 * x4: physical address of __hyp_stub_vectors, or 0
 * x5: physical address of a zero page that remains zero after resume
 */
.pushsection	".hibernate_exit.text", "ax"
SYM_CODE_START(swsusp_arch_suspend_exit)
	/*
	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
	 * with a break-before-make via the zero page.
	 */
	break_before_make_ttbr_switch	x5, x0, x6, x8

	mov	x21, x1
	mov	x30, x2
	mov	x24, x4
	mov	x25, x5

	/* walk the restore_pblist and use copy_page() to over-write memory */
	mov	x19, x3

1:	ldr	x10, [x19, #HIBERN_PBE_ORIG]
	mov	x0, x10
	ldr	x1, [x19, #HIBERN_PBE_ADDR]

	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9

	add	x1, x10, #PAGE_SIZE
	/* Clean the copied page to PoU - based on caches_clean_inval_pou() */
	raw_dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x10, x3
2:	/* clean D line / unified line */
	alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	2b

	ldr	x19, [x19, #HIBERN_PBE_NEXT]
	cbnz	x19, 1b
	dsb	ish		/* wait for PoU cleaning to finish */

	/* switch to the restored kernel's page tables */
	break_before_make_ttbr_switch	x25, x21, x6, x8

	ic	ialluis
	dsb	ish
	isb

	cbz	x24, 3f		/* Do we need to re-initialise EL2? */
	hvc	#0
3:	ret
SYM_CODE_END(swsusp_arch_suspend_exit)
.popsection
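
For readers more at home in C: the loop between label 1: and the "cbnz x19, 1b"
is a plain walk of the restore_pblist singly linked list, copying each saved
page back over its original location and cleaning the written lines to the PoU.
Below is a minimal C sketch of that logic. struct pbe mirrors the one in
include/linux/suspend.h (the HIBERN_PBE_* offsets the assembly uses are
generated from it by asm-offsets.c); PAGE_SIZE and clean_to_pou() are
stand-ins for this sketch only.

#include <string.h>

#define PAGE_SIZE 4096UL	/* assumption: 4K pages, for the sketch only */

/* Mirrors struct pbe from include/linux/suspend.h. */
struct pbe {
	void *address;		/* address of the copy (the safe page) */
	void *orig_address;	/* original address of the page */
	struct pbe *next;
};

/*
 * Hypothetical stand-in for the "dc cvau" loop at label 2: above,
 * which cleans each D-cache line in the range to the PoU.
 */
static void clean_to_pou(void *addr, unsigned long size)
{
	(void)addr;
	(void)size;
}

/* C rendering of the assembly loop between label 1: and "cbnz x19, 1b". */
static void restore_image(struct pbe *restore_pblist)
{
	struct pbe *pbe;

	for (pbe = restore_pblist; pbe; pbe = pbe->next) {
		/* copy_page: write the saved data back over the original page */
		memcpy(pbe->orig_address, pbe->address, PAGE_SIZE);
		/* the page may hold code (modules, user text): clean to PoU */
		clean_to_pou(pbe->orig_address, PAGE_SIZE);
	}
	/* "dsb ish" in the assembly: one barrier after the whole list, so
	 * all the PoU cleaning completes before the I-side invalidation. */
}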
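
The two break_before_make_ttbr_switch invocations are what make the ttbr1
swaps safe: translations from the old and new tables must never be live in
the TLB at the same time, so the macro (defined in
arch/arm64/include/asm/assembler.h) parks ttbr1 on the always-zero page while
the stale entries are invalidated, then installs the new tables. A simplified
sketch in C with inline assembly follows; it omits the physical-address
fixups the real macro applies and exists only to show the ordering.

/*
 * Simplified sketch of break_before_make_ttbr_switch: not the real macro.
 * Must run at EL1; omits the ttbr address fixups (e.g. for 52-bit PAs).
 */
static inline void bbm_ttbr1_switch(unsigned long zero_page_pa,
				    unsigned long new_tables_pa)
{
	asm volatile(
	"	msr	ttbr1_el1, %0\n"	/* 1. park ttbr1 on empty tables */
	"	isb\n"
	"	tlbi	vmalle1\n"		/* 2. drop stale EL1 TLB entries */
	"	dsb	nsh\n"			/*    ... and wait for that */
	"	msr	ttbr1_el1, %1\n"	/* 3. install the new tables */
	"	isb\n"
	: : "r" (zero_page_pa), "r" (new_tables_pa) : "memory");
}

Parking ttbr1 on the zero page, rather than switching directly, means any
speculative table walk during the transition can only find invalid entries,
never a mix of the two table sets.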