// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/trans_pgd.h>
#include <asm/virt.h>

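/*
 * Cleared below once the memory image has been restored, to tell the
 * hibernation core we are a resumed kernel. Defined as
 * "__visible int in_suspend __nosavedata" in kernel/power/hibernate.c.
 */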
extern int in_suspend;

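/* Do we need to reset el2? */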
#define el2_reset_needed() (is_hyp_nvhe())

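/* hyp-stub vectors, used to restore el2 during resume from hibernate. */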
extern char __hyp_stub_vectors[];

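/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */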
static int sleep_cpu = -EINVAL;

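/*
 * Values that may not change over a hibernate/resume cycle. The kernel
 * version string is compared on resume so that we never restore an image
 * generated by a different kernel build.
 */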
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};

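/* These values need to be known across a hibernate/restore. */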
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

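	/* These are needed to find the relocated kernel if built with kaslr */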
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

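	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */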
	phys_addr_t	__hyp_stub_vectors;

	u64		sleep_cpu_mpidr;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}

void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

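	/* We can't use __hyp_get_vectors() because kvm may still be loaded */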
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

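	/* Save the mpidr of the cpu we called cpu_suspend() on */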
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

static void *hibernate_page_alloc(void *arg)
{
	return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}

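/*
 * Copies length bytes, starting at src_start, into a new safe page, performs
 * cache maintenance, then idmaps the page as executable via a new set of
 * ttbr0 page tables.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text during resume.
 */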
static int create_safe_exec_page(void *src_start, size_t length,
				 phys_addr_t *phys_dst_addr)
{
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (__force void *)GFP_ATOMIC,
	};

	void *page = (void *)get_safe_page(GFP_ATOMIC);
	phys_addr_t trans_ttbr0;
	unsigned long t0sz;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
	rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
	if (rc)
		return rc;

	cpu_install_ttbr0(trans_ttbr0, t0sz);
	*phys_dst_addr = virt_to_phys(page);

	return 0;
}

#ifdef CONFIG_ARM64_MTE

static DEFINE_XARRAY(mte_pages);

static int save_tags(struct page *page, unsigned long pfn)
{
	void *tag_storage, *ret;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
		mte_free_tag_storage(ret);
	}

	return 0;
}

static void swsusp_mte_free_storage(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);

	xa_destroy(&mte_pages);
}

static int swsusp_mte_save_tags(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	int ret = 0;
	int n = 0;

	if (!system_supports_mte())
		return 0;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			if (!test_bit(PG_mte_tagged, &page->flags))
				continue;

			ret = save_tags(page, pfn);
			if (ret) {
				swsusp_mte_free_storage();
				goto out;
			}

			n++;
		}
	}
	pr_info("Saved %d MTE pages\n", n);

out:
	return ret;
}

static void swsusp_mte_restore_tags(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	int n = 0;
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		unsigned long pfn = xa_state.xa_index;
		struct page *page = pfn_to_online_page(pfn);

		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
		n++;
	}
	xa_unlock(&mte_pages);

	pr_info("Restored %d MTE pages\n", n);

	xa_destroy(&mte_pages);
}

#else	/* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
	return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif	/* CONFIG_ARM64_MTE */

int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

	if (__cpu_suspend_enter(&state)) {
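		/* make the crash dump kernel image visible/saveable */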
		crash_prepare_suspend();

		ret = swsusp_mte_save_tags();
		if (ret)
			return ret;

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
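		/* Clean kernel core startup/idle code to PoC */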
		dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
				       (unsigned long)__mmuoff_data_end);
		dcache_clean_inval_poc((unsigned long)__idmap_text_start,
				       (unsigned long)__idmap_text_end);

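		/* Clean kvm setup code to PoC */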
		if (el2_reset_needed()) {
			dcache_clean_inval_poc(
				(unsigned long)__hyp_idmap_text_start,
				(unsigned long)__hyp_idmap_text_end);
			dcache_clean_inval_poc((unsigned long)__hyp_text_start,
					       (unsigned long)__hyp_text_end);
		}

		swsusp_mte_restore_tags();

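		/* make the crash dump kernel image protected again */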
		crash_post_resume();

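		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */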
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

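		/*
		 * In case the boot kernel changed the Spectre-v4 (SSBD)
		 * mitigation state behind our back, reset it to the state
		 * this kernel expects.
		 */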
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}

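/*
 * Resume from hibernate
 *
 * Loads temporary page tables then restores the memory image.
 * Finally branches to cpu_resume() to restore the state saved by
 * swsusp_arch_suspend().
 */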
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	phys_addr_t el2_vectors;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);
	struct trans_pgd_info trans_info = {
		.trans_alloc_page	= hibernate_page_alloc,
		.trans_alloc_arg	= (void *)GFP_ATOMIC,
	};

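	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */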
	rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
				   PAGE_END);
	if (rc)
		return rc;

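	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break-before-make on the ttbr1 page tables.
	 */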
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	if (el2_reset_needed()) {
		rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
		if (rc) {
			pr_err("Failed to setup el2 vectors\n");
			return rc;
		}
	}

	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
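	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */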
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (phys_addr_t *)&hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

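	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */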
	if (el2_reset_needed())
		__hyp_set_vectors(el2_vectors);

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

	return 0;
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}