0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/suspend.h>
0011 #include <linux/export.h>
0012 #include <linux/smp.h>
0013 #include <linux/perf_event.h>
0014 #include <linux/tboot.h>
0015 #include <linux/dmi.h>
0016 #include <linux/pgtable.h>
0017
0018 #include <asm/proto.h>
0019 #include <asm/mtrr.h>
0020 #include <asm/page.h>
0021 #include <asm/mce.h>
0022 #include <asm/suspend.h>
0023 #include <asm/fpu/api.h>
0024 #include <asm/debugreg.h>
0025 #include <asm/cpu.h>
0026 #include <asm/mmu_context.h>
0027 #include <asm/cpu_device_id.h>
0028 #include <asm/microcode.h>
0029
#ifdef CONFIG_X86_32
/*
 * Save slots for the 32-bit callee-saved registers and EFLAGS.
 * __visible: presumably referenced from the low-level suspend/resume
 * assembly by name — TODO confirm against the asm counterpart.
 */
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
/* Image of the saved processor state (descriptor tables, CRs, MSRs, ...). */
struct saved_context saved_context;
0037
0038 static void msr_save_context(struct saved_context *ctxt)
0039 {
0040 struct saved_msr *msr = ctxt->saved_msrs.array;
0041 struct saved_msr *end = msr + ctxt->saved_msrs.num;
0042
0043 while (msr < end) {
0044 if (msr->valid)
0045 rdmsrl(msr->info.msr_no, msr->info.reg.q);
0046 msr++;
0047 }
0048 }
0049
0050 static void msr_restore_context(struct saved_context *ctxt)
0051 {
0052 struct saved_msr *msr = ctxt->saved_msrs.array;
0053 struct saved_msr *end = msr + ctxt->saved_msrs.num;
0054
0055 while (msr < end) {
0056 if (msr->valid)
0057 wrmsrl(msr->info.msr_no, msr->info.reg.q);
0058 msr++;
0059 }
0060 }
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
/**
 * __save_processor_state() - Save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: Structure to store the registers contents in.
 *
 * If there is a CPU register the modification of which by the boot kernel
 * (i.e. the kernel used for loading the hibernation image) might affect
 * the operation of the restored target kernel (i.e. the one saved in the
 * image), then its contents must be saved here, so that it can be restored
 * on resume regardless of what the boot kernel did in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * Descriptor tables.
	 */
	store_idt(&ctxt->idt);

	/*
	 * The GDT descriptor is saved here, but it is only restored on the
	 * hibernation path; the ACPI S3 wakeup code loads its own GDT before
	 * reaching __restore_processor_state() — NOTE(review): inferred from
	 * the save-only pattern here; confirm against the wakeup asm.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/*
	 * Segment registers.
	 */
	savesegment(gs, ctxt->gs);
#ifdef CONFIG_X86_64
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	/* Segment bases are separate MSRs on 64-bit; save them explicitly. */
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * Control registers.  MISC_ENABLE is probed with the _safe variant
	 * since not all CPUs implement it; the result flag gates the restore.
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}
0130
0131
/* Save the boot CPU's register state and sched-clock state for suspend. */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif
0140
static void do_fpu_end(void)
{
	/*
	 * Close the kernel_fpu_begin() section opened in
	 * __save_processor_state(), restoring FPU regs if necessary.
	 */
	kernel_fpu_end();
}
0148
/*
 * Rebuild the per-CPU descriptor state (TSS, GDT entry, LDT, syscall MSRs)
 * after the registers have been restored.  Ordering matters throughout.
 */
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available TSS" first (ltr faults on a
	 * busy TSS descriptor).
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9;	/* available 64-bit TSS */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();		/* sets SYSENTER MSRs */
#endif
	load_TR_desc();				/* ltr */
	load_mm_ldt(current->active_mm);	/* lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT; load back the fixmap. */
	load_fixmap_gdt(cpu);
}
0185
0186
0187
0188
0189
0190
0191
0192
0193
/**
 * __restore_processor_state() - Restore the contents of CPU registers saved
 *				 by __save_processor_state()
 * @ctxt: Structure to load the registers contents from.
 *
 * The statement order below is load-bearing: control registers first, then
 * a usable IDT, then percpu access, then descriptor tables, and only then
 * the usermode segments and late MSRs.  notrace: tracing infrastructure is
 * not safe to call this early in resume.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);

	/*
	 * Control registers.
	 */
#ifdef CONFIG_X86_32
	/* cr4 may be zero on CPUs that predate it; only write if saved. */
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
	/* CONFIG_X86_64: EFER must be restored before paging-related CRs. */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them to known-good kernel values.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access, which exception handlers and helpers such
	 * as load_gs_index() depend on.
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
#endif

	/* Restore the TSS, RO GDT, LDT, and syscall MSRs. */
	fix_processor_context();

	/*
	 * Now that the descriptor tables are fully restored and exception
	 * handling works, restore the usermode segments.
	 * NOTE(review): ds is loaded from ctxt->es here — looks intentional
	 * in the original, but worth confirming against upstream history.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and the usermode GSBASE after the selectors, since
	 * loading the selectors clobbers the bases.  MSR_KERNEL_GS_BASE is
	 * the *usermode* base here (the MSR name is misleading).
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);

	microcode_bsp_resume();

	/*
	 * MSR restore must happen after the microcode update, because some
	 * of the saved MSRs are "emulated" in microcode.
	 */
	msr_restore_context(ctxt);
}
0279
0280
/* Restore the boot CPU's register state on resume; notrace: runs too
 * early in resume for the tracing machinery to be usable. */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif
0288
0289 #if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
/*
 * "Play dead" variant used while restoring the hibernation image: park
 * the CPU in a plain HLT loop (hlt_play_dead) rather than MONITOR/MWAIT,
 * after notifying tboot of the wait-for-SIPI shutdown.
 */
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}
0296
/*
 * Take the non-boot CPUs offline for hibernation image restore, making
 * sure they park in HLT rather than MONITOR/MWAIT.  Returns 0 on success
 * or a negative error code.
 */
int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead"
	 * loop during hibernation image restoration, because it is likely
	 * that the monitored address will be written to at that time and
	 * the "dead" CPU would then attempt to execute instructions again,
	 * possibly from pages already overwritten by image data.
	 *
	 * First wake up any SMT siblings that were put into mwait/cpuidle
	 * sleep during boot, so that they too go through the safe
	 * resume_play_dead() path below; the restored kernel decides what
	 * to do with them afterwards.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	/* Temporarily swap in the HLT-based play_dead for the freeze. */
	smp_ops.play_dead = resume_play_dead;
	ret = freeze_secondary_cpus(0);
	smp_ops.play_dead = play_dead;
	return ret;
}
0326 #endif
0327
0328
0329
0330
0331
0332
0333 static int bsp_check(void)
0334 {
0335 if (cpumask_first(cpu_online_mask) != 0) {
0336 pr_warn("CPU0 is offline.\n");
0337 return -ENODEV;
0338 }
0339
0340 return 0;
0341 }
0342
/*
 * PM notifier: refuse suspend/hibernation when CPU0 is offline, and (in
 * CPU0 hotplug debug mode) bring CPU0 on/offline around image restore.
 */
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because
		 * 1. it's required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation
		 * software prepares the snapshot device during boot time.
		 * So we just call _debug_hotplug_cpu() to restore CPU0's
		 * state prior to preparing the snapshot device.
		 *
		 * This works for the normal boot case in CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user-mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the
		 * snapshot device after boot time, this will offline CPU0
		 * and the user may see a different CPU0 state before and
		 * after accessing the snapshot device.  That is acceptable
		 * for a debug facility; CPU0 can easily be onlined again.
		 *
		 * Only the normal boot case is considered here to keep the
		 * debug code simple.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}
0395
static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback runs
	 * first and disables CPU hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}
0406
0407 core_initcall(bsp_pm_check_init);
0408
0409 static int msr_build_context(const u32 *msr_id, const int num)
0410 {
0411 struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
0412 struct saved_msr *msr_array;
0413 int total_num;
0414 int i, j;
0415
0416 total_num = saved_msrs->num + num;
0417
0418 msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
0419 if (!msr_array) {
0420 pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
0421 return -ENOMEM;
0422 }
0423
0424 if (saved_msrs->array) {
0425
0426
0427
0428
0429 memcpy(msr_array, saved_msrs->array,
0430 sizeof(struct saved_msr) * saved_msrs->num);
0431
0432 kfree(saved_msrs->array);
0433 }
0434
0435 for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
0436 u64 dummy;
0437
0438 msr_array[i].info.msr_no = msr_id[j];
0439 msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
0440 msr_array[i].info.reg.q = 0;
0441 }
0442 saved_msrs->num = total_num;
0443 saved_msrs->array = msr_array;
0444
0445 return 0;
0446 }
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458 static int msr_initialize_bdw(const struct dmi_system_id *d)
0459 {
0460
0461 u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
0462
0463 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
0464 return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
0465 }
0466
/* Platforms whose firmware requires extra MSRs to survive suspend. */
static const struct dmi_system_id msr_save_dmi_table[] = {
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};
0478
/*
 * CPU-match callback: register the CPUID-function-1 shadow MSR for
 * save/restore on the matched AMD families (see msr_save_cpu_table).
 */
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}
0490
/* CPUs needing extra MSRs saved; driver_data is a pm_cpu_match_t callback. */
static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};
0496
0497 typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
0498 static int pm_cpu_check(const struct x86_cpu_id *c)
0499 {
0500 const struct x86_cpu_id *m;
0501 int ret = 0;
0502
0503 m = x86_match_cpu(msr_save_cpu_table);
0504 if (m) {
0505 pm_cpu_match_t fn;
0506
0507 fn = (pm_cpu_match_t)m->driver_data;
0508 ret = fn(m);
0509 }
0510
0511 return ret;
0512 }
0513
0514 static void pm_save_spec_msr(void)
0515 {
0516 u32 spec_msr_id[] = {
0517 MSR_IA32_SPEC_CTRL,
0518 MSR_IA32_TSX_CTRL,
0519 MSR_TSX_FORCE_ABORT,
0520 MSR_IA32_MCU_OPT_CTRL,
0521 MSR_AMD64_LS_CFG,
0522 };
0523
0524 msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
0525 }
0526
/*
 * Initcall: build the MSR save/restore list from the DMI quirk table,
 * the CPU-match table, and the speculation-control MSRs.  Always
 * returns 0; a failed registration only means fewer MSRs are preserved.
 */
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);
	pm_save_spec_msr();

	return 0;
}
0535
0536 device_initcall(pm_check_save_msr);