#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>
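/*
 * Helpers to compute the page-table index of a virtual address: bits 39-47
 * select the level-4 (PGD) entry, PUD_SHIFT/PTRS_PER_PUD select the level-3
 * (PUD) entry.
 */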
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
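	/*
	 * At this point the CPU already runs in 64-bit mode and an
	 * identity-mapped page table built by the boot path is loaded.
	 * %rsi holds a physical pointer to the real-mode boot data.
	 *
	 * The kernel may have been loaded at an address different from the
	 * one it was compiled for, so the physical addresses in the early
	 * page tables are fixed up below before they are put into CR3.
	 */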
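	/* Set up a boot-time stack at the end of the init task's stack area */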
	leaq	(__end_init_task - FRAME_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

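	/*
	 * Set MSR_GS_BASE to the runtime address of the initial per-CPU area:
	 * initial_gs holds its link-time address, which is adjusted by the
	 * difference between the runtime address of _text (in %rdi) and its
	 * link-time address, so that early C code using per-CPU data and the
	 * stack-protector canary works.
	 */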
	movl	$MSR_GS_BASE, %ecx
	movq	initial_gs(%rip), %rax
	movq	$_text, %rdx
	subq	%rdx, %rax
	addq	%rdi, %rax
	movq	%rax, %rdx
	shrq	$32, %rdx
	wrmsr

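	/*
	 * startup_64_setup_env() prepares the early boot environment (GDT and
	 * early exception handling) from C; %rsi (boot data pointer) is
	 * preserved around the call.
	 */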
	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

#ifdef CONFIG_AMD_MEM_ENCRYPT
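	/*
	 * Activate memory encryption (SME/SEV) if supported and enabled.
	 * sme_enable() takes the boot data pointer (%rsi) as its argument.
	 */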
	movq	%rsi, %rdi
	pushq	%rsi
	call	sme_enable
	popq	%rsi
#endif

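	/* Reload %cs with __KERNEL_CS via a far return */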
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY

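	/* Sanity-check that the CPU supports the features the kernel requires */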
	call	verify_cpu

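	/*
	 * __startup_64() fixes up the early page tables for the actual load
	 * address of the kernel (passed in %rdi as the runtime address of
	 * _text) and returns the SME encryption mask, if any, in %rax.
	 */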
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

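	/* Form the CR3 value: early_top_pgt plus the encryption mask in %rax */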
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
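	/*
	 * Secondary CPUs enter here from the real-mode trampoline with the
	 * CPU already in 64-bit mode; %rsi holds a physical pointer to the
	 * boot data. The boot CPU joins at the "1:" label below after fixing
	 * up the early page tables in startup_64.
	 */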
	call	verify_cpu

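	/*
	 * The secondary_startup_64_no_verify entry point is used by SEV-ES
	 * guests, where the CPUID instructions in verify_cpu() would raise
	 * #VC exceptions that cannot be handled this early in AP bringup.
	 */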
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR

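	/* Retrieve the SME encryption mask (zero when SME is not active) */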
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

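	/* Form the CR3 value: init_top_pgt plus the encryption mask */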
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

#ifdef CONFIG_X86_MCE
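	/*
	 * Preserve the existing CR4.MCE setting when machine-check support
	 * is configured; clearing it can fault in some environments.
	 */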
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif

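	/* Enable PAE and PGE, plus LA57 when 5-level paging is in use */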
	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

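	/* Add the physical load offset (phys_base) to the page-table address */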
	addq	phys_base(%rip), %rax

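	/*
	 * For SEV guests: verify that the C-bit position is correct before
	 * trusting the new page tables. sev_verify_cbit() takes the candidate
	 * CR3 value in %rdi and returns it in %rax.
	 */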
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

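	/*
	 * Switch to the new page tables: the boot CPU switches to
	 * early_top_pgt (which still contains identity mappings), secondary
	 * CPUs switch to init_top_pgt.
	 */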
	movq	%rax, %cr3

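	/* Flush global TLB entries by toggling CR4.PGE off and back on */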
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4

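	/* Continue execution from the kernel's virtual mapping via an absolute jump */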
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // above

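	/* Load the kernel GDT (the per-CPU gdt_page) described by early_gdt_descr */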
	lgdt	early_gdt_descr(%rip)

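	/* Set up the data segment registers with null selectors */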
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

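	/* Also clear %fs and %gs to get rid of any stale real-mode selectors */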
	movl	%eax,%fs
	movl	%eax,%gs

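	/*
	 * Set MSR_GS_BASE to initial_gs (the initial per-CPU area) so that
	 * per-CPU accesses and the stack-protector canary work.
	 */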
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

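	/*
	 * Load the boot-time stack pointer; a secondary CPU's earlier stack
	 * is no longer mapped after the CR3 switch above.
	 */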
	movq	initial_stack(%rip), %rsp

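	/* Set up and load the early IDT from C */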
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

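	/* Check whether NX is implemented (CPUID 0x80000001, EDX bit 20) */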
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

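	/* Set up EFER: enable SCE (syscall/sysret) and, if available, NX */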
	movl	$MSR_EFER, %ecx
	rdmsr

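	/* Keep the current EFER value in %edx so an unchanged value is not rewritten */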
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax
	btl	$20,%edi
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

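	/* Avoid writing EFER when nothing changed */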
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr
1:

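	/* Set the kernel's standard CR0 state and make it take effect */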
	movl	$CR0_STATE, %eax
	movq	%rax, %cr0

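	/* Clear EFLAGS */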
	pushq	$0
	popfq

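	/* Pass the boot data pointer (%rsi) as the first argument to the C entry point */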
	movq	%rsi, %rdi

.Ljump_to_C_code:
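	/*
	 * Jump to the C entry point at its kernel virtual address. Use a
	 * full 64-bit indirect transfer (we may still be running from an
	 * identity-mapped address) and reload %cs at the same time by
	 * pushing __KERNEL_CS and the target and returning with lretq.
	 */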
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#ifdef CONFIG_HOTPLUG_CPU
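/*
 * Boot CPU0 entry point for CPU hotplug: everything is already set up
 * except the stack, so just load it and jump into the common C path.
 */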
SYM_CODE_START(start_cpu0)
	UNWIND_HINT_EMPTY
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
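/*
 * #VC exception entry used during early boot once a GHCB page is available.
 * It builds a pt_regs frame and dispatches through initial_vc_handler.
 */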
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR
	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call the C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove the error code and return */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

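/* Boot-time variables consumed by the startup code above */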
__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

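/*
 * The FRAME_SIZE gap at the top of the initial stack helps the in-kernel
 * unwinder reliably detect the end of the stack.
 */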
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
__FINITDATA

	__INIT
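/*
 * Early exception entry stubs: one fixed-size stub per exception vector.
 * Each pushes a dummy error code where the CPU did not push one, then the
 * vector number, and jumps to the common handler below.
 */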
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	ANNOTATE_UNRET_END
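	/*
	 * On entry the stack holds the hardware exception frame, an error
	 * code (or the dummy zero), and the vector number.
	 */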
	cld

	incl	early_recursion_flag(%rip)

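	/*
	 * Build a struct pt_regs. The vector number was pushed into what is
	 * now the pt_regs->di slot, so fetch it into %rsi and store %rdi in
	 * its place while saving the remaining registers.
	 */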
	pushq	%rsi			/* pt_regs->si */
	movq	8(%rsp), %rsi		/* RSI = vector number */
	movq	%rdi, 8(%rsp)		/* pt_regs->di = RDI */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	%rax			/* pt_regs->ax */
	pushq	%r8			/* pt_regs->r8 */
	pushq	%r9			/* pt_regs->r9 */
	pushq	%r10			/* pt_regs->r10 */
	pushq	%r11			/* pt_regs->r11 */
	pushq	%rbx			/* pt_regs->bx */
	pushq	%rbp			/* pt_regs->bp */
	pushq	%r12			/* pt_regs->r12 */
	pushq	%r13			/* pt_regs->r13 */
	pushq	%r14			/* pt_regs->r14 */
	pushq	%r15			/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq	%rsp,%rdi		/* RDI = pt_regs; RSI is already the vector */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
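/*
 * #VC exception handler used during very early boot, before a GHCB page
 * has been set up. It hands the exception to do_vc_no_ghcb().
 */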
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR
	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call the C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove the error code and return */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
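/*
 * With page-table isolation each top-level page table is 8 KiB: the kernel
 * half is followed by a 512-entry user half, hence the extra fill entries
 * and the 2 * PAGE_SIZE alignment.
 */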
#define PTI_USER_PGD_FILL	512

#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

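/*
 * PMDS(START, PERM, COUNT): emit COUNT consecutive 2 MiB PMD entries
 * covering physical addresses starting at START, with permission bits PERM.
 */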
#define PMDS(START, PERM, COUNT) \
	i = 0 ; \
	.rept (COUNT) ; \
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ; \
	i = i + 1 ; \
	.endr

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
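	/* The kernel mapping at __START_KERNEL_map (the last PGD entry) */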
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
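	/*
	 * Identity-map the first 1 GiB with executable 2 MiB pages; code runs
	 * from these pages, so NX is not set.
	 */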
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* The last two entries map the kernel image and the fixmap area */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
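	/*
	 * Map the kernel image (text+data+bss, up to KERNEL_IMAGE_SIZE) with
	 * executable 2 MiB pages. Permission bits that are not wanted at
	 * runtime are cleaned up later.
	 */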
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr

	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))

	.align 16
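/*
 * phys_base: physical load offset of the kernel image, fixed up during early
 * boot and added when forming CR3 above.
 */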
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)