/* NOTE(review): captured from an LXR-style source browser (arch/arm64/mm/proc.S);
 * navigation chrome removed. The leading NNNN numbers on each line are browser
 * line numbers from the capture, not part of the original source. */
0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Based on arch/arm/mm/proc.S
0004  *
0005  * Copyright (C) 2001 Deep Blue Solutions Ltd.
0006  * Copyright (C) 2012 ARM Ltd.
0007  * Author: Catalin Marinas <catalin.marinas@arm.com>
0008  */
0009 
0010 #include <linux/init.h>
0011 #include <linux/linkage.h>
0012 #include <linux/pgtable.h>
0013 #include <asm/assembler.h>
0014 #include <asm/asm-offsets.h>
0015 #include <asm/asm_pointer_auth.h>
0016 #include <asm/hwcap.h>
0017 #include <asm/kernel-pgtable.h>
0018 #include <asm/pgtable-hwdef.h>
0019 #include <asm/cpufeature.h>
0020 #include <asm/alternative.h>
0021 #include <asm/smp.h>
0022 #include <asm/sysreg.h>
0023 
0024 #ifdef CONFIG_ARM64_64K_PAGES
0025 #define TCR_TG_FLAGS    TCR_TG0_64K | TCR_TG1_64K
0026 #elif defined(CONFIG_ARM64_16K_PAGES)
0027 #define TCR_TG_FLAGS    TCR_TG0_16K | TCR_TG1_16K
0028 #else /* CONFIG_ARM64_4K_PAGES */
0029 #define TCR_TG_FLAGS    TCR_TG0_4K | TCR_TG1_4K
0030 #endif
0031 
0032 #ifdef CONFIG_RANDOMIZE_BASE
0033 #define TCR_KASLR_FLAGS TCR_NFD1
0034 #else
0035 #define TCR_KASLR_FLAGS 0
0036 #endif
0037 
0038 #define TCR_SMP_FLAGS   TCR_SHARED
0039 
0040 /* PTWs cacheable, inner/outer WBWA */
0041 #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
0042 
0043 #ifdef CONFIG_KASAN_SW_TAGS
0044 #define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
0045 #else
0046 #define TCR_KASAN_SW_FLAGS 0
0047 #endif
0048 
0049 #ifdef CONFIG_KASAN_HW_TAGS
0050 #define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
0051 #else
0052 /*
0053  * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
0054  * TBI being enabled at EL1.
0055  */
0056 #define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
0057 #endif
0058 
0059 /*
0060  * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
0061  * changed during __cpu_setup to Normal Tagged if the system supports MTE.
0062  */
0063 #define MAIR_EL1_SET                            \
0064     (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |  \
0065      MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |    \
0066      MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |      \
0067      MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |            \
0068      MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
0069 
0070 #ifdef CONFIG_CPU_PM
0071 /**
0072  * cpu_do_suspend - save CPU registers context
0073  *
0074  * x0: virtual address of context pointer
0075  *
0076  * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
0077  */
0078 SYM_FUNC_START(cpu_do_suspend)
    /*
     * x0 points at a struct cpu_suspend_ctx; the stp offsets below define
     * its layout, so the load order in cpu_do_resume() must match.
     */
0079     mrs x2, tpidr_el0               // EL0 thread-ID register
0080     mrs x3, tpidrro_el0             // EL0 read-only thread-ID register
0081     mrs x4, contextidr_el1          // context ID register
0082     mrs x5, osdlr_el1               // OS double-lock register
0083     mrs x6, cpacr_el1               // FP/SIMD access control
0084     mrs x7, tcr_el1                 // translation control
0085     mrs x8, vbar_el1                // exception vector base
0086     mrs x9, mdscr_el1               // monitor debug system control
0087     mrs x10, oslsr_el1              // OS lock status
0088     mrs x11, sctlr_el1              // system control
0089     get_this_cpu_offset x12         // this CPU's per-cpu offset
0090     mrs x13, sp_el0                 // EL0 stack pointer
    /* Context offsets 0..88: pairs stored in the register order above. */
0091     stp x2, x3, [x0]
0092     stp x4, x5, [x0, #16]
0093     stp x6, x7, [x0, #32]
0094     stp x8, x9, [x0, #48]
0095     stp x10, x11, [x0, #64]
0096     stp x12, x13, [x0, #80]
0097     /*
0098      * Save x18 as it may be used as a platform register, e.g. by shadow
0099      * call stack.
0100      */
0101     str x18, [x0, #96]
0102     ret
0103 SYM_FUNC_END(cpu_do_suspend)
0104 
0105 /**
0106  * cpu_do_resume - restore CPU register context
0107  *
0108  * x0: Address of context pointer
0109  */
0110     .pushsection ".idmap.text", "awx"
0111 SYM_FUNC_START(cpu_do_resume)
    /*
     * x0 = context area written by cpu_do_suspend(). The values are loaded
     * into different GPRs than they were saved from, so the ldp pairing
     * below intentionally differs from the stp pairing on the save side;
     * only the byte offsets must match.
     */
0112     ldp x2, x3, [x0]                // tpidr_el0, tpidrro_el0
0113     ldp x4, x5, [x0, #16]           // contextidr_el1, osdlr_el1
0114     ldp x6, x8, [x0, #32]           // cpacr_el1, tcr_el1
0115     ldp x9, x10, [x0, #48]          // vbar_el1, mdscr_el1
0116     ldp x11, x12, [x0, #64]         // oslsr_el1, sctlr_el1
0117     ldp x13, x14, [x0, #80]         // per-cpu offset, sp_el0
0118     /*
0119      * Restore x18, as it may be used as a platform register, and clear
0120      * the buffer to minimize the risk of exposure when used for shadow
0121      * call stack.
0122      */
0123     ldr x18, [x0, #96]
0124     str xzr, [x0, #96]
0125     msr tpidr_el0, x2
0126     msr tpidrro_el0, x3
0127     msr contextidr_el1, x4
0128     msr cpacr_el1, x6
0129 
0130     /* Don't change t0sz here, mask those bits when restoring */
0131     mrs x7, tcr_el1
0132     bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
0133 
0134     msr tcr_el1, x8
0135     msr vbar_el1, x9
0136 
0137     /*
0138      * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
0139      * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
0140      * exception. Mask them until local_daif_restore() in cpu_suspend()
0141      * resets them.
0142      */
0143     disable_daif
0144     msr mdscr_el1, x10
0145 
0146     msr sctlr_el1, x12
0147     set_this_cpu_offset x13
0148     msr sp_el0, x14
0149     /*
0150      * Restore oslsr_el1 by writing oslar_el1
0151      */
0152     msr osdlr_el1, x5
0153     ubfx    x11, x11, #1, #1            // extract OSLSR_EL1.OSLK (bit 1)
0154     msr oslar_el1, x11
0155     reset_pmuserenr_el0 x0          // Disable PMU access from EL0
0156     reset_amuserenr_el0 x0          // Disable AMU access from EL0
0157 
0158 alternative_if ARM64_HAS_RAS_EXTN
0159     msr_s   SYS_DISR_EL1, xzr
0160 alternative_else_nop_endif
0161 
    /*
     * x14 still holds the restored sp_el0 value; presumably the current
     * task pointer used to locate the kernel ptrauth keys — TODO confirm
     * against ptrauth_keys_install_kernel_nosync in <asm/asm_pointer_auth.h>.
     */
0162     ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
0163     isb
0164     ret
0165 SYM_FUNC_END(cpu_do_resume)
0166     .popsection
0167 #endif
0168 
0169     .pushsection ".idmap.text", "awx"
0170 
.macro  __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
    /*
     * Point TTBR1_EL1 at the empty reserved_pg_dir and invalidate this
     * CPU's TLB, so no stale TTBR1 translations can be used while the
     * caller swaps page tables. Clobbers \tmp1 and \tmp2.
     */
    adrp    \tmp1, reserved_pg_dir
    phys_to_ttbr \tmp2, \tmp1
    offset_ttbr1 \tmp2, \tmp1
    msr ttbr1_el1, \tmp2
    isb                                 // synchronize the TTBR1 write
    tlbi    vmalle1                     // invalidate local EL1 TLB entries
    dsb nsh                             // complete the TLBI before returning
    isb
.endm
0181 
0182 /*
0183  * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
0184  *
0185  * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
0186  * called by anything else. It can only be executed from a TTBR0 mapping.
0187  */
0188 SYM_FUNC_START(idmap_cpu_replace_ttbr1)
    /*
     * x0 = physical address of the new TTBR1 page table. Runs from the
     * idmap (TTBR0), so TTBR1 can be torn down and replaced underneath us.
     * Interrupts/debug are masked across the switch since no exception
     * can be handled while TTBR1 points at reserved_pg_dir.
     */
0189     save_and_disable_daif flags=x2
0190 
0191     __idmap_cpu_set_reserved_ttbr1 x1, x3
0192 
0193     offset_ttbr1 x0, x3
0194     msr ttbr1_el1, x0
0195     isb
0196 
0197     restore_daif x2
0198 
0199     ret
0200 SYM_FUNC_END(idmap_cpu_replace_ttbr1)
0201     .popsection
0202 
0203 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
0204 
0205 #define KPTI_NG_PTE_FLAGS   (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
0206 
0207     .pushsection ".idmap.text", "awx"
0208 
    /*
     * Walk \num_entries descriptors starting at cur_\type\()p and set
     * PTE_NG on every valid, global entry. For levels above PTE, branch
     * out to .Lderef_\type when the descriptor is a table (bit 1 set) so
     * the next level gets processed too; control returns via .Lnext_\type.
     */
    .macro  kpti_mk_tbl_ng, type, num_entries
    add end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
    ldr \type, [cur_\type\()p]      // Load the entry
    tbz \type, #0, .Lnext_\type     // Skip invalid and
    tbnz    \type, #11, .Lnext_\type    // non-global entries
    orr \type, \type, #PTE_NG       // Same bit for blocks and pages
    str \type, [cur_\type\()p]      // Update the entry
    .ifnc   \type, pte
    tbnz    \type, #1, .Lderef_\type    // table descriptor: walk next level
    .endif
.Lnext_\type:
    add cur_\type\()p, cur_\type\()p, #8
    cmp cur_\type\()p, end_\type\()p
    b.ne    .Ldo_\type
    .endm
0225 
0226     /*
0227      * Dereference the current table entry and map it into the temporary
0228      * fixmap slot associated with the current level.
0229      */
    /*
     * Map the table whose physical address is in cur_\type\()p into the
     * per-level temporary fixmap slot (slot \level + 1 of the page mapped
     * by temp_pte), then retarget cur_\type\()p at that slot's VA.
     * Break-before-make: clear the old fixmap entry and invalidate its
     * TLB entry before installing the new one. Clobbers pte.
     */
    .macro  kpti_map_pgtbl, type, level
    str xzr, [temp_pte, #8 * (\level + 1)]  // break before make
    dsb nshst
    add pte, temp_pte, #PAGE_SIZE * (\level + 1)
    lsr pte, pte, #12                       // TLBI VAAE1 takes VA[...:12]
    tlbi    vaae1, pte
    dsb nsh
    isb
 
    phys_to_pte pte, cur_\type\()p
    add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
    orr pte, pte, pte_flags
    str pte, [temp_pte, #8 * (\level + 1)]
    dsb nshst                               // make the new mapping visible
    .endm
0245 
0246 /*
0247  * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
0248  *                 unsigned long temp_pte_va)
0249  *
0250  * Called exactly once from stop_machine context by each CPU found during boot.
0251  */
    /*
     * Rendezvous word for idmap_kpti_install_ng_mappings(): secondaries
     * increment it on arrival; the boot CPU waits for it to reach
     * num_cpus, then zeroes it to release everyone. Starts at 1 so the
     * boot CPU counts itself.
     */
    .pushsection    ".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
    .popsection
0255 
0256 SYM_FUNC_START(idmap_kpti_install_ng_mappings)
    /*
     * Symbolic register map. Note the deliberate aliasing: cpu/temp_pte
     * share x0 and num_cpus/pte_flags share x1 — the incoming argument is
     * consumed before the alias is written. x10, x13 and x17 are reserved
     * for the pud/pmd/valid aliases declared further down.
     */
0257     cpu     .req    w0
0258     temp_pte    .req    x0
0259     num_cpus    .req    w1
0260     pte_flags   .req    x1
0261     temp_pgd_phys   .req    x2
0262     swapper_ttb .req    x3
0263     flag_ptr    .req    x4
0264     cur_pgdp    .req    x5
0265     end_pgdp    .req    x6
0266     pgd     .req    x7
0267     cur_pudp    .req    x8
0268     end_pudp    .req    x9
0269     cur_pmdp    .req    x11
0270     end_pmdp    .req    x12
0271     cur_ptep    .req    x14
0272     end_ptep    .req    x15
0273     pte     .req    x16
0274     valid       .req    x17
0275 
0276     mov x5, x3              // preserve temp_pte arg
0277     mrs swapper_ttb, ttbr1_el1
0278     adr_l   flag_ptr, __idmap_kpti_flag
0279 
0280     cbnz    cpu, __idmap_kpti_secondary
0281 
0282     /* We're the boot CPU. Wait for the others to catch up */
    /* flag starts at 1 (us); spin until all num_cpus have checked in. */
0283     sevl
0284 1:  wfe
0285     ldaxr   w17, [flag_ptr]
0286     eor w17, w17, num_cpus
0287     cbnz    w17, 1b
0288 
0289     /* Switch to the temporary page tables on this CPU only */
0290     __idmap_cpu_set_reserved_ttbr1 x8, x9
0291     offset_ttbr1 temp_pgd_phys, x8
0292     msr ttbr1_el1, temp_pgd_phys
0293     isb
0294 
0295     mov temp_pte, x5
0296     mov pte_flags, #KPTI_NG_PTE_FLAGS
0297 
0298     /* Everybody is enjoying the idmap, so we can rewrite swapper. */
0299     /* PGD */
0300     adrp        cur_pgdp, swapper_pg_dir
0301     kpti_map_pgtbl  pgd, 0
0302     kpti_mk_tbl_ng  pgd, PTRS_PER_PGD
0303 
0304     /* Ensure all the updated entries are visible to secondary CPUs */
0305     dsb ishst
0306 
0307     /* We're done: fire up swapper_pg_dir again */
0308     __idmap_cpu_set_reserved_ttbr1 x8, x9
0309     msr ttbr1_el1, swapper_ttb
0310     isb
0311 
0312     /* Set the flag to zero to indicate that we're all done */
0313     str wzr, [flag_ptr]
0314     ret
0315 
    /*
     * Sub-table walkers, reached from kpti_mk_tbl_ng's .Lderef_* branches
     * and returning to its .Lnext_* labels — out of line so each level's
     * loop stays a straight fall-through. When a level is folded
     * (fewer CONFIG_PGTABLE_LEVELS), its alias collapses onto pgd and its
     * .Lnext label onto .Lnext_pgd.
     */
0316 .Lderef_pgd:
0317     /* PUD */
0318     .if     CONFIG_PGTABLE_LEVELS > 3
0319     pud     .req    x10
0320     pte_to_phys cur_pudp, pgd
0321     kpti_map_pgtbl  pud, 1
0322     kpti_mk_tbl_ng  pud, PTRS_PER_PUD
0323     b       .Lnext_pgd
0324     .else       /* CONFIG_PGTABLE_LEVELS <= 3 */
0325     pud     .req    pgd
0326     .set        .Lnext_pud, .Lnext_pgd
0327     .endif
0328 
0329 .Lderef_pud:
0330     /* PMD */
0331     .if     CONFIG_PGTABLE_LEVELS > 2
0332     pmd     .req    x13
0333     pte_to_phys cur_pmdp, pud
0334     kpti_map_pgtbl  pmd, 2
0335     kpti_mk_tbl_ng  pmd, PTRS_PER_PMD
0336     b       .Lnext_pud
0337     .else       /* CONFIG_PGTABLE_LEVELS <= 2 */
0338     pmd     .req    pgd
0339     .set        .Lnext_pmd, .Lnext_pgd
0340     .endif
0341 
0342 .Lderef_pmd:
0343     /* PTE */
0344     pte_to_phys cur_ptep, pmd
0345     kpti_map_pgtbl  pte, 3
0346     kpti_mk_tbl_ng  pte, PTRS_PER_PTE
0347     b       .Lnext_pmd
0348 
0349     .unreq  cpu
0350     .unreq  temp_pte
0351     .unreq  num_cpus
0352     .unreq  pte_flags
0353     .unreq  temp_pgd_phys
0354     .unreq  cur_pgdp
0355     .unreq  end_pgdp
0356     .unreq  pgd
0357     .unreq  cur_pudp
0358     .unreq  end_pudp
0359     .unreq  pud
0360     .unreq  cur_pmdp
0361     .unreq  end_pmdp
0362     .unreq  pmd
0363     .unreq  cur_ptep
0364     .unreq  end_ptep
0365     .unreq  pte
0366     .unreq  valid
0367 
0368     /* Secondary CPUs end up here */
0369 __idmap_kpti_secondary:
0370     /* Uninstall swapper before surgery begins */
0371     __idmap_cpu_set_reserved_ttbr1 x16, x17
0372 
0373     /* Increment the flag to let the boot CPU know we're ready */
0374 1:  ldxr    w16, [flag_ptr]
0375     add w16, w16, #1
0376     stxr    w17, w16, [flag_ptr]
0377     cbnz    w17, 1b                     // retry if the exclusive store failed
0378 
0379     /* Wait for the boot CPU to finish messing around with swapper */
0380     sevl
0381 1:  wfe
0382     ldxr    w16, [flag_ptr]             // boot CPU zeroes the flag when done
0383     cbnz    w16, 1b
0384 
0385     /* All done, act like nothing happened */
0386     msr ttbr1_el1, swapper_ttb
0387     isb
0388     ret
0389 
0390     .unreq  swapper_ttb
0391     .unreq  flag_ptr
0392 SYM_FUNC_END(idmap_kpti_install_ng_mappings)
0393     .popsection
0394 #endif
0395 
0396 /*
0397  *  __cpu_setup
0398  *
0399  *  Initialise the processor for turning the MMU on.
0400  *
0401  * Input:
0402  *  x0 - actual number of VA bits (ignored unless VA_BITS > 48)
0403  * Output:
0404  *  Return in x0 the value of the SCTLR_EL1 register.
0405  */
0406     .pushsection ".idmap.text", "awx"
0407 SYM_FUNC_START(__cpu_setup)
0408     tlbi    vmalle1             // Invalidate local TLB
0409     dsb nsh
0410 
0411     mov x1, #3 << 20            // CPACR_EL1.FPEN = 0b11: don't trap
0412     msr cpacr_el1, x1           // Enable FP/ASIMD
0413     mov x1, #1 << 12            // Reset mdscr_el1 and disable
0414     msr mdscr_el1, x1           // access to the DCC from EL0
0415     isb                 // Unmask debug exceptions now,
0416     enable_dbg              // since this is per-cpu
0417     reset_pmuserenr_el0 x1          // Disable PMU access from EL0
0418     reset_amuserenr_el0 x1          // Disable AMU access from EL0
0419 
0420     /*
0421      * Default values for VMSA control registers. These will be adjusted
0422      * below depending on detected CPU features.
0423      */
0424     mair    .req    x17
0425     tcr .req    x16
0426     mov_q   mair, MAIR_EL1_SET
0427     mov_q   tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
0428             TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
0429             TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
0430 
0431 #ifdef CONFIG_ARM64_MTE
0432     /*
0433      * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
0434      * (ID_AA64PFR1_EL1[11:8] > 1).
0435      */
0436     mrs x10, ID_AA64PFR1_EL1
0437     ubfx    x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
0438     cmp x10, #ID_AA64PFR1_MTE
0439     b.lt    1f
0440 
0441     /* Normal Tagged memory type at the corresponding MAIR index */
0442     mov x10, #MAIR_ATTR_NORMAL_TAGGED
0443     bfi mair, x10, #(8 *  MT_NORMAL_TAGGED), #8
0444 
0445     mov x10, #KERNEL_GCR_EL1
0446     msr_s   SYS_GCR_EL1, x10
0447 
0448     /*
0449      * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
0450      * RGSR_EL1.SEED must be non-zero for IRG to produce
0451      * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
0452      * must initialize it.
0453      */
0454     mrs x10, CNTVCT_EL0             // virtual counter as seed entropy
0455     ands    x10, x10, #SYS_RGSR_EL1_SEED_MASK
0456     csinc   x10, x10, xzr, ne       // seed = masked counter, or 1 if that is 0
0457     lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
0458     msr_s   SYS_RGSR_EL1, x10
0459 
0460     /* clear any pending tag check faults in TFSR*_EL1 */
0461     msr_s   SYS_TFSR_EL1, xzr
0462     msr_s   SYS_TFSRE0_EL1, xzr
0463 
0464     /* set the TCR_EL1 bits */
0465     mov_q   x10, TCR_MTE_FLAGS
0466     orr tcr, tcr, x10
0467 1:
0468 #endif
0469     tcr_clear_errata_bits tcr, x9, x5
0470 
0471 #ifdef CONFIG_ARM64_VA_BITS_52
    /* T1SZ = 64 - (actual VA bits passed in x0) */
0472     sub     x9, xzr, x0
0473     add     x9, x9, #64
0474     tcr_set_t1sz    tcr, x9
0475 #else
0476     idmap_get_t0sz  x9
0477 #endif
0478     tcr_set_t0sz    tcr, x9
0479 
0480     /*
0481      * Set the IPS bits in TCR_EL1.
0482      */
0483     tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
0484 #ifdef CONFIG_ARM64_HW_AFDBM
0485     /*
0486      * Enable hardware update of the Access Flags bit.
0487      * Hardware dirty bit management is enabled later,
0488      * via capabilities.
0489      */
0490     mrs x9, ID_AA64MMFR1_EL1
0491     and x9, x9, #0xf                // ID_AA64MMFR1_EL1.HAFDBS field
0492     cbz x9, 1f
0493     orr tcr, tcr, #TCR_HA       // hardware Access flag update
0494 1:
0495 #endif  /* CONFIG_ARM64_HW_AFDBM */
0496     msr mair_el1, mair
0497     msr tcr_el1, tcr
0498     /*
0499      * Prepare SCTLR
0500      */
0501     mov_q   x0, INIT_SCTLR_EL1_MMU_ON
0502     ret                 // return to head.S
0503 
0504     .unreq  mair
0505     .unreq  tcr
0506 SYM_FUNC_END(__cpu_setup)