/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

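/*
 * Mirror the PID of the incoming task into CONTEXTIDR_EL1 (when
 * CONFIG_PID_IN_CONTEXTIDR is enabled) so that external debug and trace
 * tools can identify the task currently running on the CPU.
 */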
static inline void contextidr_thread_switch(struct task_struct *next)
{
    if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
        return;

    write_sysreg(task_pid_nr(next), contextidr_el1);
    isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
    unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

    write_sysreg(ttbr, ttbr0_el1);
    isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

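/*
 * Install the given pgd in TTBR0_EL1 via cpu_do_switch_mm(). The reserved
 * page tables are installed first so that, as with the idmap transitions
 * below, no stale translations can be speculatively fetched through TTBR0
 * mid-switch. swapper_pg_dir is the one pgd that must never end up here;
 * it lives in TTBR1_EL1 only.
 */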
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
    BUG_ON(pgd == swapper_pg_dir);
    cpu_set_reserved_ttbr0();
    cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern int idmap_t0sz;
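/*
 * For reference: the architecture sizes the TTBR0 region as 2^(64 - T0SZ)
 * bytes, i.e. T0SZ == 64 - VA_BITS (a 48-bit VA space gives T0SZ == 16).
 * A smaller T0SZ therefore means a larger TTBR0 range, which is what the
 * idmap needs when system RAM sits very high in the physical address space.
 */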

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
    unsigned long tcr = read_sysreg(tcr_el1);

    if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
        return;

    tcr &= ~TCR_T0SZ_MASK;
    tcr |= t0sz << TCR_T0SZ_OFFSET;
    write_sysreg(tcr, tcr_el1);
    isb();
}

#define cpu_set_default_tcr_t0sz()  __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()    __cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
    struct mm_struct *mm = current->active_mm;

    cpu_set_reserved_ttbr0();
    local_flush_tlb_all();
    cpu_set_default_tcr_t0sz();

    if (mm != &init_mm && !system_uses_ttbr0_pan())
        cpu_switch_mm(mm->pgd, mm);
}

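/*
 * Install an identity-map pgd in TTBR0_EL1, switching T0SZ to the idmap
 * value first. cpu_install_idmap() covers the common case and installs the
 * kernel's idmap_pg_dir; __cpu_install_idmap() lets callers such as
 * cpu_replace_ttbr1() below supply an alternative set of idmap page tables.
 */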
static inline void __cpu_install_idmap(pgd_t *idmap)
{
    cpu_set_reserved_ttbr0();
    local_flush_tlb_all();
    cpu_set_idmap_tcr_t0sz();

    cpu_switch_mm(lm_alias(idmap), &init_mm);
}

static inline void cpu_install_idmap(void)
{
    __cpu_install_idmap(idmap_pg_dir);
}

/*
 * Load our new page tables. A strict break-before-make (BBM) approach
 * requires that the TLBs be free of any entries that may overlap with the
 * global mappings we are about to install.
 *
 * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
 * services), while for a userspace-driven test_resume cycle it points to
 * userspace page tables (and we must point it at a zero page ourselves).
 *
 * We change T0SZ as part of installing the idmap. This is undone by
 * cpu_uninstall_idmap() in __cpu_suspend_exit().
 */
static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
{
    cpu_set_reserved_ttbr0();
    local_flush_tlb_all();
    __cpu_set_tcr_t0sz(t0sz);

    /* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
    write_sysreg(ttbr0, ttbr0_el1);
    isb();
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
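/*
 * The swap itself is carried out by idmap_cpu_replace_ttbr1(), called via
 * its physical address with the idmap installed: while TTBR1_EL1 is being
 * replaced the kernel's own virtual mapping cannot be relied upon, so the
 * replacement routine has to be reachable through the identity map.
 */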
static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
    typedef void (ttbr_replace_func)(phys_addr_t);
    extern ttbr_replace_func idmap_cpu_replace_ttbr1;
    ttbr_replace_func *replace_phys;

    /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
    phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

    if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
        /*
         * cpu_replace_ttbr1() is used either early, when only the boot
         * CPU is up (i.e. the cpufeature framework is not up yet), or
         * later, once CNP has been enabled via the cpufeature enable()
         * callback. We also rely on the cpu_hwcap bit being set before
         * the enable() function is called.
         */
        ttbr1 |= TTBR_CNP_BIT;
    }

    replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));

    __cpu_install_idmap(idmap);
    replace_phys(ttbr1);
    cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
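/*
 * Illustrative scenario (not taken from the allocator code): CPU0 decides to
 * free mm A's ASID just as CPU1 hits a generation rollover and hands the same
 * ASID number to mm B under the new generation; the late free from CPU0 would
 * then release an ASID that is live for mm B. Rather than handle this, ASIDs
 * are never returned and are only recycled across rollovers.
 */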
void check_and_switch_context(struct mm_struct *mm);

#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    atomic64_set(&mm->context.id, 0);
    refcount_set(&mm->context.pinned, 0);
    return 0;
}

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
    u64 ttbr;

    if (!system_uses_ttbr0_pan())
        return;

    if (mm == &init_mm)
        ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
    else
        ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

    WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
}
#endif
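/*
 * With CONFIG_ARM64_SW_TTBR0_PAN, the value saved by update_saved_ttbr0()
 * packs the ASID into bits [63:48] alongside the pgd base address, mirroring
 * the layout of the hardware TTBR registers, so the uaccess enable path has
 * both pieces of information to hand when it re-installs the user page
 * tables.
 */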

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
    /*
     * We don't actually care about the ttbr0 mapping, so point it at the
     * zero page.
     */
    update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
    /*
     * init_mm.pgd does not contain any user mappings and it is always
     * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
     */
    if (next == &init_mm) {
        cpu_set_reserved_ttbr0();
        return;
    }

    check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
    if (prev != next)
        __switch_mm(next);

    /*
     * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
     * value may not have been initialised yet (activate_mm caller) or the
     * ASID may have changed since the last run (following the context
     * switch of another thread of the same process).
     */
    update_saved_ttbr0(tsk, next);
}

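/*
 * Return the set of CPUs the task may ever run on: all possible CPUs, unless
 * the system has mismatched 32-bit EL0 support and the task is a compat
 * (32-bit) task, in which case only the CPUs capable of executing 32-bit
 * code at EL0 are eligible.
 */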
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
    if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
        return cpu_possible_mask;

    if (!is_compat_thread(task_thread_info(p)))
        return cpu_possible_mask;

    return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask  task_cpu_possible_mask

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */