/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

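/*
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task is published
 * in CONTEXTIDR_EL1 so that external debug and trace tools can identify the
 * task currently running on this CPU.
 */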
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

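/*
 * The ASID lives in TTBR1_EL1 while the user pgd is installed in TTBR0_EL1,
 * and the two cannot be updated atomically. Parking TTBR0 on the reserved
 * tables first ensures that no speculative walk can pair the new ASID with
 * the old pgd, or vice versa.
 */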
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern int idmap_t0sz;

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr = read_sysreg(tcr_el1);

	if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
		return;

	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)
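
/*
 * Worked example (illustrative): T0SZ encodes 64 minus the VA width, so on
 * a 48-bit VA configuration TCR_T0SZ(48) programs T0SZ = 16, giving TTBR0 a
 * 2^48-byte translation range.
 */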

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

static inline void __cpu_install_idmap(pgd_t *idmap)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap), &init_mm);
}

static inline void cpu_install_idmap(void)
{
	__cpu_install_idmap(idmap_pg_dir);
}
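
/*
 * Illustrative usage (not lifted from a specific call site): the idmap is
 * typically installed around code that must execute from a physical alias
 * of the kernel, e.g.:
 *
 *	cpu_install_idmap();
 *	call_idmapped_fn();	(hypothetical function run via its idmap alias)
 *	cpu_uninstall_idmap();
 */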

/*
 * Install an arbitrary set of page tables in TTBR0_EL1, with a matching
 * T0SZ. The reserved tables are installed and the local TLB invalidated
 * first, so the new tables cannot conflict with any stale TTBR0 entries: a
 * strict break-before-make sequence for the entire TTBR0 regime.
 */
static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	__cpu_set_tcr_t0sz(t0sz);

	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
}
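
/*
 * One known style of caller (for illustration): the KPTI nG remapping code,
 * which has to run from alternative page tables with the MMU enabled.
 */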

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * CNP (Common Not Private) marks the translation tables as
		 * shared between all CPUs. That is only correct for the
		 * common swapper_pg_dir, hence the WARN_ON() above.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));

	__cpu_install_idmap(idmap);
	replace_phys(ttbr1);
	cpu_uninstall_idmap();
}
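
/*
 * Illustrative call, modelled on the arm64 kasan/paging setup code (exact
 * call sites may differ between kernel versions):
 *
 *	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
 */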

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could
 * erroneously free an ASID allocated in a future generation. Instead, ASIDs
 * are never freed: a new mm simply starts with no ASID (context id 0) and
 * has one allocated by check_and_switch_context() on first use.
 */
void check_and_switch_context(struct mm_struct *mm);

#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		/* TTBR0_EL1 layout: ASID in bits [63:48], pgd BADDR below */
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * The task is being switched to run on init_mm (lazy TLB). With
	 * SW PAN, make its saved TTBR0 point at the reserved page tables
	 * so that stale user mappings cannot be re-installed by a later
	 * uaccess.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may have not been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpu_possible_mask;

	if (!is_compat_thread(task_thread_info(p)))
		return cpu_possible_mask;

	return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask	task_cpu_possible_mask

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_MMU_CONTEXT_H */