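/*
 * TLB maintenance for arm64: raw TLBI wrappers plus the flush_tlb_*()
 * helpers used by the core mm code.
 */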
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

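/*
 * Raw TLBI operations.
 *
 * Use __tlbi(op) for operations that take no address argument and
 * __tlbi(op, arg) for those that do; __TLBI_N() picks the right variant
 * from the number of arguments supplied.
 *
 * On CPUs affected by ARM64_WORKAROUND_REPEAT_TLBI, the ALTERNATIVE
 * patches in a "dsb ish" followed by a second, identical TLBI.
 */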
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n nop",				       \
			       "dsb ish\n tlbi " #op,			       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )

#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n nop",				       \
			       "dsb ish\n tlbi " #op ", %0",		       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

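/*
 * With KPTI (kernel unmapped at EL0), user mappings live under a separate
 * ASID; __tlbi_user() repeats the operation against that ASID so both
 * copies of the mapping are invalidated.
 */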
#define __tlbi_user(op, arg) do {					\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi(op, (arg) | USER_ASID_FLAG);			\
} while (0)

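/*
 * Build the operand for an address-based TLBI: VA[55:12] in bits [43:0]
 * and the ASID in bits [63:48].
 */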
#define __TLBI_VADDR(addr, asid)					\
	({								\
		unsigned long __ta = (addr) >> 12;			\
		__ta &= GENMASK_ULL(43, 0);				\
		__ta |= (unsigned long)(asid) << 48;			\
		__ta;							\
	})

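/*
 * Translation granule encoding used in the TTL and range-TLBI fields,
 * derived from PAGE_SIZE:
 *  - 4KB  : 1
 *  - 16KB : 2
 *  - 64KB : 3
 */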
#define TLBI_TTL_TG_4K		1
#define TLBI_TTL_TG_16K		2
#define TLBI_TTL_TG_64K		3

static inline unsigned long get_trans_granule(void)
{
	switch (PAGE_SIZE) {
	case SZ_4K:
		return TLBI_TTL_TG_4K;
	case SZ_16K:
		return TLBI_TTL_TG_16K;
	case SZ_64K:
		return TLBI_TTL_TG_64K;
	default:
		return 0;
	}
}

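/*
 * Level-based TLBI operations.
 *
 * When the CPU implements ARMv8.4-TTL, an address-based TLBI can carry a
 * hint (the TTL field, bits [47:44] of the operand) describing the granule
 * and the level of the entry being invalidated. A level of 0 means the
 * level is unknown and results in a non-hinted invalidation.
 */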
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)

#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&		\
	    level) {							\
		u64 ttl = level & 3;					\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while(0)

#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)

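/*
 * Build the operand for a range TLBI (TLBI RVA*). The field layout, as
 * encoded below, is:
 *
 * +----------+------+-------+-------+-------+----------------------+
 * |   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 * +----------+------+-------+-------+-------+----------------------+
 * |63      48|47  46|45   44|43   39|38   37|36                   0|
 *
 * The invalidated range is:
 * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) pages)
 */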
#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl)			\
	({								\
		unsigned long __ta = (addr) >> PAGE_SHIFT;		\
		__ta &= GENMASK_ULL(36, 0);				\
		__ta |= (unsigned long)(ttl) << 37;			\
		__ta |= (unsigned long)(num) << 39;			\
		__ta |= (unsigned long)(scale) << 44;			\
		__ta |= get_trans_granule() << 46;			\
		__ta |= (unsigned long)(asid) << 48;			\
		__ta;							\
	})

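/*
 * Pages covered by a single range operation for a given (num, scale)
 * pair; MAX_TLBI_RANGE_PAGES corresponds to num = 31, scale = 3.
 */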
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES	__TLBI_RANGE_PAGES(31, 3)

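/*
 * __TLBI_RANGE_NUM() generates 'num' values in the range -1 to 30 for a
 * given scale; -1 means the remaining pages cannot be covered at this
 * scale and is rejected by the __flush_tlb_range() loop below.
 */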
#define TLBI_RANGE_MASK		GENMASK_ULL(4, 0)
#define __TLBI_RANGE_NUM(pages, scale)	\
	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)

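/*
 *	TLB Invalidation
 *	================
 *
 *	Every invalidation routine below follows the same template:
 *
 *	DSB ISHST	// Ensure prior page-table updates are visible
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the invalidation has completed
 *	ISB		// If kernel mappings were removed, discard any
 *			// instructions fetched via the old mapping
 *
 *	Core API (see Documentation/core-api/cachetlb.rst):
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel and user) on all CPUs.
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs,
 *		identified by the ASID of 'mm'.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the range [start, end) on all CPUs for the user
 *		address space of 'vma->vm_mm', including any walk-cache
 *		entries for that range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Invalidate the range [start, end) of kernel mappings on all
 *		CPUs (used when unmapping vmalloc/io space).
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single, last-level user entry for 'addr'; does
 *		not affect walk-caches.
 *
 *	Lower-level helpers:
 *
 *	local_flush_tlb_all()
 *		As flush_tlb_all(), but only on the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping (and associated
 *		walk-cache entries) on all CPUs.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
 *		Invalidate [start, end) for 'vma->vm_mm' at a granularity
 *		of 'stride'; walk-cache entries are only invalidated when
 *		'last_level' is false.
 */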
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}

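/*
 * Bound on the number of TLBI operations issued for a single range flush;
 * larger requests fall back to invalidating the whole ASID (or, for
 * kernel ranges, the whole TLB) to avoid soft lockups.
 */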
#define MAX_TLBI_OPS	PTRS_PER_PTE

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	int num = 0;
	int scale = 0;
	unsigned long asid, addr, pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

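	/*
	 * Without range operations, handle up to (MAX_TLBI_OPS - 1)
	 * stride-sized invalidations here; with range operations, up to
	 * (MAX_TLBI_RANGE_PAGES - 1) pages. Beyond that, invalidating the
	 * whole ASID is cheaper.
	 */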
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
	    pages >= MAX_TLBI_RANGE_PAGES) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	dsb(ishst);
	asid = ASID(vma->vm_mm);

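	/*
	 * If the CPU lacks range operations, invalidate page by page at
	 * 'stride' granularity. Otherwise:
	 *
	 * 1. If 'pages' is odd, flush the first page with a non-range
	 *    operation so that the remainder is even;
	 *
	 * 2. Cover the remaining pages with range operations of increasing
	 *    scale: at each scale, __TLBI_RANGE_NUM() yields the 'num' for
	 *    (num + 1) * 2^(5*scale + 1) pages, or -1 if that scale cannot
	 *    contribute.
	 */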
	while (pages > 0) {
		if (!system_supports_tlb_range() ||
		    pages % 2 == 1) {
			addr = __TLBI_VADDR(start, asid);
			if (last_level) {
				__tlbi_level(vale1is, addr, tlb_level);
				__tlbi_user_level(vale1is, addr, tlb_level);
			} else {
				__tlbi_level(vae1is, addr, tlb_level);
				__tlbi_user_level(vae1is, addr, tlb_level);
			}
			start += stride;
			pages -= stride >> PAGE_SHIFT;
			continue;
		}

		num = __TLBI_RANGE_NUM(pages, scale);
		if (num >= 0) {
			addr = __TLBI_VADDR_RANGE(start, asid, scale,
						  num, tlb_level);
			if (last_level) {
				__tlbi(rvale1is, addr);
				__tlbi_user(rvale1is, addr);
			} else {
				__tlbi(rvae1is, addr);
				__tlbi_user(rvae1is, addr);
			}
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
			pages -= __TLBI_RANGE_PAGES(num, scale);
		}
		scale++;
	}
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
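	/*
	 * Leaf-only invalidation cannot be used here, since table entries
	 * may also be changing (e.g. when collapsing hugepages or moving
	 * page tables), and there is not enough information at this point
	 * to pass a level hint; hence last_level = false and tlb_level = 0.
	 */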
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}

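/*
 * Used to invalidate TLB entries (including walk-cache entries) for an
 * intermediate kernel page-table level (pgd/pud/pmd).
 */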
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
#endif	/* !__ASSEMBLY__ */

#endif	/* __ASM_TLBFLUSH_H */