#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

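	/*
	 * This function is specific to the legacy ASID scheme and should not
	 * be called when MMIDs are in use, hence the WARN_ON() below.
	 */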
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

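	/*
	 * This function is specific to the legacy ASID scheme and should not
	 * be called when MMIDs are in use, hence the WARN_ON() below.
	 */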
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

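	/* Check whether our ASID is of an older version and thus invalid */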
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

static void flush_context(void)
{
	u64 mmid;
	int cpu;

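	/* Update the set of reserved MMIDs and rebuild the MMID bitmap */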
	bitmap_zero(mmid_map, num_mmids);

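	/* Keep the MMID used for kernel wired entries reserved */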
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

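		/*
		 * If this CPU has already been through a rollover, but
		 * hasn't run another task in the meantime, its asid_cache
		 * is already zero. We must then preserve its previously
		 * reserved MMID, as that is the only trace left of the
		 * process it is still running.
		 */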
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

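	/*
	 * Queue a TLB invalidation for each CPU to perform on its next
	 * context switch.
	 */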
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

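	/*
	 * Iterate over the set of reserved MMIDs looking for a match. If we
	 * find one, the mm can keep the same MMID in the new generation
	 * (newmmid). We must not exit the loop early, though: every CPU
	 * holding a copy of the old MMID needs to be updated, otherwise we
	 * could miss a reserved MMID in a future generation.
	 */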
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}

static u64 get_new_mmid(struct mm_struct *mm)
{
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

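		/*
		 * If our current MMID was active during a rollover it will
		 * have been reserved, so we can continue to use it.
		 */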
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

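		/*
		 * This mm had a valid MMID in a previous generation, so try
		 * to re-use the same value if it is still free.
		 */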
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

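	/* Allocate a free MMID */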
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

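	/* We're out of MMIDs, so increment the global version */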
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

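	/* Note currently active MMIDs and mark all TLBs as needing a flush */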
	flush_context();

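	/* We have more MMIDs than CPUs, so this will always succeed */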
	mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);
	return mmid;
}

void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

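	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our asid_cache is non-zero
	 * and the mm's MMID matches the current version, we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent
	 * rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up taking
	 *   cpu_mmid_lock below, which synchronises with the rollover and so
	 *   forces us to see the updated generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context() will observe it and treat this
	 *   CPU's MMID as reserved, because atomic RmWs on a given location
	 *   are totally ordered.
	 */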
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

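	/*
	 * Invalidate the local TLB if needed. Note that the pending state
	 * must be checked *after* updating this CPU's asid_cache above,
	 * otherwise a concurrent rollover could fail to set our pending bit
	 * and we would miss the invalidation.
	 */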
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

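	/*
	 * If this CPU shares FTLB entries with its siblings and one or more
	 * of those siblings hasn't yet invalidated its TLB following a
	 * version increase, we need to invalidate any TLB entries for our
	 * MMID that we might otherwise pick up from a sibling.
	 *
	 * This is wrapped in CONFIG_SMP because cpu_sibling_map isn't
	 * defined in CONFIG_SMP=n kernels.
	 */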
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
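		/* Make sure the MemoryMapID write above has taken effect */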
		mtc0_tlbw_hazard();

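		/*
		 * Globally invalidate all TLB entries associated with the
		 * new MMID, and wait for the invalidation to complete.
		 */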
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);

static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

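	/*
	 * Expect allocation after a rollover to fail if we don't have at
	 * least one more MMID than CPUs, since each CPU can keep one MMID
	 * reserved across a rollover.
	 */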
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

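	/* Reserve the MMID used for kernel wired entries */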
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);