// SPDX-License-Identifier: GPL-2.0
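/*
 * MMU context management for MIPS: the legacy per-CPU ASID scheme, plus a
 * shared, generation-based MemoryMapID (MMID) allocator used on CPUs with
 * cpu_has_mmid.
 */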
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

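/*
 * MMID allocator state: cpu_mmid_lock serialises allocation and rollover,
 * mmid_version holds the current global generation, mmid_map tracks which
 * MMIDs are in use within that generation, reserved_mmids records the MMID
 * each CPU was running when the last rollover occurred, and
 * tlb_flush_pending marks CPUs that must flush their local TLB before
 * running with a newly allocated MMID.
 */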
static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

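/*
 * Return true if a & b belong to the same ASID/MMID generation: only the
 * version bits selected by asid_version_mask(cpu) are compared, the ASID
 * value itself is ignored.
 */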
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
    return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

void get_new_mmu_context(struct mm_struct *mm)
{
    unsigned int cpu;
    u64 asid;

    /*
     * This function is specific to ASIDs, and should not be called when
     * MMIDs are in use.
     */
    if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
        return;

    cpu = smp_processor_id();
    asid = asid_cache(cpu);

    if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
        if (cpu_has_vtag_icache)
            flush_icache_all();
        local_flush_tlb_all();  /* start new asid cycle */
    }

    set_cpu_context(cpu, mm, asid);
    asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

void check_mmu_context(struct mm_struct *mm)
{
    unsigned int cpu = smp_processor_id();

    /*
     * This function is specific to ASIDs, and should not be called when
     * MMIDs are in use.
     */
    if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
        return;

    /* Check if our ASID is of an older version and thus invalid */
    if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
        get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

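/*
 * Called on MMID rollover with cpu_mmid_lock held: reset the MMID bitmap,
 * record the MMID each CPU is currently running (so it survives the
 * rollover), and mark every CPU as needing a local TLB flush before it
 * next switches context.
 */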
static void flush_context(void)
{
    u64 mmid;
    int cpu;

    /* Update the list of reserved MMIDs and the MMID bitmap */
    bitmap_zero(mmid_map, num_mmids);

    /* Reserve an MMID for kmap/wired entries */
    __set_bit(MMID_KERNEL_WIRED, mmid_map);

    for_each_possible_cpu(cpu) {
        mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

        /*
         * If this CPU has already been through a
         * rollover, but hasn't run another task in
         * the meantime, we must preserve its reserved
         * MMID, as this is the only trace we have of
         * the process it is still running.
         */
        if (mmid == 0)
            mmid = per_cpu(reserved_mmids, cpu);

        __set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
        per_cpu(reserved_mmids, cpu) = mmid;
    }

    /*
     * Queue a TLB invalidation for each CPU to perform on next
     * context-switch
     */
    cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
    bool hit;
    int cpu;

    /*
     * Iterate over the set of reserved MMIDs looking for a match.
     * If we find one, then we can update our mm to use newmmid
     * (i.e. the same MMID in the current generation) but we can't
     * exit the loop early, since we need to ensure that all copies
     * of the old MMID are updated to reflect the mm. Failure to do
     * so could result in us missing the reserved MMID in a future
     * generation.
     */
    hit = false;
    for_each_possible_cpu(cpu) {
        if (per_cpu(reserved_mmids, cpu) == mmid) {
            hit = true;
            per_cpu(reserved_mmids, cpu) = newmmid;
        }
    }

    return hit;
}

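/*
 * Allocate an MMID for @mm, called with cpu_mmid_lock held. Prefer to
 * revalidate the MMID the mm used in a previous generation; otherwise take
 * a free MMID from the bitmap, bumping the global version and flushing
 * allocator state if the current generation is exhausted.
 */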
static u64 get_new_mmid(struct mm_struct *mm)
{
    static u32 cur_idx = MMID_KERNEL_WIRED + 1;
    u64 mmid, version, mmid_mask;

    mmid = cpu_context(0, mm);
    version = atomic64_read(&mmid_version);
    mmid_mask = cpu_asid_mask(&boot_cpu_data);

    if (!asid_versions_eq(0, mmid, 0)) {
        u64 newmmid = version | (mmid & mmid_mask);

        /*
         * If our current MMID was active during a rollover, we
         * can continue to use it and this was just a false alarm.
         */
        if (check_update_reserved_mmid(mmid, newmmid)) {
            mmid = newmmid;
            goto set_context;
        }

        /*
         * We had a valid MMID in a previous life, so try to re-use
         * it if possible.
         */
        if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
            mmid = newmmid;
            goto set_context;
        }
    }

    /* Allocate a free MMID */
    mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
    if (mmid != num_mmids)
        goto reserve_mmid;

    /* We're out of MMIDs, so increment the global version */
    version = atomic64_add_return_relaxed(asid_first_version(0),
                                          &mmid_version);

    /* Note currently active MMIDs & mark TLBs as requiring flushes */
    flush_context();

    /* We have more MMIDs than CPUs, so this will always succeed */
    mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
    __set_bit(mmid, mmid_map);
    cur_idx = mmid;
    mmid |= version;
set_context:
    set_cpu_context(0, mm, mmid);
    return mmid;
}

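/*
 * Switch the MMU context to @mm on the current CPU. Without MMID support
 * this falls back to the ASID scheme and writes EntryHi; with MMIDs it
 * revalidates (or reallocates) the mm's MMID, performs any pending local
 * TLB flush, writes the MemoryMapID register and, where FTLB entries are
 * shared between sibling CPUs, globally invalidates entries for the new
 * MMID. Finally it points the TLB refill handler at the mm's PGD.
 */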
void check_switch_mmu_context(struct mm_struct *mm)
{
    unsigned int cpu = smp_processor_id();
    u64 ctx, old_active_mmid;
    unsigned long flags;

    if (!cpu_has_mmid) {
        check_mmu_context(mm);
        write_c0_entryhi(cpu_asid(cpu, mm));
        goto setup_pgd;
    }

    /*
     * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
     * unnecessary.
     *
     * The memory ordering here is subtle. If the CPU's old_active_mmid is
     * non-zero and the MMID matches the current version, then we update
     * the CPU's asid_cache with a relaxed cmpxchg. Racing with a concurrent
     * rollover means that either:
     *
     * - We get a zero back from the cmpxchg and end up waiting on
     *   cpu_mmid_lock in the slow path below. Taking the lock synchronises
     *   with the rollover and so we are forced to see the updated
     *   generation.
     *
     * - We get a valid MMID back from the cmpxchg, which means the
     *   relaxed xchg in flush_context will treat us as reserved
     *   because atomic RmWs are totally ordered for a given location.
     */
    ctx = cpu_context(cpu, mm);
    old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
    if (!old_active_mmid ||
        !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
        !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
        raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

        ctx = cpu_context(cpu, mm);
        if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
            ctx = get_new_mmid(mm);

        WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
        raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
    }

    /*
     * Invalidate the local TLB if needed. Note that we must only clear our
     * bit in tlb_flush_pending after this is complete, so that the
     * cpu_has_shared_ftlb_entries case below isn't misled.
     */
    if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
        if (cpu_has_vtag_icache)
            flush_icache_all();
        local_flush_tlb_all();
        cpumask_clear_cpu(cpu, &tlb_flush_pending);
    }

    write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

    /*
     * If this CPU shares FTLB entries with its siblings and one or more of
     * those siblings hasn't yet invalidated its TLB following a version
     * increase then we need to invalidate any TLB entries for our MMID
     * that we might otherwise pick up from a sibling.
     *
     * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
     * CONFIG_SMP=n kernels.
     */
#ifdef CONFIG_SMP
    if (cpu_has_shared_ftlb_entries &&
        cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
        /* Ensure we operate on the new MMID */
        mtc0_tlbw_hazard();

        /*
         * Invalidate all TLB entries associated with the new
         * MMID, and wait for the invalidation to complete.
         */
        ginvt_mmid();
        sync_ginv();
    }
#endif

setup_pgd:
    TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);

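/*
 * Early-boot initialisation of the MMID allocator: size the bitmap from
 * asid_first_version(0) (i.e. one bit per possible MMID) and reserve the
 * MMID used for kmap/wired entries.
 */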
static int mmid_init(void)
{
    if (!cpu_has_mmid)
        return 0;

    /*
     * Expect allocation after rollover to fail if we don't have at least
     * one more MMID than CPUs.
     */
    num_mmids = asid_first_version(0);
    WARN_ON(num_mmids <= num_possible_cpus());

    atomic64_set(&mmid_version, asid_first_version(0));
    mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
    if (!mmid_map)
        panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

    /* Reserve an MMID for kmap/wired entries */
    __set_bit(MMID_KERNEL_WIRED, mmid_map);

    pr_info("MMID allocator initialised with %u entries\n", num_mmids);
    return 0;
}
early_initcall(mmid_init);