// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32-bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION  (1ULL << ASID_BITS)
#define NUM_USER_ASIDS      ASID_FIRST_VERSION

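/*
 * Worked example (assuming ASID_BITS == 8 and ASID_MASK == ~0ULL << ASID_BITS,
 * matching the ARMv6 layout above): ASID_FIRST_VERSION == 0x100, so each
 * rollover advances the generation counter by 0x100 and the 64-bit
 * mm->context.id packs the generation in the upper bits and the hardware
 * ASID in the low 8 bits. For instance, ASID 5 of the third generation is
 * stored as 0x300 | 0x05 == 0x305, and its hardware ASID is recovered as
 * 0x305 & ~ASID_MASK == 5.
 */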
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

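/*
 * Per-CPU rollover bookkeeping: active_asids caches the ASID currently
 * installed on each CPU and is cleared to 0 by a rollover; reserved_asids
 * remembers the ASID a CPU was running when the last rollover happened, so
 * that CPU can keep using it; tlb_flush_pending marks CPUs that still owe a
 * local TLB flush before running with an ASID from the new generation.
 */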
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
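/*
 * Build the mask of other CPUs that may currently be running (or have
 * reserved) the same ASID as @mm, so the caller's Cortex-A15 erratum 798181
 * workaround can limit its IPI-based TLB maintenance to just those CPUs.
 */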
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                 cpumask_t *mask)
{
    int cpu;
    unsigned long flags;
    u64 context_id, asid;

    raw_spin_lock_irqsave(&cpu_asid_lock, flags);
    context_id = mm->context.id.counter;
    for_each_online_cpu(cpu) {
        if (cpu == this_cpu)
            continue;
        /*
         * We only need to send an IPI if the other CPUs are
         * running the same ASID as the one being invalidated.
         */
        asid = per_cpu(active_asids, cpu).counter;
        if (asid == 0)
            asid = per_cpu(reserved_asids, cpu);
        if (context_id == asid)
            cpumask_set_cpu(cpu, mask);
    }
    raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
    u32 ttb;
    /*
     * Copy TTBR1 into TTBR0.
     * This points at swapper_pg_dir, which contains only global
     * entries so any speculative walks are perfectly safe.
     */
    asm volatile(
    "   mrc p15, 0, %0, c2, c0, 1       @ read TTBR1\n"
    "   mcr p15, 0, %0, c2, c0, 0       @ set TTBR0\n"
    : "=r" (ttb));
    isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
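/*
 * On every thread switch, write the new task's PID (shifted up by ASID_BITS)
 * into the "process ID" field of CONTEXTIDR while preserving the ASID field,
 * so external debug and trace tools can identify the running process.
 */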
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
                   void *t)
{
    u32 contextidr;
    pid_t pid;
    struct thread_info *thread = t;

    if (cmd != THREAD_NOTIFY_SWITCH)
        return NOTIFY_DONE;

    pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
    asm volatile(
    "   mrc p15, 0, %0, c13, c0, 1\n"
    "   and %0, %0, %2\n"
    "   orr %0, %0, %1\n"
    "   mcr p15, 0, %0, c13, c0, 1\n"
    : "=r" (contextidr), "+r" (pid)
    : "I" (~ASID_MASK));
    isb();

    return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
    .notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
    return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

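/*
 * Called with cpu_asid_lock held when the generation counter rolls over:
 * rebuild asid_map from the ASIDs still live on each CPU (recording them as
 * reserved so their owners can keep running), then mark every CPU as needing
 * a local TLB flush before it next runs with a new-generation ASID.
 */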
static void flush_context(unsigned int cpu)
{
    int i;
    u64 asid;

    /* Update the list of reserved ASIDs and the ASID bitmap. */
    bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
    for_each_possible_cpu(i) {
        asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
        /*
         * If this CPU has already been through a
         * rollover, but hasn't run another task in
         * the meantime, we must preserve its reserved
         * ASID, as this is the only trace we have of
         * the process it is still running.
         */
        if (asid == 0)
            asid = per_cpu(reserved_asids, i);
        __set_bit(asid & ~ASID_MASK, asid_map);
        per_cpu(reserved_asids, i) = asid;
    }

    /* Queue a TLB invalidate and flush the I-cache if necessary. */
    cpumask_setall(&tlb_flush_pending);

    if (icache_is_vivt_asid_tagged())
        __flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
    int cpu;
    bool hit = false;

    /*
     * Iterate over the set of reserved ASIDs looking for a match.
     * If we find one, then we can update our mm to use newasid
     * (i.e. the same ASID in the current generation) but we can't
     * exit the loop early, since we need to ensure that all copies
     * of the old ASID are updated to reflect the mm. Failure to do
     * so could result in us missing the reserved ASID in a future
     * generation.
     */
    for_each_possible_cpu(cpu) {
        if (per_cpu(reserved_asids, cpu) == asid) {
            hit = true;
            per_cpu(reserved_asids, cpu) = newasid;
        }
    }

    return hit;
}

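/*
 * Allocate an ASID for @mm under cpu_asid_lock. Illustrative walk-through
 * (assuming ASID_BITS == 8): if mm previously held ASID 0x105 and the current
 * generation is 0x200, we first try to re-use hardware ASID 5 as 0x205; only
 * if ASID 5 has already been claimed this generation do we search asid_map
 * for a free one. If no free ASID is found from cur_idx upwards, we bump the
 * generation to 0x300, call flush_context() and allocate again starting from
 * ASID 1 (ASID 0 stays reserved for the TTBR0 switch).
 */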
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
    static u32 cur_idx = 1;
    u64 asid = atomic64_read(&mm->context.id);
    u64 generation = atomic64_read(&asid_generation);

    if (asid != 0) {
        u64 newasid = generation | (asid & ~ASID_MASK);

        /*
         * If our current ASID was active during a rollover, we
         * can continue to use it and this was just a false alarm.
         */
        if (check_update_reserved_asid(asid, newasid))
            return newasid;

        /*
         * We had a valid ASID in a previous life, so try to re-use
         * it if possible.
         */
        asid &= ~ASID_MASK;
        if (!__test_and_set_bit(asid, asid_map))
            return newasid;
    }

    /*
     * Allocate a free ASID. If we can't find one, take a note of the
     * currently active ASIDs and mark the TLBs as requiring flushes.
     * We always count from ASID #1, as we reserve ASID #0 to switch
     * via TTBR0 and to avoid speculative page table walks from hitting
     * in any partial walk caches, which could be populated from
     * overlapping level-1 descriptors used to map both the module
     * area and the userspace stack.
     */
    asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
    if (asid == NUM_USER_ASIDS) {
        generation = atomic64_add_return(ASID_FIRST_VERSION,
                         &asid_generation);
        flush_context(cpu);
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
    }

    __set_bit(asid, asid_map);
    cur_idx = asid;
    cpumask_clear(mm_cpumask(mm));
    return asid | generation;
}

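/*
 * Switch this CPU to @mm. In the common case the mm's ASID belongs to the
 * current generation and this CPU has not observed a rollover since it last
 * installed an ASID, so the page tables can be switched without taking
 * cpu_asid_lock; otherwise fall back to the locked slow path, which may
 * allocate a fresh ASID via new_context().
 */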
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
    unsigned long flags;
    unsigned int cpu = smp_processor_id();
    u64 asid;

    check_vmalloc_seq(mm);

    /*
     * We cannot update the pgd and the ASID atomically with classic
     * MMU, so switch exclusively to global mappings to avoid
     * speculative page table walking with the wrong TTBR.
     */
    cpu_set_reserved_ttbr0();

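    /*
     * Fast path: XORing the mm's context ID with asid_generation cancels
     * the generation bits only when they match, so shifting right by
     * ASID_BITS leaves zero exactly when the ASID is from the current
     * generation. The xchg on active_asids additionally returns 0 if a
     * concurrent rollover has cleared this CPU's slot, in which case we
     * must take the slow path below.
     */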
    asid = atomic64_read(&mm->context.id);
    if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
        && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
        goto switch_mm_fastpath;

    raw_spin_lock_irqsave(&cpu_asid_lock, flags);
    /* Check that our ASID belongs to the current generation. */
    asid = atomic64_read(&mm->context.id);
    if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
        asid = new_context(mm, cpu);
        atomic64_set(&mm->context.id, asid);
    }

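    /*
     * If a rollover queued a TLB invalidate for this CPU and it has not
     * been performed yet, do it now before running with the new ASID.
     */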
    if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
        local_flush_bp_all();
        local_flush_tlb_all();
    }

    atomic64_set(&per_cpu(active_asids, cpu), asid);
    cpumask_set_cpu(cpu, mm_cpumask(mm));
    raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
    cpu_switch_mm(mm->pgd, mm);
}