/*
 *  linux/arch/arm/mm/context.c
 *
 *  ARM ASID (Address Space ID) allocator and mm context-ID management.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

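/*
 * The ASID allocator state: the generation counter lives in the upper
 * bits of asid_generation, while asid_map tracks which ASIDs are in use
 * within the current generation. Each CPU records the ASID it is
 * currently running in active_asids (cleared at rollover) and the ASID
 * it was running at the last rollover in reserved_asids. CPUs flagged
 * in tlb_flush_pending must flush their TLBs before running a task with
 * an ASID from the new generation. cpu_asid_lock protects the slow path
 * of the allocator.
 */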
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to notify the other CPUs if they are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there
 * is no need to switch to a reserved set of tables around a rollover.
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

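/*
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task is written
 * into the upper (process ID) bits of CONTEXTIDR on every thread switch,
 * leaving the ASID field untouched, so that debug and trace tools can
 * identify the running process.
 */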
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

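/*
 * Generation rollover: called from new_context() with cpu_asid_lock held.
 * Rebuild asid_map from the ASIDs still live on each CPU, remember them
 * as reserved_asids, and queue a TLB flush for every CPU.
 */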
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation), but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

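/*
 * Allocate an ASID for mm in the current generation. Called with
 * cpu_asid_lock held; may trigger a generation rollover via
 * flush_context() when the ASID space is exhausted.
 */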
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to
		 * re-use it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, so ASID #0 is never handed out
	 * to user processes.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return asid | generation;
}

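/*
 * Switch the MMU context to mm: make sure it holds a valid ASID in the
 * current generation and then install its page tables. The common case
 * takes a lock-free fast path; the slow path under cpu_asid_lock handles
 * ASID allocation, generation rollover and deferred TLB invalidation.
 */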
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}