0001
0002 #ifndef __ASM_ASM_ASID_H
0003 #define __ASM_ASM_ASID_H
0004
0005 #include <linux/atomic.h>
0006 #include <linux/compiler.h>
0007 #include <linux/cpumask.h>
0008 #include <linux/percpu.h>
0009 #include <linux/spinlock.h>
0010
/*
 * State for one ASID allocator instance.
 *
 * NOTE(review): this header only declares the interface; comments on
 * allocator internals below are inferred from the visible users
 * (NUM_ASIDS/NUM_CTXT_ASIDS/asid_check_context) and should be confirmed
 * against the allocator's .c file.
 */
struct asid_info
{
	/* Current ASID generation; compared against per-mm ASIDs in
	 * asid_check_context() to detect stale ASIDs. */
	atomic64_t generation;
	/* Bitmap of in-use ASIDs — presumably indexed by context; confirm
	 * against the allocator implementation. */
	unsigned long *map;
	/* Per-CPU: ASID currently active on each CPU (see active_asid()). */
	atomic64_t __percpu *active;
	/* Per-CPU: ASID reserved per CPU — presumably carried across a
	 * generation rollover; confirm against asid_new_context(). */
	u64 __percpu *reserved;
	/* Width of an ASID in bits; total ASIDs = 1UL << bits (NUM_ASIDS). */
	u32 bits;

	/* Serialises the slow path / allocator state. */
	raw_spinlock_t lock;

	/* CPUs with a pending local context flush. */
	cpumask_t flush_pending;

	/* log2 of ASIDs consumed per context (see NUM_CTXT_ASIDS). */
	unsigned int ctxt_shift;

	/* Callback that flushes the local CPU's context; supplied via
	 * asid_allocator_init(). */
	void (*flush_cpu_ctxt_cb)(void);
};
0027
/* Total number of ASIDs representable with (info)->bits bits. */
#define NUM_ASIDS(info)			(1UL << ((info)->bits))
/* Number of contexts: the ASID space divided into groups of
 * 1 << ctxt_shift ASIDs each. */
#define NUM_CTXT_ASIDS(info)		(NUM_ASIDS(info) >> (info)->ctxt_shift)

/*
 * Per-CPU slot holding the ASID currently active on @cpu.
 *
 * Fix: fully parenthesize the expansion and the @cpu argument. The
 * original expanded to a bare `*per_cpu_ptr(...)`, so uses in a larger
 * expression (e.g. `active_asid(i, c) + 1`, or a complex @cpu argument)
 * could parse incorrectly. Existing users such as
 * `&active_asid(info, cpu)` and `atomic64_read(&active_asid(...))`
 * are unaffected.
 */
#define active_asid(info, cpu)		(*per_cpu_ptr((info)->active, (cpu)))
0032
/*
 * Slow path of asid_check_context(): taken when the mm's ASID is stale
 * (wrong generation) or the lock-free update of the active ASID lost a
 * race with a rollover.
 *
 * @pasid: Pointer to the current ASID batch for the context
 * @cpu:   Current CPU ID
 * @mm:    Context owning @pasid
 *
 * NOTE(review): defined in the allocator's .c file; exact rollover and
 * flush behaviour is not visible in this header — confirm there.
 */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm);
0035
0036
0037
0038
0039
0040
0041
/*
 * Check the ASID is still valid for the context. If not, generate a new
 * ASID via the slow path.
 *
 * @pasid: Pointer to the current ASID batch
 * @cpu:   Current CPU ID
 */
static inline void asid_check_context(struct asid_info *info,
				      atomic64_t *pasid, unsigned int cpu,
				      struct mm_struct *mm)
{
	u64 asid, old_active_asid;

	asid = atomic64_read(pasid);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asid is non-zero and the ASID matches the current
	 * generation, then we update the active_asid entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   rollover sequence can see our concurrent switch.
	 */
	old_active_asid = atomic64_read(&active_asid(info, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
				     old_active_asid, asid))
		return;

	/* Slow path: stale generation or lost the cmpxchg race. */
	asid_new_context(info, pasid, cpu, mm);
}
0073
/*
 * Initialise the ASID allocator.
 *
 * @info:              Allocator state to initialise
 * @bits:              Width of an ASID in bits (ASID space = 1UL << bits)
 * @asid_per_ctxt:     Number of ASIDs consumed per context — presumably
 *                     a power of two (it becomes ctxt_shift, used as a
 *                     shift in NUM_CTXT_ASIDS); confirm in the .c file
 * @flush_cpu_ctxt_cb: Callback to flush the local CPU context
 *
 * Returns 0 on success — NOTE(review): error convention not visible in
 * this header; confirm against the definition.
 */
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void));
0077
0078 #endif