/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_ASID_H
#define __ASM_ASM_ASID_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct asid_info {
    atomic64_t          generation;
    unsigned long       *map;
    atomic64_t __percpu *active;
    u64 __percpu        *reserved;
    u32                 bits;
    /* Lock protecting the structure */
    raw_spinlock_t      lock;
    /* Which CPUs require a context flush on the next call */
    cpumask_t           flush_pending;
    /* Number of ASIDs allocated per context (shift value) */
    unsigned int        ctxt_shift;
    /* Callback to locally flush the context */
    void                (*flush_cpu_ctxt_cb)(void);
};

#define NUM_ASIDS(info)         (1UL << ((info)->bits))
#define NUM_CTXT_ASIDS(info)    (NUM_ASIDS(info) >> (info)->ctxt_shift)

#define active_asid(info, cpu)  *per_cpu_ptr((info)->active, cpu)

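/*
 * Worked example (illustrative values, not mandated by this header):
 * with info->bits = 16 and info->ctxt_shift = 1 (i.e. two hardware
 * ASIDs consumed per context), NUM_ASIDS(info) = 1UL << 16 = 65536,
 * and NUM_CTXT_ASIDS(info) = 65536 >> 1 = 32768 contexts can be
 * allocated per generation before a rollover is required.
 */
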
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
                      unsigned int cpu, struct mm_struct *mm);

/*
 * Check if the ASID is still valid for the context. If not, generate
 * a new one.
 *
 * @info: Pointer to the ASID allocator information
 * @pasid: Pointer to the current ASID batch
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm_struct of the context requiring the ASID
 */
static inline void asid_check_context(struct asid_info *info,
                                      atomic64_t *pasid, unsigned int cpu,
                                      struct mm_struct *mm)
{
    u64 asid, old_active_asid;

    asid = atomic64_read(pasid);

    /*
     * The memory ordering here is subtle.
     * If our active_asid is non-zero and the ASID matches the current
     * generation, then we update the active_asid entry with a relaxed
     * cmpxchg. Racing with a concurrent rollover means that either:
     *
     * - We get a zero back from the cmpxchg and end up waiting on the
     *   lock. Taking the lock synchronises with the rollover and so
     *   we are forced to see the updated generation.
     *
     * - We get a valid ASID back from the cmpxchg, which means the
     *   relaxed xchg in flush_context will treat us as reserved
     *   because atomic RmWs are totally ordered for a given location.
     */
    old_active_asid = atomic64_read(&active_asid(info, cpu));
    if (old_active_asid &&
        !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
        atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
                                 old_active_asid, asid))
        return;

    asid_new_context(info, pasid, cpu, mm);
}

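/*
 * Minimal usage sketch (an illustration, not part of this header):
 * a hypothetical architecture context-switch hook. It assumes the
 * ASID batch is stored in mm->context.id as an atomic64_t (as on
 * arm64) and that my_asid_info has been set up with
 * asid_allocator_init(). get_cpu()/put_cpu() keep preemption
 * disabled across the check, as the @cpu documentation above
 * requires.
 *
 *     static void my_check_and_switch_context(struct mm_struct *mm)
 *     {
 *         unsigned int cpu = get_cpu();
 *
 *         asid_check_context(&my_asid_info, &mm->context.id, cpu, mm);
 *         put_cpu();
 *     }
 */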

int asid_allocator_init(struct asid_info *info,
                        u32 bits, unsigned int asid_per_ctxt,
                        void (*flush_cpu_ctxt_cb)(void));
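
/*
 * Initialisation sketch (assumed values; my_asid_info and
 * my_flush_cpu_ctxt are hypothetical): 16-bit ASIDs with two
 * hardware ASIDs consumed per context, and a callback that flushes
 * the local TLB when a rollover requires it:
 *
 *     static void my_flush_cpu_ctxt(void)
 *     {
 *         local_flush_tlb_all();
 *     }
 *
 *     if (asid_allocator_init(&my_asid_info, 16, 2, my_flush_cpu_ctxt))
 *         panic("Failed to initialise ASID allocator");
 */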

#endif /* __ASM_ASM_ASID_H */