/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bits.h>

/*
 * Per-mm flag bits.  NOTE(review): presumably stored in the
 * mm_context_t.flags field declared below (CONFIG_X86_64) — confirm
 * against the users of these constants.
 */
/* Uprobes on this MM assume 32-bit code */
#define MM_CONTEXT_UPROBE_IA32  BIT(0)
/* vsyscall page is accessible on this MM */
#define MM_CONTEXT_HAS_VSYSCALL BIT(1)
0015 
/*
 * x86 has arch-specific MMU state beyond what lives in mm_struct.
 */
typedef struct {
    /*
     * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
     * be reused, and zero is not a valid ctx_id.
     */
    u64 ctx_id;

    /*
     * Any code that needs to do any sort of TLB flushing for this
     * mm will first make its changes to the page tables, then
     * increment tlb_gen, then flush.  This lets the low-level
     * flushing code keep track of what needs flushing.
     *
     * This is not used on Xen PV.
     */
    atomic64_t tlb_gen;

#ifdef CONFIG_MODIFY_LDT_SYSCALL
    /* Serializes userspace modification of the LDT for this mm. */
    struct rw_semaphore ldt_usr_sem;
    /* Custom LDT installed via modify_ldt(); NOTE(review): presumably
     * NULL when none is installed — confirm against ldt.c. */
    struct ldt_struct   *ldt;
#endif

#ifdef CONFIG_X86_64
    /* NOTE(review): presumably holds the MM_CONTEXT_* bits defined
     * above — confirm at the sites that set/test it. */
    unsigned short flags;
#endif

    /* NOTE(review): what this mutex guards is not evident from this
     * header alone — see the mm context users before relying on it. */
    struct mutex lock;
    void __user *vdso;          /* vdso base address */
    const struct vdso_image *vdso_image;    /* vdso image in use */

    atomic_t perf_rdpmc_allowed;    /* nonzero if rdpmc is allowed */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
    /*
     * One bit per protection key says whether userspace can
     * use it or not.  protected by mmap_lock.
     */
    u16 pkey_allocation_map;
    s16 execute_only_pkey;
#endif
} mm_context_t;
0059 
/*
 * Static initializer fragment for an mm_struct's .context member:
 * a ready-to-use context mutex plus ctx_id 1 (zero is not a valid
 * ctx_id, per the mm_context_t definition above).
 */
#define INIT_MM_CONTEXT(mm)                                 \
    .context = {                                            \
        .lock   = __MUTEX_INITIALIZER(mm.context.lock),     \
        .ctx_id = 1,                                        \
    }
0065 
/* Arch hook to leave the current mm on @cpu; prototype only — the
 * implementation lives elsewhere (TODO confirm semantics there). */
void leave_mm(int cpu);
/*
 * NOTE(review): the self-referential define presumably lets generic
 * code detect via #ifdef that this architecture supplies its own
 * leave_mm() — confirm against the generic fallback site.
 */
#define leave_mm leave_mm
0068 
#endif /* _ASM_X86_MMU_H */