/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

#define USE_SPLIT_PTE_PTLOCKS   (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS   (USE_SPLIT_PTE_PTLOCKS && \
        IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
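
/*
 * Illustrative note, not part of the original header: ALLOC_SPLIT_PTLOCKS
 * is how <linux/mm_types.h> decides whether a split page-table lock still
 * fits inside struct page.  spinlock_t normally fits in one long, but can
 * grow past that (e.g. with lockdep), in which case the lock is allocated
 * separately and struct page only stores a pointer.  The field layout
 * below is assumed here for illustration only:
 *
 *  #if ALLOC_SPLIT_PTLOCKS
 *      spinlock_t *ptl;    // too large to embed: allocated separately
 *  #else
 *      spinlock_t ptl;     // small enough to embed directly in struct page
 *  #endif
 */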

/*
 * The per task VMA cache array:
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

struct vmacache {
    u64 seqnum;
    struct vm_area_struct *vmas[VMACACHE_SIZE];
};
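
/*
 * Illustrative sketch, not part of the original header: lookups in the
 * per-task cache hash the faulting address into one of the VMACACHE_SIZE
 * slots.  The helper name and its placement here are assumptions; the
 * real lookup logic lives in mm/vmacache.c.
 */
static inline struct vm_area_struct *
vmacache_example_slot(struct vmacache *cache, unsigned long addr)
{
    /* Drop the offset-in-page bits, then keep VMACACHE_BITS worth of index */
    return cache->vmas[(addr >> PAGE_SHIFT) & VMACACHE_MASK];
}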

/*
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
    MM_FILEPAGES,   /* Resident file mapping pages */
    MM_ANONPAGES,   /* Resident anonymous pages */
    MM_SWAPENTS,    /* Anonymous swap entries */
    MM_SHMEMPAGES,  /* Resident shared memory pages */
    NR_MM_COUNTERS
};

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
    int events; /* for synchronization threshold */
    int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
    atomic_long_t count[NR_MM_COUNTERS];
};
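
/*
 * Illustrative sketch, not part of the original header: with
 * SPLIT_RSS_COUNTING, each thread batches RSS deltas in its own
 * task_rss_stat and only folds them into the shared atomic counters in
 * mm_rss_stat once enough events have accumulated.  The helper name and
 * the threshold value below are assumptions; the real code lives in
 * mm/memory.c.
 */
#ifdef SPLIT_RSS_COUNTING
#define EXAMPLE_RSS_EVENTS_THRESH   64

static inline void example_rss_account(struct mm_rss_stat *rss,
                                       struct task_rss_stat *stat,
                                       int member, int val)
{
    stat->count[member] += val;             /* cheap, thread-local update */
    if (++stat->events >= EXAMPLE_RSS_EVENTS_THRESH) {
        int i;

        /* Fold the cached per-thread deltas into the shared counters */
        for (i = 0; i < NR_MM_COUNTERS; i++) {
            if (stat->count[i]) {
                atomic_long_add(stat->count[i], &rss->count[i]);
                stat->count[i] = 0;
            }
        }
        stat->events = 0;
    }
}
#endif /* SPLIT_RSS_COUNTING */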

struct page_frag {
    struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
    __u32 offset;
    __u32 size;
#else
    __u16 offset;
    __u16 size;
#endif
};
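
/*
 * Illustrative sketch, not part of the original header: a page_frag
 * tracks a carve-out position within a page (or compound page), e.g. for
 * small transient buffers in networking.  The helper name here is an
 * assumption; real users go through page_frag_alloc() and friends.
 */
static inline bool example_page_frag_has_room(const struct page_frag *pfrag,
                                              unsigned int len)
{
    /* Room left between the current offset and the end of the fragment */
    return pfrag->page && pfrag->offset + len <= pfrag->size;
}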

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
    /*
     * The arch code makes the following promise: generic code can modify a
     * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
     * needed barriers), then call arch_tlbbatch_flush(), and the entries
     * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
     * returns.
     */
    struct arch_tlbflush_unmap_batch arch;

    /* True if a flush is needed. */
    bool flush_required;

    /*
     * If true then the PTE was dirty when unmapped. The entry must be
     * flushed before IO is initiated or a stale TLB entry potentially
     * allows an update without redirtying the page.
     */
    bool writable;
#endif
};
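
/*
 * Illustrative sketch, not part of the original header: the reclaim path
 * (mm/rmap.c) uses this structure roughly as below, relying on the arch
 * promise described above.  Helper names other than the two
 * arch_tlbbatch_*() calls are assumptions, and the declarations for those
 * calls come from the architecture's TLB headers, not this file.
 */
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
struct mm_struct;

static inline void example_tlbbatch_add(struct tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm, bool pte_dirty)
{
    /* 1. The caller has already modified/cleared the PTE. */

    /* 2. Record that this mm needs a flush (barriers handled by the arch). */
    arch_tlbbatch_add_mm(&batch->arch, mm);
    batch->flush_required = true;

    /* Dirty PTEs must be flushed before any IO is started on the page. */
    if (pte_dirty)
        batch->writable = true;
}

static inline void example_tlbbatch_finish(struct tlbflush_unmap_batch *batch)
{
    /* 3. A single flush covers every CPU recorded in the arch batch. */
    if (batch->flush_required)
        arch_tlbbatch_flush(&batch->arch);

    batch->flush_required = false;
    batch->writable = false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */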

#endif /* _LINUX_MM_TYPES_TASK_H */