/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

#define INIT_PASID	0

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
 * That requires that freelist & counters in struct slab be adjacent and
 * double-word aligned. Because struct slab currently just reinterprets the
 * bits of struct page, we align all struct pages to double-word boundaries,
 * and ensure that 'freelist' is aligned within struct slab.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif
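
/*
 * Illustration only (not part of the original header): a minimal sketch
 * of the rules documented above.  A hypothetical allocator stores a
 * pointer to its own (suitably aligned) bookkeeping object in the
 * page->mapping word, keeping bit 0 clear, and restores NULL before the
 * page is freed.  "struct my_cookie" is an assumed name for the example.
 */
#if 0
struct my_cookie { int id; };

static struct page *example_get_page(struct my_cookie *c)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (page)	/* kmalloc'd pointers are aligned, so bit 0 is clear */
		page->mapping = (struct address_space *)c;
	return page;
}

static void example_put_page(struct page *page)
{
	page->mapping = NULL;	/* mandatory before freeing */
	__free_pages(page, 0);
}
#endif
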
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * lruvec->lru_lock.  Sometimes used as a generic list
			 * by the page owner.
			 */
			union {
				struct list_head lru;

				/* Or, for the Unevictable "LRU list" slot */
				struct {
					/* Always even, to negate PageTail */
					void *__filler;
					/* Count page's or folio's mlocks */
					unsigned int mlock_count;
				};

				/* Or, free page */
				struct list_head buddy_list;
				struct list_head pcp_list;
			};
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			pgoff_t index;		/* Our offset within mapping. */
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @pp_magic: magic value to avoid recycling non
			 * page_pool allocated pages.
			 */
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			union {
				/**
				 * dma_addr_upper: might require a 64-bit
				 * value on 32-bit architectures.
				 */
				unsigned long dma_addr_upper;
				/**
				 * For frag page support, not supported in
				 * 32-bit architectures with 64-bit DMA.
				 */
				atomic_long_t pp_frag_count;
			};
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			atomic_t compound_mapcount;
			atomic_t compound_pincount;
#ifdef CONFIG_64BIT
			unsigned int compound_nr; /* 1 << compound_order */
#endif
		};
		struct {	/* Second tail page of compound page */
			unsigned long _compound_pad_1;	/* compound_head */
			unsigned long _compound_pad_2;
			/* For both global and memcg */
			struct list_head deferred_list;
		};
		struct {	/* Page table pages */
			unsigned long _pt_pad_1;	/* compound_head */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
			unsigned long _pt_pad_2;	/* mapping */
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
#else
			spinlock_t ptl;
#endif
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
			/*
			 * ZONE_DEVICE private pages are counted as being
			 * mapped so the next 3 words hold the mapping, index,
			 * and private fields from the source anonymous or
			 * page cache page while the page is migrated to device
			 * private memory.
			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
			 * use the mapping, index, and private fields when
			 * pmem backed DAX files are mapped.
			 */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for.  See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;

#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;
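
/*
 * Illustration only (not part of the original header): how the bit-0
 * encoding of @compound_head is consumed.  For a tail page, bit 0 is set
 * and the remaining bits point at the head page; the real helper for
 * this is compound_head() in page-flags.h.
 */
#if 0
static inline struct page *example_compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (head & 1)			/* tail page: decode the pointer */
		return (struct page *)(head - 1);
	return page;			/* head (or order-0) page */
}
#endif
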
/**
 * struct folio - Represents a contiguous set of bytes.
 * @flags: Identical to the page flags.
 * @lru: Least Recently Used list; tracks how recently this folio was used.
 * @mlock_count: Number of times this folio has been pinned by mlock().
 * @mapping: The file this page belongs to, or refers to the anon_vma for
 *    anonymous memory.
 * @index: Offset within the file, in units of pages.  For anonymous memory,
 *    this is the index from the beginning of the mmap.
 * @private: Filesystem per-folio data (see folio_attach_private()).
 *    Used for swp_entry_t if folio_test_swapcache().
 * @_mapcount: Do not access this member directly.  Use folio_mapcount() to
 *    find out how many times this folio is mapped by userspace.
 * @_refcount: Do not access this member directly.  Use folio_ref_count()
 *    to find how many references there are to this folio.
 * @memcg_data: Memory Control Group data.
 *
 * A folio is a physically, virtually and logically contiguous set
 * of bytes.  It is a power-of-two in size, and it is aligned to that
 * same power-of-two.  It is at least as large as %PAGE_SIZE.  If it is
 * in the page cache, it is at a file offset which is a multiple of that
 * power-of-two.  It may be mapped into userspace at an address which is
 * at an arbitrary page offset, but its kernel virtual address is aligned
 * to its size.
 */
struct folio {
	/* private: don't document the anon union */
	union {
		struct {
	/* public: */
			unsigned long flags;
			union {
				struct list_head lru;
	/* private: avoid cluttering the output */
				struct {
					void *__filler;
	/* public: */
					unsigned int mlock_count;
	/* private: */
				};
	/* public: */
			};
			struct address_space *mapping;
			pgoff_t index;
			void *private;
			atomic_t _mapcount;
			atomic_t _refcount;
#ifdef CONFIG_MEMCG
			unsigned long memcg_data;
#endif
	/* private: the union with struct page is transitional */
		};
		struct page page;
	};
};

static_assert(sizeof(struct page) == sizeof(struct folio));
#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
FOLIO_MATCH(mapping, mapping);
FOLIO_MATCH(compound_head, lru);
FOLIO_MATCH(index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#undef FOLIO_MATCH
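
/*
 * Illustration only (not part of the original header): the FOLIO_MATCH
 * assertions above are what make page <-> folio reinterpretation safe.
 * Every shared field sits at the same offset in both structures, so a
 * head page may be viewed as a folio; this is essentially the layout
 * guarantee that the real page_folio()/folio_page() helpers depend on.
 */
#if 0
static inline struct folio *example_page_to_folio(struct page *page)
{
	/* Only valid for a head (or order-0) page. */
	return (struct folio *)page;
}
#endif
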
static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
{
	struct page *tail = &folio->page + 1;
	return &tail->compound_mapcount;
}

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline atomic_t *compound_pincount_ptr(struct page *page)
{
	return &page[1].compound_pincount;
}

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

/*
 * page_private can be used on tail pages.  However, PagePrivate is only
 * checked by the VM on the head page.  So page_private on the tail pages
 * should be used for data that's ancillary to the head page (eg attaching
 * buffer heads to tail pages after attaching buffer heads to the head page)
 */
#define page_private(page)		((page)->private)

static inline void set_page_private(struct page *page, unsigned long private)
{
	page->private = private;
}

static inline void *folio_get_private(struct folio *folio)
{
	return folio->private;
}
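
/*
 * Illustration only (not part of the original header): page_private()
 * and set_page_private() just move an opaque unsigned long in and out of
 * page->private, so a value (or suitably cast pointer) stashed on a tail
 * page can be retrieved later, subject to the rule above that such data
 * must be ancillary to the head page.
 */
#if 0
static void example_stash_cookie(struct page *tail, unsigned long cookie)
{
	set_page_private(tail, cookie);
	WARN_ON(page_private(tail) != cookie);
}
#endif
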
struct page_frag_cache {
	void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int pagecnt_bias;
	bool pfmemalloc;
};
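
/*
 * Illustration only (not part of the original header): the bias trick
 * described above.  The allocator takes many references up front and
 * hands fragments out against @pagecnt_bias, so the hot path only
 * touches this cache-local counter; page->_refcount is reconciled once,
 * when the page is retired.  Names and the constant are assumptions.
 */
#if 0
#define EXAMPLE_BIAS	USHRT_MAX

static void example_frag_refill(struct page_frag_cache *nc, struct page *page)
{
	page_ref_add(page, EXAMPLE_BIAS - 1);	/* one big refcount update */
	nc->pagecnt_bias = EXAMPLE_BIAS;
}

static void *example_frag_alloc(struct page_frag_cache *nc, unsigned int sz)
{
	nc->pagecnt_bias--;		/* no atomic, no shared cacheline */
	nc->offset -= sz;
	return nc->va + nc->offset;
}
#endif
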
typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */

	/*
	 * Access permissions of this VMA.
	 * See vmf_insert_mixed_prot() for discussion.
	 */
	pgprot_t vm_page_prot;
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 *
	 * For private anonymous mappings, a pointer to a null terminated string
	 * containing the name given to the vma, or NULL if unnamed.
	 */

	union {
		struct {
			struct rb_node rb;
			unsigned long rb_subtree_last;
		} shared;
		/*
		 * Serialized by mmap_sem. Never use directly because it is
		 * valid only when vm_file is NULL. Use anon_vma_name instead.
		 */
		struct anon_vma_name *anon_name;
	};

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
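
/*
 * Illustration only (not part of the original header): per the comment
 * on the union above, @anon_name and @shared overlay each other, so the
 * name pointer is meaningful only for anonymous VMAs.  A safe accessor
 * must check vm_file first; the real, locking-aware helper is the
 * anon_vma_name() function the comment refers to.
 */
#if 0
static inline struct anon_vma_name *example_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_file)	/* union currently holds 'shared' */
		return NULL;
	return vma->anon_name;
}
#endif
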
struct kioctx_table;
struct mm_struct {
	struct {
		struct vm_area_struct *mmap;		/* list of VMAs */
		struct rb_root mm_rb;
		u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		unsigned long highest_vm_end;	/* highest vma end address */
		pgd_t * pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		atomic_t mm_users;

		/**
		 * @mm_count: The number of references to &struct mm_struct
		 * (@mm_users count as 1).
		 *
		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
		 * &struct mm_struct is freed.
		 */
		atomic_t mm_count;

#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		/*
		 * With some kernel config, the current mmap_lock's offset
		 * inside 'mm_struct' is at 0x120, which is very optimal, as
		 * its two hot fields 'count' and 'owner' sit in 2 different
		 * cachelines, and when mmap_lock is highly contended, both
		 * of the 2 fields will be accessed frequently, current layout
		 * will help to reduce cache bouncing.
		 *
		 * So please be careful with adding new fields before
		 * mmap_lock, which can easily push the 2 fields into one
		 * cacheline.
		 */
		struct rw_semaphore mmap_lock;

		struct list_head mmlist; /* List of maybe swapped mm's.  These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */


		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		/**
		 * @write_protect_seq: Locked when any thread is write
		 * protecting pages mapped by this mm to enforce a later COW,
		 * for instance during page table copying for fork().
		 */
		seqcount_t write_protect_seq;

		spinlock_t arg_lock; /* protect the below fields */

		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		/*
		 * Special counters, in some configurations protected by the
		 * page_table_lock, in other configurations by being atomic.
		 */
		struct mm_rss_stat rss_stat;

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that the PTEs will be marked
		 * pte_numa. NUMA hinting faults will gather statistics and
		 * migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and setting pte_numa */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads setting pte_numa */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE or PROT_NUMA mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		atomic_t tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_PREEMPT_RT
		struct rcu_head delayed_drop;
#endif
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#ifdef CONFIG_IOMMU_SVA
		u32 pasid;
#endif
#ifdef CONFIG_KSM
		/*
		 * Represent how many pages of this process are involved in KSM
		 * merging.
		 */
		unsigned long ksm_merging_pages;
#endif
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};
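
/*
 * Illustration only (not part of the original header): the two-level
 * refcounting documented at @mm_users/@mm_count above.  A hypothetical
 * walker that must keep struct mm_struct itself alive, but can tolerate
 * the address space being torn down, holds only an mm_count reference
 * and upgrades with mmget_not_zero() before touching user mappings.
 */
#if 0
static void example_inspect_mm(struct mm_struct *mm)
{
	mmgrab(mm);			/* pin struct mm_struct itself */

	if (mmget_not_zero(mm)) {	/* any real users left? */
		/* ... safe to walk user mappings here ... */
		mmput(mm);		/* may drop mm_users' mm_count ref */
	}

	mmdrop(mm);			/* drop our mm_count pin */
}
#endif
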
extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);
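
/*
 * Illustration only (not part of the original header): the usual shape
 * of an unmap operation bracketed by the mmu_gather API declared above.
 * Whatever tears down page table entries runs between the two calls;
 * tlb_finish_mmu() then issues any pending TLB flushes and frees the
 * batched pages.
 */
#if 0
static void example_unmap_all(struct mm_struct *mm)
{
	struct mmu_gather tlb;

	tlb_gather_mmu_fullmm(&tlb, mm);	/* whole address space */
	/* ... unmap_vmas()/free_pgtables()-style teardown here ... */
	tlb_finish_mmu(&tlb);
}
#endif
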
struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:		Out Of Memory
 * @VM_FAULT_SIGBUS:		Bad access
 * @VM_FAULT_MAJOR:		Page read from storage
 * @VM_FAULT_WRITE:		Special case for get_user_pages
 * @VM_FAULT_HWPOISON:		Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
 *				in upper bits
 * @VM_FAULT_SIGSEGV:		segmentation fault
 * @VM_FAULT_NOPAGE:		->fault installed the pte, not return page
 * @VM_FAULT_LOCKED:		->fault locked the returned page
 * @VM_FAULT_RETRY:		->fault blocked, must retry
 * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:		->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
 *				fsync() to complete (for synchronous page faults
 *				in DAX)
 * @VM_FAULT_COMPLETED:		->fault completed, meanwhile mmap lock released
 * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
 */
enum vm_fault_reason {
	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
	VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
	VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
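
/*
 * Illustration only (not part of the original header): how a hugetlb
 * poison handler would report which hstate was hit.  The index occupies
 * bits 16-19 (covered by VM_FAULT_HINDEX_MASK), so it coexists with the
 * low-order fault bits and is recovered with VM_FAULT_GET_HINDEX().
 */
#if 0
static vm_fault_t example_report_poison(unsigned int hstate_idx)
{
	vm_fault_t ret = VM_FAULT_HWPOISON_LARGE |
			 VM_FAULT_SET_HINDEX(hstate_idx);

	/* VM_FAULT_GET_HINDEX(ret) == hstate_idx, for hstate_idx <= 0xf */
	return ret;
}
#endif
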
#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,                 "OOM" },	\
	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
	{ VM_FAULT_MAJOR,               "MAJOR" },	\
	{ VM_FAULT_WRITE,               "WRITE" },	\
	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
	{ VM_FAULT_LOCKED,              "LOCKED" },	\
	{ VM_FAULT_RETRY,               "RETRY" },	\
	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

/**
 * enum fault_flag - Fault flag definitions.
 * @FAULT_FLAG_WRITE: Fault was a write fault.
 * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
 * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
 * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
 * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
 * @FAULT_FLAG_TRIED: The fault has been tried once.
 * @FAULT_FLAG_USER: The fault originated in userspace.
 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
 * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
 * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
 * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to unshare (and mark
 *                      exclusive) a possibly shared anonymous page that is
 *                      mapped R/O.
 * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
 *                             We should only access orig_pte if this flag set.
 *
 * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
 * whether we would allow page faults to retry by specifying these two
 * fault flags correctly.  Currently there can be three legal combinations:
 *
 * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
 *                              this is the first try
 *
 * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
 *                              we've already tried at least once
 *
 * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
 *
 * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
 * be used.  Note that page faults can be allowed to retry for multiple times,
 * in which case we'll have an initial fault with flags (a) then later on
 * continuous faults with flags (b).  We should always try to detect pending
 * signals before a retry to make sure the continuous page faults can still be
 * interrupted if necessary.
 *
 * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
 * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
 * no existing R/O-mapped anonymous page is encountered.
 */
enum fault_flag {
	FAULT_FLAG_WRITE =		1 << 0,
	FAULT_FLAG_MKWRITE =		1 << 1,
	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
	FAULT_FLAG_RETRY_NOWAIT =	1 << 3,
	FAULT_FLAG_KILLABLE =		1 << 4,
	FAULT_FLAG_TRIED =		1 << 5,
	FAULT_FLAG_USER =		1 << 6,
	FAULT_FLAG_REMOTE =		1 << 7,
	FAULT_FLAG_INSTRUCTION =	1 << 8,
	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
	FAULT_FLAG_UNSHARE =		1 << 10,
	FAULT_FLAG_ORIG_PTE_VALID =	1 << 11,
};
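
/*
 * Illustration only (not part of the original header): a checker for the
 * three legal ALLOW_RETRY/TRIED combinations (a), (b) and (c) documented
 * above, plus the WRITE|UNSHARE exclusion.
 */
#if 0
static inline bool example_fault_flags_valid(enum fault_flag flags)
{
	/* (!ALLOW_RETRY && TRIED) is the one illegal retry combination */
	if (!(flags & FAULT_FLAG_ALLOW_RETRY) && (flags & FAULT_FLAG_TRIED))
		return false;

	/* WRITE|UNSHARE is illegal as well */
	if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_UNSHARE))
		return false;

	return true;
}
#endif
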
typedef unsigned int __bitwise zap_flags_t;

#endif /* _LINUX_MM_TYPES_H */