/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page than
 * the head struct page can hold, so tail struct pages are used to store it.
 * The enum below names the tail page (subpage) index at which each item
 * lives; page->private of that tail page holds the value.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	SUBPAGE_INDEX_HWPOISON,		/* reuse page->private */
#endif
	__NR_USED_SUBPAGE,
};

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes
				 * both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to
				 * satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by the resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The range [from, to) of each file_region is
 * expressed in units of huge pages relative to the start of the mapping.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
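
/*
 * Illustrative sketch (not part of the API surface beyond the two
 * declarations above): a resv_map is reference counted through 'refs', and
 * the final kref_put() invokes resv_map_release().  A hypothetical user:
 *
 *	struct resv_map *resv = resv_map_alloc();
 *
 *	if (!resv)
 *		return -ENOMEM;
 *	...
 *	kref_put(&resv->refs, resv_map_release);
 */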

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
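
/*
 * Illustrative sketch: iterating all registered huge page sizes.  'h' walks
 * hstates[0 .. hugetlb_max_hstate); huge_page_size() is defined later in
 * this header.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu bytes\n", h->name, huge_page_size(h));
 */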

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
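
/*
 * Illustrative sketch, modeled on how a hugetlbfs mount sizes itself ('h',
 * 'max_hpages' and 'min_hpages' assumed in scope): the subpool caps usage at
 * max_hpages and pre-reserves min_hpages from the global pool.
 * hugepage_put_subpool() drops the reference taken at creation.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */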

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep,
				bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
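
/*
 * Illustrative sketch: hugetlb faults on the same mapping/index hash to the
 * same mutex, serializing concurrent faults on one huge page ('mapping' and
 * 'idx' assumed in scope):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */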

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep,
						bool wp_copy)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */

/*
 * hugepages at page global directory. If arch support
 * hugepages at pgd level, they need to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the macros below should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	and the raw error information is no longer tracked reliably.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
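
/*
 * Illustrative sketch: HPAGEFLAG(Migratable, migratable) above expands into
 * HPageMigratable(), SetHPageMigratable() and ClearHPageMigratable().  A
 * hypothetical isolation path (the real one lives in mm/hugetlb.c and runs
 * under hugetlb_lock) might look like:
 *
 *	if (HPageMigratable(page)) {
 *		ClearHPageMigratable(page);
 *		list_move_tail(&page->lru, list);
 *	}
 */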

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[SUBPAGE_INDEX_SUBPOOL].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
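
/*
 * Illustrative sketch: mmap(MAP_HUGETLB | MAP_HUGE_2MB) carries an encoded
 * page-size log of 21, and hstate_sizelog(21) resolves it to the hstate for
 * 1UL << 21 = 2 MiB pages.  A log of 0 selects the default huge page size.
 *
 *	struct hstate *h = hstate_sizelog(21);
 *
 *	if (!h)
 *		return -EINVAL;	(no 2 MiB hstate configured)
 */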

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

/* Size of the pages the kernel uses to back the VMA. */
extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

/* Size of the pages the MMU uses to back the VMA; usually the same. */
extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
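
/*
 * Worked example of the geometry helpers above for a 2 MiB hstate on a
 * system with 4 KiB base pages (h->order == 9, PAGE_SHIFT == 12):
 *
 *	huge_page_size(h)	== 4096 << 9	 == 2097152 (2 MiB)
 *	huge_page_shift(h)	== 9 + 12	 == 21
 *	pages_per_huge_page(h)	== 1 << 9	 == 512
 *	blocks_per_huge_page(h)	== 2097152 / 512 == 4096 (512-byte sectors)
 *	huge_page_mask(h)	== ~(2 MiB - 1), e.g. 0xffffffffffe00000
 */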

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void hugetlb_clear_page_hwpoison(struct page *hpage);
#else
static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different from the migration check.
 *
 * Movability determines whether or not a huge page should be placed in
 * a movable zone.  Movability of any huge page is required only if the
 * huge page size is supported for migration: there is no reason for a
 * page to be movable if it is not migratable to begin with.  Gigantic
 * pages are an exception: even when their migration is supported, they
 * are not placed in the movable zone, because allocating a replacement
 * gigantic page at migration time is unlikely to succeed, and an
 * unmovable gigantic page stranded in the movable zone would defeat
 * that zone's purpose.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
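
/*
 * Illustrative sketch: a caller that must allocate on one specific node can
 * thread __GFP_THISNODE (and __GFP_NOWARN) through the hstate's base mask
 * ('h' and 'nid' assumed in scope):
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NOWARN);
 *	struct page *page = alloc_huge_page_nodemask(h, nid, NULL, gfp);
 */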

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platform decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
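
/*
 * Illustrative sketch: mm->hugetlb_usage counts base-page equivalents, so a
 * hypothetical path mapping or unmapping one huge page accounts it as:
 *
 *	hugetlb_count_add(pages_per_huge_page(h), mm);
 *	...
 *	hugetlb_count_sub(pages_per_huge_page(h), mm);
 *
 * hugetlb_count_init() zeroes the counter when an mm is set up.
 */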

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
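
/*
 * Illustrative sketch of the two-phase update these hooks implement,
 * simplified from the protection-change loop in mm/hugetlb.c:
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * Architectures that can update a live PTE without the intermediate
 * cleared-PTE window override both hooks as a pair.
 */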

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h, int nid)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
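
/*
 * Illustrative sketch: the usual lookup/lock/inspect pattern, assuming 'mm',
 * 'addr' and 'h' are in scope:
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *
 *	if (ptep) {
 *		spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *		pte_t pte = huge_ptep_get(ptep);
 *
 *		... examine pte ...
 *		spin_unlock(ptl);
 *	}
 */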

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries
 * can implement this; otherwise the generic flush_tlb_range() is used.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */