0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <linux/fs.h>
0025 #include <linux/init.h>
0026 #include <linux/vfs.h>
0027 #include <linux/mount.h>
0028 #include <linux/ramfs.h>
0029 #include <linux/pagemap.h>
0030 #include <linux/file.h>
0031 #include <linux/fileattr.h>
0032 #include <linux/mm.h>
0033 #include <linux/random.h>
0034 #include <linux/sched/signal.h>
0035 #include <linux/export.h>
0036 #include <linux/swap.h>
0037 #include <linux/uio.h>
0038 #include <linux/hugetlb.h>
0039 #include <linux/fs_parser.h>
0040 #include <linux/swapfile.h>
0041 #include "swap.h"
0042
0043 static struct vfsmount *shm_mnt;
0044
0045 #ifdef CONFIG_SHMEM
0046
0047
0048
0049
0050
0051
0052 #include <linux/xattr.h>
0053 #include <linux/exportfs.h>
0054 #include <linux/posix_acl.h>
0055 #include <linux/posix_acl_xattr.h>
0056 #include <linux/mman.h>
0057 #include <linux/string.h>
0058 #include <linux/slab.h>
0059 #include <linux/backing-dev.h>
0060 #include <linux/shmem_fs.h>
0061 #include <linux/writeback.h>
0062 #include <linux/pagevec.h>
0063 #include <linux/percpu_counter.h>
0064 #include <linux/falloc.h>
0065 #include <linux/splice.h>
0066 #include <linux/security.h>
0067 #include <linux/swapops.h>
0068 #include <linux/mempolicy.h>
0069 #include <linux/namei.h>
0070 #include <linux/ctype.h>
0071 #include <linux/migrate.h>
0072 #include <linux/highmem.h>
0073 #include <linux/seq_file.h>
0074 #include <linux/magic.h>
0075 #include <linux/syscalls.h>
0076 #include <linux/fcntl.h>
0077 #include <uapi/linux/memfd.h>
0078 #include <linux/userfaultfd_k.h>
0079 #include <linux/rmap.h>
0080 #include <linux/uuid.h>
0081
0082 #include <linux/uaccess.h>
0083
0084 #include "internal.h"
0085
0086 #define BLOCKS_PER_PAGE (PAGE_SIZE/512)
0087 #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
0088
0089
0090 #define BOGO_DIRENT_SIZE 20
0091
0092
0093 #define SHORT_SYMLINK_LEN 128
0094
0095
0096
0097
0098
0099
0100 struct shmem_falloc {
0101 wait_queue_head_t *waitq;
0102 pgoff_t start;
0103 pgoff_t next;
0104 pgoff_t nr_falloced;
0105 pgoff_t nr_unswapped;
0106 };
0107
0108 struct shmem_options {
0109 unsigned long long blocks;
0110 unsigned long long inodes;
0111 struct mempolicy *mpol;
0112 kuid_t uid;
0113 kgid_t gid;
0114 umode_t mode;
0115 bool full_inums;
0116 int huge;
0117 int seen;
0118 #define SHMEM_SEEN_BLOCKS 1
0119 #define SHMEM_SEEN_INODES 2
0120 #define SHMEM_SEEN_HUGE 4
0121 #define SHMEM_SEEN_INUMS 8
0122 };
0123
0124 #ifdef CONFIG_TMPFS
0125 static unsigned long shmem_default_max_blocks(void)
0126 {
0127 return totalram_pages() / 2;
0128 }
0129
0130 static unsigned long shmem_default_max_inodes(void)
0131 {
0132 unsigned long nr_pages = totalram_pages();
0133
0134 return min(nr_pages - totalhigh_pages(), nr_pages / 2);
0135 }
0136 #endif
0137
0138 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
0139 struct folio **foliop, enum sgp_type sgp,
0140 gfp_t gfp, struct vm_area_struct *vma,
0141 vm_fault_t *fault_type);
0142 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
0143 struct page **pagep, enum sgp_type sgp,
0144 gfp_t gfp, struct vm_area_struct *vma,
0145 struct vm_fault *vmf, vm_fault_t *fault_type);
0146
0147 int shmem_getpage(struct inode *inode, pgoff_t index,
0148 struct page **pagep, enum sgp_type sgp)
0149 {
0150 return shmem_getpage_gfp(inode, index, pagep, sgp,
0151 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
0152 }
0153
0154 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
0155 {
0156 return sb->s_fs_info;
0157 }
0158
0159
0160
0161
0162
0163
0164
0165 static inline int shmem_acct_size(unsigned long flags, loff_t size)
0166 {
0167 return (flags & VM_NORESERVE) ?
0168 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
0169 }
0170
0171 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
0172 {
0173 if (!(flags & VM_NORESERVE))
0174 vm_unacct_memory(VM_ACCT(size));
0175 }
0176
0177 static inline int shmem_reacct_size(unsigned long flags,
0178 loff_t oldsize, loff_t newsize)
0179 {
0180 if (!(flags & VM_NORESERVE)) {
0181 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
0182 return security_vm_enough_memory_mm(current->mm,
0183 VM_ACCT(newsize) - VM_ACCT(oldsize));
0184 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
0185 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
0186 }
0187 return 0;
0188 }
0189
0190
0191
0192
0193
0194
0195
0196 static inline int shmem_acct_block(unsigned long flags, long pages)
0197 {
0198 if (!(flags & VM_NORESERVE))
0199 return 0;
0200
0201 return security_vm_enough_memory_mm(current->mm,
0202 pages * VM_ACCT(PAGE_SIZE));
0203 }
0204
0205 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
0206 {
0207 if (flags & VM_NORESERVE)
0208 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
0209 }
0210
0211 static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
0212 {
0213 struct shmem_inode_info *info = SHMEM_I(inode);
0214 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
0215
0216 if (shmem_acct_block(info->flags, pages))
0217 return false;
0218
0219 if (sbinfo->max_blocks) {
0220 if (percpu_counter_compare(&sbinfo->used_blocks,
0221 sbinfo->max_blocks - pages) > 0)
0222 goto unacct;
0223 percpu_counter_add(&sbinfo->used_blocks, pages);
0224 }
0225
0226 return true;
0227
0228 unacct:
0229 shmem_unacct_blocks(info->flags, pages);
0230 return false;
0231 }
0232
0233 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
0234 {
0235 struct shmem_inode_info *info = SHMEM_I(inode);
0236 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
0237
0238 if (sbinfo->max_blocks)
0239 percpu_counter_sub(&sbinfo->used_blocks, pages);
0240 shmem_unacct_blocks(info->flags, pages);
0241 }
0242
0243 static const struct super_operations shmem_ops;
0244 const struct address_space_operations shmem_aops;
0245 static const struct file_operations shmem_file_operations;
0246 static const struct inode_operations shmem_inode_operations;
0247 static const struct inode_operations shmem_dir_inode_operations;
0248 static const struct inode_operations shmem_special_inode_operations;
0249 static const struct vm_operations_struct shmem_vm_ops;
0250 static struct file_system_type shmem_fs_type;
0251
0252 bool vma_is_shmem(struct vm_area_struct *vma)
0253 {
0254 return vma->vm_ops == &shmem_vm_ops;
0255 }
0256
0257 static LIST_HEAD(shmem_swaplist);
0258 static DEFINE_MUTEX(shmem_swaplist_mutex);
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269 #define SHMEM_INO_BATCH 1024
0270 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
0271 {
0272 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
0273 ino_t ino;
0274
0275 if (!(sb->s_flags & SB_KERNMOUNT)) {
0276 raw_spin_lock(&sbinfo->stat_lock);
0277 if (sbinfo->max_inodes) {
0278 if (!sbinfo->free_inodes) {
0279 raw_spin_unlock(&sbinfo->stat_lock);
0280 return -ENOSPC;
0281 }
0282 sbinfo->free_inodes--;
0283 }
0284 if (inop) {
0285 ino = sbinfo->next_ino++;
0286 if (unlikely(is_zero_ino(ino)))
0287 ino = sbinfo->next_ino++;
0288 if (unlikely(!sbinfo->full_inums &&
0289 ino > UINT_MAX)) {
0290
0291
0292
0293
0294 if (IS_ENABLED(CONFIG_64BIT))
0295 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
0296 __func__, MINOR(sb->s_dev));
0297 sbinfo->next_ino = 1;
0298 ino = sbinfo->next_ino++;
0299 }
0300 *inop = ino;
0301 }
0302 raw_spin_unlock(&sbinfo->stat_lock);
0303 } else if (inop) {
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316 ino_t *next_ino;
0317
0318 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
0319 ino = *next_ino;
0320 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
0321 raw_spin_lock(&sbinfo->stat_lock);
0322 ino = sbinfo->next_ino;
0323 sbinfo->next_ino += SHMEM_INO_BATCH;
0324 raw_spin_unlock(&sbinfo->stat_lock);
0325 if (unlikely(is_zero_ino(ino)))
0326 ino++;
0327 }
0328 *inop = ino;
0329 *next_ino = ++ino;
0330 put_cpu();
0331 }
0332
0333 return 0;
0334 }
0335
0336 static void shmem_free_inode(struct super_block *sb)
0337 {
0338 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
0339 if (sbinfo->max_inodes) {
0340 raw_spin_lock(&sbinfo->stat_lock);
0341 sbinfo->free_inodes++;
0342 raw_spin_unlock(&sbinfo->stat_lock);
0343 }
0344 }
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358 static void shmem_recalc_inode(struct inode *inode)
0359 {
0360 struct shmem_inode_info *info = SHMEM_I(inode);
0361 long freed;
0362
0363 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
0364 if (freed > 0) {
0365 info->alloced -= freed;
0366 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
0367 shmem_inode_unacct_blocks(inode, freed);
0368 }
0369 }
0370
0371 bool shmem_charge(struct inode *inode, long pages)
0372 {
0373 struct shmem_inode_info *info = SHMEM_I(inode);
0374 unsigned long flags;
0375
0376 if (!shmem_inode_acct_block(inode, pages))
0377 return false;
0378
0379
0380 inode->i_mapping->nrpages += pages;
0381
0382 spin_lock_irqsave(&info->lock, flags);
0383 info->alloced += pages;
0384 inode->i_blocks += pages * BLOCKS_PER_PAGE;
0385 shmem_recalc_inode(inode);
0386 spin_unlock_irqrestore(&info->lock, flags);
0387
0388 return true;
0389 }
0390
0391 void shmem_uncharge(struct inode *inode, long pages)
0392 {
0393 struct shmem_inode_info *info = SHMEM_I(inode);
0394 unsigned long flags;
0395
0396
0397
0398 spin_lock_irqsave(&info->lock, flags);
0399 info->alloced -= pages;
0400 inode->i_blocks -= pages * BLOCKS_PER_PAGE;
0401 shmem_recalc_inode(inode);
0402 spin_unlock_irqrestore(&info->lock, flags);
0403
0404 shmem_inode_unacct_blocks(inode, pages);
0405 }
0406
0407
0408
0409
0410 static int shmem_replace_entry(struct address_space *mapping,
0411 pgoff_t index, void *expected, void *replacement)
0412 {
0413 XA_STATE(xas, &mapping->i_pages, index);
0414 void *item;
0415
0416 VM_BUG_ON(!expected);
0417 VM_BUG_ON(!replacement);
0418 item = xas_load(&xas);
0419 if (item != expected)
0420 return -ENOENT;
0421 xas_store(&xas, replacement);
0422 return 0;
0423 }
0424
0425
0426
0427
0428
0429
0430
0431
0432 static bool shmem_confirm_swap(struct address_space *mapping,
0433 pgoff_t index, swp_entry_t swap)
0434 {
0435 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
0436 }
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452 #define SHMEM_HUGE_NEVER 0
0453 #define SHMEM_HUGE_ALWAYS 1
0454 #define SHMEM_HUGE_WITHIN_SIZE 2
0455 #define SHMEM_HUGE_ADVISE 3
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467 #define SHMEM_HUGE_DENY (-1)
0468 #define SHMEM_HUGE_FORCE (-2)
0469
0470 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
0471
0472
0473 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
0474
0475 bool shmem_is_huge(struct vm_area_struct *vma,
0476 struct inode *inode, pgoff_t index)
0477 {
0478 loff_t i_size;
0479
0480 if (!S_ISREG(inode->i_mode))
0481 return false;
0482 if (shmem_huge == SHMEM_HUGE_DENY)
0483 return false;
0484 if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
0485 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
0486 return false;
0487 if (shmem_huge == SHMEM_HUGE_FORCE)
0488 return true;
0489
0490 switch (SHMEM_SB(inode->i_sb)->huge) {
0491 case SHMEM_HUGE_ALWAYS:
0492 return true;
0493 case SHMEM_HUGE_WITHIN_SIZE:
0494 index = round_up(index + 1, HPAGE_PMD_NR);
0495 i_size = round_up(i_size_read(inode), PAGE_SIZE);
0496 if (i_size >> PAGE_SHIFT >= index)
0497 return true;
0498 fallthrough;
0499 case SHMEM_HUGE_ADVISE:
0500 if (vma && (vma->vm_flags & VM_HUGEPAGE))
0501 return true;
0502 fallthrough;
0503 default:
0504 return false;
0505 }
0506 }
0507
0508 #if defined(CONFIG_SYSFS)
0509 static int shmem_parse_huge(const char *str)
0510 {
0511 if (!strcmp(str, "never"))
0512 return SHMEM_HUGE_NEVER;
0513 if (!strcmp(str, "always"))
0514 return SHMEM_HUGE_ALWAYS;
0515 if (!strcmp(str, "within_size"))
0516 return SHMEM_HUGE_WITHIN_SIZE;
0517 if (!strcmp(str, "advise"))
0518 return SHMEM_HUGE_ADVISE;
0519 if (!strcmp(str, "deny"))
0520 return SHMEM_HUGE_DENY;
0521 if (!strcmp(str, "force"))
0522 return SHMEM_HUGE_FORCE;
0523 return -EINVAL;
0524 }
0525 #endif
0526
0527 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
0528 static const char *shmem_format_huge(int huge)
0529 {
0530 switch (huge) {
0531 case SHMEM_HUGE_NEVER:
0532 return "never";
0533 case SHMEM_HUGE_ALWAYS:
0534 return "always";
0535 case SHMEM_HUGE_WITHIN_SIZE:
0536 return "within_size";
0537 case SHMEM_HUGE_ADVISE:
0538 return "advise";
0539 case SHMEM_HUGE_DENY:
0540 return "deny";
0541 case SHMEM_HUGE_FORCE:
0542 return "force";
0543 default:
0544 VM_BUG_ON(1);
0545 return "bad_val";
0546 }
0547 }
0548 #endif
0549
0550 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
0551 struct shrink_control *sc, unsigned long nr_to_split)
0552 {
0553 LIST_HEAD(list), *pos, *next;
0554 LIST_HEAD(to_remove);
0555 struct inode *inode;
0556 struct shmem_inode_info *info;
0557 struct folio *folio;
0558 unsigned long batch = sc ? sc->nr_to_scan : 128;
0559 int split = 0;
0560
0561 if (list_empty(&sbinfo->shrinklist))
0562 return SHRINK_STOP;
0563
0564 spin_lock(&sbinfo->shrinklist_lock);
0565 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
0566 info = list_entry(pos, struct shmem_inode_info, shrinklist);
0567
0568
0569 inode = igrab(&info->vfs_inode);
0570
0571
0572 if (!inode) {
0573 list_del_init(&info->shrinklist);
0574 goto next;
0575 }
0576
0577
0578 if (round_up(inode->i_size, PAGE_SIZE) ==
0579 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
0580 list_move(&info->shrinklist, &to_remove);
0581 goto next;
0582 }
0583
0584 list_move(&info->shrinklist, &list);
0585 next:
0586 sbinfo->shrinklist_len--;
0587 if (!--batch)
0588 break;
0589 }
0590 spin_unlock(&sbinfo->shrinklist_lock);
0591
0592 list_for_each_safe(pos, next, &to_remove) {
0593 info = list_entry(pos, struct shmem_inode_info, shrinklist);
0594 inode = &info->vfs_inode;
0595 list_del_init(&info->shrinklist);
0596 iput(inode);
0597 }
0598
0599 list_for_each_safe(pos, next, &list) {
0600 int ret;
0601 pgoff_t index;
0602
0603 info = list_entry(pos, struct shmem_inode_info, shrinklist);
0604 inode = &info->vfs_inode;
0605
0606 if (nr_to_split && split >= nr_to_split)
0607 goto move_back;
0608
0609 index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
0610 folio = filemap_get_folio(inode->i_mapping, index);
0611 if (!folio)
0612 goto drop;
0613
0614
0615 if (!folio_test_large(folio)) {
0616 folio_put(folio);
0617 goto drop;
0618 }
0619
0620
0621
0622
0623
0624
0625
0626
0627 if (!folio_trylock(folio)) {
0628 folio_put(folio);
0629 goto move_back;
0630 }
0631
0632 ret = split_huge_page(&folio->page);
0633 folio_unlock(folio);
0634 folio_put(folio);
0635
0636
0637 if (ret)
0638 goto move_back;
0639
0640 split++;
0641 drop:
0642 list_del_init(&info->shrinklist);
0643 goto put;
0644 move_back:
0645
0646
0647
0648
0649
0650
0651 spin_lock(&sbinfo->shrinklist_lock);
0652 list_move(&info->shrinklist, &sbinfo->shrinklist);
0653 sbinfo->shrinklist_len++;
0654 spin_unlock(&sbinfo->shrinklist_lock);
0655 put:
0656 iput(inode);
0657 }
0658
0659 return split;
0660 }
0661
0662 static long shmem_unused_huge_scan(struct super_block *sb,
0663 struct shrink_control *sc)
0664 {
0665 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
0666
0667 if (!READ_ONCE(sbinfo->shrinklist_len))
0668 return SHRINK_STOP;
0669
0670 return shmem_unused_huge_shrink(sbinfo, sc, 0);
0671 }
0672
0673 static long shmem_unused_huge_count(struct super_block *sb,
0674 struct shrink_control *sc)
0675 {
0676 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
0677 return READ_ONCE(sbinfo->shrinklist_len);
0678 }
0679 #else
0680
0681 #define shmem_huge SHMEM_HUGE_DENY
0682
0683 bool shmem_is_huge(struct vm_area_struct *vma,
0684 struct inode *inode, pgoff_t index)
0685 {
0686 return false;
0687 }
0688
0689 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
0690 struct shrink_control *sc, unsigned long nr_to_split)
0691 {
0692 return 0;
0693 }
0694 #endif
0695
0696
0697
0698
0699 static int shmem_add_to_page_cache(struct folio *folio,
0700 struct address_space *mapping,
0701 pgoff_t index, void *expected, gfp_t gfp,
0702 struct mm_struct *charge_mm)
0703 {
0704 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
0705 long nr = folio_nr_pages(folio);
0706 int error;
0707
0708 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
0709 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
0710 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
0711 VM_BUG_ON(expected && folio_test_large(folio));
0712
0713 folio_ref_add(folio, nr);
0714 folio->mapping = mapping;
0715 folio->index = index;
0716
0717 if (!folio_test_swapcache(folio)) {
0718 error = mem_cgroup_charge(folio, charge_mm, gfp);
0719 if (error) {
0720 if (folio_test_pmd_mappable(folio)) {
0721 count_vm_event(THP_FILE_FALLBACK);
0722 count_vm_event(THP_FILE_FALLBACK_CHARGE);
0723 }
0724 goto error;
0725 }
0726 }
0727 folio_throttle_swaprate(folio, gfp);
0728
0729 do {
0730 xas_lock_irq(&xas);
0731 if (expected != xas_find_conflict(&xas)) {
0732 xas_set_err(&xas, -EEXIST);
0733 goto unlock;
0734 }
0735 if (expected && xas_find_conflict(&xas)) {
0736 xas_set_err(&xas, -EEXIST);
0737 goto unlock;
0738 }
0739 xas_store(&xas, folio);
0740 if (xas_error(&xas))
0741 goto unlock;
0742 if (folio_test_pmd_mappable(folio)) {
0743 count_vm_event(THP_FILE_ALLOC);
0744 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
0745 }
0746 mapping->nrpages += nr;
0747 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
0748 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
0749 unlock:
0750 xas_unlock_irq(&xas);
0751 } while (xas_nomem(&xas, gfp));
0752
0753 if (xas_error(&xas)) {
0754 error = xas_error(&xas);
0755 goto error;
0756 }
0757
0758 return 0;
0759 error:
0760 folio->mapping = NULL;
0761 folio_ref_sub(folio, nr);
0762 return error;
0763 }
0764
0765
0766
0767
0768 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
0769 {
0770 struct address_space *mapping = page->mapping;
0771 int error;
0772
0773 VM_BUG_ON_PAGE(PageCompound(page), page);
0774
0775 xa_lock_irq(&mapping->i_pages);
0776 error = shmem_replace_entry(mapping, page->index, page, radswap);
0777 page->mapping = NULL;
0778 mapping->nrpages--;
0779 __dec_lruvec_page_state(page, NR_FILE_PAGES);
0780 __dec_lruvec_page_state(page, NR_SHMEM);
0781 xa_unlock_irq(&mapping->i_pages);
0782 put_page(page);
0783 BUG_ON(error);
0784 }
0785
0786
0787
0788
0789 static int shmem_free_swap(struct address_space *mapping,
0790 pgoff_t index, void *radswap)
0791 {
0792 void *old;
0793
0794 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
0795 if (old != radswap)
0796 return -ENOENT;
0797 free_swap_and_cache(radix_to_swp_entry(radswap));
0798 return 0;
0799 }
0800
0801
0802
0803
0804
0805
0806
0807
0808 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
0809 pgoff_t start, pgoff_t end)
0810 {
0811 XA_STATE(xas, &mapping->i_pages, start);
0812 struct page *page;
0813 unsigned long swapped = 0;
0814
0815 rcu_read_lock();
0816 xas_for_each(&xas, page, end - 1) {
0817 if (xas_retry(&xas, page))
0818 continue;
0819 if (xa_is_value(page))
0820 swapped++;
0821
0822 if (need_resched()) {
0823 xas_pause(&xas);
0824 cond_resched_rcu();
0825 }
0826 }
0827
0828 rcu_read_unlock();
0829
0830 return swapped << PAGE_SHIFT;
0831 }
0832
0833
0834
0835
0836
0837
0838
0839
0840 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
0841 {
0842 struct inode *inode = file_inode(vma->vm_file);
0843 struct shmem_inode_info *info = SHMEM_I(inode);
0844 struct address_space *mapping = inode->i_mapping;
0845 unsigned long swapped;
0846
0847
0848 swapped = READ_ONCE(info->swapped);
0849
0850
0851
0852
0853
0854
0855 if (!swapped)
0856 return 0;
0857
0858 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
0859 return swapped << PAGE_SHIFT;
0860
0861
0862 return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
0863 vma->vm_pgoff + vma_pages(vma));
0864 }
0865
0866
0867
0868
0869 void shmem_unlock_mapping(struct address_space *mapping)
0870 {
0871 struct folio_batch fbatch;
0872 pgoff_t index = 0;
0873
0874 folio_batch_init(&fbatch);
0875
0876
0877
0878 while (!mapping_unevictable(mapping) &&
0879 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
0880 check_move_unevictable_folios(&fbatch);
0881 folio_batch_release(&fbatch);
0882 cond_resched();
0883 }
0884 }
0885
0886 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
0887 {
0888 struct folio *folio;
0889 struct page *page;
0890
0891
0892
0893
0894
0895 folio = __filemap_get_folio(inode->i_mapping, index,
0896 FGP_ENTRY | FGP_LOCK, 0);
0897 if (!xa_is_value(folio))
0898 return folio;
0899
0900
0901
0902
0903 page = NULL;
0904 shmem_getpage(inode, index, &page, SGP_READ);
0905 return page ? page_folio(page) : NULL;
0906 }
0907
0908
0909
0910
0911
0912 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
0913 bool unfalloc)
0914 {
0915 struct address_space *mapping = inode->i_mapping;
0916 struct shmem_inode_info *info = SHMEM_I(inode);
0917 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
0918 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
0919 struct folio_batch fbatch;
0920 pgoff_t indices[PAGEVEC_SIZE];
0921 struct folio *folio;
0922 bool same_folio;
0923 long nr_swaps_freed = 0;
0924 pgoff_t index;
0925 int i;
0926
0927 if (lend == -1)
0928 end = -1;
0929
0930 if (info->fallocend > start && info->fallocend <= end && !unfalloc)
0931 info->fallocend = start;
0932
0933 folio_batch_init(&fbatch);
0934 index = start;
0935 while (index < end && find_lock_entries(mapping, index, end - 1,
0936 &fbatch, indices)) {
0937 for (i = 0; i < folio_batch_count(&fbatch); i++) {
0938 folio = fbatch.folios[i];
0939
0940 index = indices[i];
0941
0942 if (xa_is_value(folio)) {
0943 if (unfalloc)
0944 continue;
0945 nr_swaps_freed += !shmem_free_swap(mapping,
0946 index, folio);
0947 continue;
0948 }
0949 index += folio_nr_pages(folio) - 1;
0950
0951 if (!unfalloc || !folio_test_uptodate(folio))
0952 truncate_inode_folio(mapping, folio);
0953 folio_unlock(folio);
0954 }
0955 folio_batch_remove_exceptionals(&fbatch);
0956 folio_batch_release(&fbatch);
0957 cond_resched();
0958 index++;
0959 }
0960
0961 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
0962 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
0963 if (folio) {
0964 same_folio = lend < folio_pos(folio) + folio_size(folio);
0965 folio_mark_dirty(folio);
0966 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
0967 start = folio->index + folio_nr_pages(folio);
0968 if (same_folio)
0969 end = folio->index;
0970 }
0971 folio_unlock(folio);
0972 folio_put(folio);
0973 folio = NULL;
0974 }
0975
0976 if (!same_folio)
0977 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
0978 if (folio) {
0979 folio_mark_dirty(folio);
0980 if (!truncate_inode_partial_folio(folio, lstart, lend))
0981 end = folio->index;
0982 folio_unlock(folio);
0983 folio_put(folio);
0984 }
0985
0986 index = start;
0987 while (index < end) {
0988 cond_resched();
0989
0990 if (!find_get_entries(mapping, index, end - 1, &fbatch,
0991 indices)) {
0992
0993 if (index == start || end != -1)
0994 break;
0995
0996 index = start;
0997 continue;
0998 }
0999 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1000 folio = fbatch.folios[i];
1001
1002 index = indices[i];
1003 if (xa_is_value(folio)) {
1004 if (unfalloc)
1005 continue;
1006 if (shmem_free_swap(mapping, index, folio)) {
1007
1008 index--;
1009 break;
1010 }
1011 nr_swaps_freed++;
1012 continue;
1013 }
1014
1015 folio_lock(folio);
1016
1017 if (!unfalloc || !folio_test_uptodate(folio)) {
1018 if (folio_mapping(folio) != mapping) {
1019
1020 folio_unlock(folio);
1021 index--;
1022 break;
1023 }
1024 VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1025 folio);
1026 truncate_inode_folio(mapping, folio);
1027 }
1028 index = folio->index + folio_nr_pages(folio) - 1;
1029 folio_unlock(folio);
1030 }
1031 folio_batch_remove_exceptionals(&fbatch);
1032 folio_batch_release(&fbatch);
1033 index++;
1034 }
1035
1036 spin_lock_irq(&info->lock);
1037 info->swapped -= nr_swaps_freed;
1038 shmem_recalc_inode(inode);
1039 spin_unlock_irq(&info->lock);
1040 }
1041
1042 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1043 {
1044 shmem_undo_range(inode, lstart, lend, false);
1045 inode->i_ctime = inode->i_mtime = current_time(inode);
1046 }
1047 EXPORT_SYMBOL_GPL(shmem_truncate_range);
1048
1049 static int shmem_getattr(struct user_namespace *mnt_userns,
1050 const struct path *path, struct kstat *stat,
1051 u32 request_mask, unsigned int query_flags)
1052 {
1053 struct inode *inode = path->dentry->d_inode;
1054 struct shmem_inode_info *info = SHMEM_I(inode);
1055
1056 if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1057 spin_lock_irq(&info->lock);
1058 shmem_recalc_inode(inode);
1059 spin_unlock_irq(&info->lock);
1060 }
1061 if (info->fsflags & FS_APPEND_FL)
1062 stat->attributes |= STATX_ATTR_APPEND;
1063 if (info->fsflags & FS_IMMUTABLE_FL)
1064 stat->attributes |= STATX_ATTR_IMMUTABLE;
1065 if (info->fsflags & FS_NODUMP_FL)
1066 stat->attributes |= STATX_ATTR_NODUMP;
1067 stat->attributes_mask |= (STATX_ATTR_APPEND |
1068 STATX_ATTR_IMMUTABLE |
1069 STATX_ATTR_NODUMP);
1070 generic_fillattr(&init_user_ns, inode, stat);
1071
1072 if (shmem_is_huge(NULL, inode, 0))
1073 stat->blksize = HPAGE_PMD_SIZE;
1074
1075 if (request_mask & STATX_BTIME) {
1076 stat->result_mask |= STATX_BTIME;
1077 stat->btime.tv_sec = info->i_crtime.tv_sec;
1078 stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1079 }
1080
1081 return 0;
1082 }
1083
1084 static int shmem_setattr(struct user_namespace *mnt_userns,
1085 struct dentry *dentry, struct iattr *attr)
1086 {
1087 struct inode *inode = d_inode(dentry);
1088 struct shmem_inode_info *info = SHMEM_I(inode);
1089 int error;
1090
1091 error = setattr_prepare(&init_user_ns, dentry, attr);
1092 if (error)
1093 return error;
1094
1095 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1096 loff_t oldsize = inode->i_size;
1097 loff_t newsize = attr->ia_size;
1098
1099
1100 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1101 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1102 return -EPERM;
1103
1104 if (newsize != oldsize) {
1105 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1106 oldsize, newsize);
1107 if (error)
1108 return error;
1109 i_size_write(inode, newsize);
1110 inode->i_ctime = inode->i_mtime = current_time(inode);
1111 }
1112 if (newsize <= oldsize) {
1113 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1114 if (oldsize > holebegin)
1115 unmap_mapping_range(inode->i_mapping,
1116 holebegin, 0, 1);
1117 if (info->alloced)
1118 shmem_truncate_range(inode,
1119 newsize, (loff_t)-1);
1120
1121 if (oldsize > holebegin)
1122 unmap_mapping_range(inode->i_mapping,
1123 holebegin, 0, 1);
1124 }
1125 }
1126
1127 setattr_copy(&init_user_ns, inode, attr);
1128 if (attr->ia_valid & ATTR_MODE)
1129 error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
1130 return error;
1131 }
1132
1133 static void shmem_evict_inode(struct inode *inode)
1134 {
1135 struct shmem_inode_info *info = SHMEM_I(inode);
1136 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1137
1138 if (shmem_mapping(inode->i_mapping)) {
1139 shmem_unacct_size(info->flags, inode->i_size);
1140 inode->i_size = 0;
1141 mapping_set_exiting(inode->i_mapping);
1142 shmem_truncate_range(inode, 0, (loff_t)-1);
1143 if (!list_empty(&info->shrinklist)) {
1144 spin_lock(&sbinfo->shrinklist_lock);
1145 if (!list_empty(&info->shrinklist)) {
1146 list_del_init(&info->shrinklist);
1147 sbinfo->shrinklist_len--;
1148 }
1149 spin_unlock(&sbinfo->shrinklist_lock);
1150 }
1151 while (!list_empty(&info->swaplist)) {
1152
1153 wait_var_event(&info->stop_eviction,
1154 !atomic_read(&info->stop_eviction));
1155 mutex_lock(&shmem_swaplist_mutex);
1156
1157 if (!atomic_read(&info->stop_eviction))
1158 list_del_init(&info->swaplist);
1159 mutex_unlock(&shmem_swaplist_mutex);
1160 }
1161 }
1162
1163 simple_xattrs_free(&info->xattrs);
1164 WARN_ON(inode->i_blocks);
1165 shmem_free_inode(inode->i_sb);
1166 clear_inode(inode);
1167 }
1168
1169 static int shmem_find_swap_entries(struct address_space *mapping,
1170 pgoff_t start, struct folio_batch *fbatch,
1171 pgoff_t *indices, unsigned int type)
1172 {
1173 XA_STATE(xas, &mapping->i_pages, start);
1174 struct folio *folio;
1175 swp_entry_t entry;
1176
1177 rcu_read_lock();
1178 xas_for_each(&xas, folio, ULONG_MAX) {
1179 if (xas_retry(&xas, folio))
1180 continue;
1181
1182 if (!xa_is_value(folio))
1183 continue;
1184
1185 entry = radix_to_swp_entry(folio);
1186
1187
1188
1189
1190 if (swp_type(entry) != type)
1191 continue;
1192
1193 indices[folio_batch_count(fbatch)] = xas.xa_index;
1194 if (!folio_batch_add(fbatch, folio))
1195 break;
1196
1197 if (need_resched()) {
1198 xas_pause(&xas);
1199 cond_resched_rcu();
1200 }
1201 }
1202 rcu_read_unlock();
1203
1204 return xas.xa_index;
1205 }
1206
1207
1208
1209
1210
1211 static int shmem_unuse_swap_entries(struct inode *inode,
1212 struct folio_batch *fbatch, pgoff_t *indices)
1213 {
1214 int i = 0;
1215 int ret = 0;
1216 int error = 0;
1217 struct address_space *mapping = inode->i_mapping;
1218
1219 for (i = 0; i < folio_batch_count(fbatch); i++) {
1220 struct folio *folio = fbatch->folios[i];
1221
1222 if (!xa_is_value(folio))
1223 continue;
1224 error = shmem_swapin_folio(inode, indices[i],
1225 &folio, SGP_CACHE,
1226 mapping_gfp_mask(mapping),
1227 NULL, NULL);
1228 if (error == 0) {
1229 folio_unlock(folio);
1230 folio_put(folio);
1231 ret++;
1232 }
1233 if (error == -ENOMEM)
1234 break;
1235 error = 0;
1236 }
1237 return error ? error : ret;
1238 }
1239
1240
1241
1242
1243 static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1244 {
1245 struct address_space *mapping = inode->i_mapping;
1246 pgoff_t start = 0;
1247 struct folio_batch fbatch;
1248 pgoff_t indices[PAGEVEC_SIZE];
1249 int ret = 0;
1250
1251 do {
1252 folio_batch_init(&fbatch);
1253 shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1254 if (folio_batch_count(&fbatch) == 0) {
1255 ret = 0;
1256 break;
1257 }
1258
1259 ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1260 if (ret < 0)
1261 break;
1262
1263 start = indices[folio_batch_count(&fbatch) - 1];
1264 } while (true);
1265
1266 return ret;
1267 }
1268
1269
1270
1271
1272
1273
1274 int shmem_unuse(unsigned int type)
1275 {
1276 struct shmem_inode_info *info, *next;
1277 int error = 0;
1278
1279 if (list_empty(&shmem_swaplist))
1280 return 0;
1281
1282 mutex_lock(&shmem_swaplist_mutex);
1283 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1284 if (!info->swapped) {
1285 list_del_init(&info->swaplist);
1286 continue;
1287 }
1288
1289
1290
1291
1292
1293
1294 atomic_inc(&info->stop_eviction);
1295 mutex_unlock(&shmem_swaplist_mutex);
1296
1297 error = shmem_unuse_inode(&info->vfs_inode, type);
1298 cond_resched();
1299
1300 mutex_lock(&shmem_swaplist_mutex);
1301 next = list_next_entry(info, swaplist);
1302 if (!info->swapped)
1303 list_del_init(&info->swaplist);
1304 if (atomic_dec_and_test(&info->stop_eviction))
1305 wake_up_var(&info->stop_eviction);
1306 if (error)
1307 break;
1308 }
1309 mutex_unlock(&shmem_swaplist_mutex);
1310
1311 return error;
1312 }
1313
1314
1315
1316
1317 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1318 {
1319 struct folio *folio = page_folio(page);
1320 struct shmem_inode_info *info;
1321 struct address_space *mapping;
1322 struct inode *inode;
1323 swp_entry_t swap;
1324 pgoff_t index;
1325
1326
1327
1328
1329
1330
1331 if (PageTransCompound(page)) {
1332
1333 SetPageDirty(page);
1334 if (split_huge_page(page) < 0)
1335 goto redirty;
1336 ClearPageDirty(page);
1337 }
1338
1339 BUG_ON(!PageLocked(page));
1340 mapping = page->mapping;
1341 index = page->index;
1342 inode = mapping->host;
1343 info = SHMEM_I(inode);
1344 if (info->flags & VM_LOCKED)
1345 goto redirty;
1346 if (!total_swap_pages)
1347 goto redirty;
1348
1349
1350
1351
1352
1353
1354
1355
1356 if (!wbc->for_reclaim) {
1357 WARN_ON_ONCE(1);
1358 goto redirty;
1359 }
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372 if (!PageUptodate(page)) {
1373 if (inode->i_private) {
1374 struct shmem_falloc *shmem_falloc;
1375 spin_lock(&inode->i_lock);
1376 shmem_falloc = inode->i_private;
1377 if (shmem_falloc &&
1378 !shmem_falloc->waitq &&
1379 index >= shmem_falloc->start &&
1380 index < shmem_falloc->next)
1381 shmem_falloc->nr_unswapped++;
1382 else
1383 shmem_falloc = NULL;
1384 spin_unlock(&inode->i_lock);
1385 if (shmem_falloc)
1386 goto redirty;
1387 }
1388 clear_highpage(page);
1389 flush_dcache_page(page);
1390 SetPageUptodate(page);
1391 }
1392
1393 swap = folio_alloc_swap(folio);
1394 if (!swap.val)
1395 goto redirty;
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405 mutex_lock(&shmem_swaplist_mutex);
1406 if (list_empty(&info->swaplist))
1407 list_add(&info->swaplist, &shmem_swaplist);
1408
1409 if (add_to_swap_cache(page, swap,
1410 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1411 NULL) == 0) {
1412 spin_lock_irq(&info->lock);
1413 shmem_recalc_inode(inode);
1414 info->swapped++;
1415 spin_unlock_irq(&info->lock);
1416
1417 swap_shmem_alloc(swap);
1418 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1419
1420 mutex_unlock(&shmem_swaplist_mutex);
1421 BUG_ON(page_mapped(page));
1422 swap_writepage(page, wbc);
1423 return 0;
1424 }
1425
1426 mutex_unlock(&shmem_swaplist_mutex);
1427 put_swap_page(page, swap);
1428 redirty:
1429 set_page_dirty(page);
1430 if (wbc->for_reclaim)
1431 return AOP_WRITEPAGE_ACTIVATE;
1432 unlock_page(page);
1433 return 0;
1434 }
1435
1436 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1437 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1438 {
1439 char buffer[64];
1440
1441 if (!mpol || mpol->mode == MPOL_DEFAULT)
1442 return;
1443
1444 mpol_to_str(buffer, sizeof(buffer), mpol);
1445
1446 seq_printf(seq, ",mpol=%s", buffer);
1447 }
1448
1449 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1450 {
1451 struct mempolicy *mpol = NULL;
1452 if (sbinfo->mpol) {
1453 raw_spin_lock(&sbinfo->stat_lock);
1454 mpol = sbinfo->mpol;
1455 mpol_get(mpol);
1456 raw_spin_unlock(&sbinfo->stat_lock);
1457 }
1458 return mpol;
1459 }
1460 #else
1461 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1462 {
1463 }
1464 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1465 {
1466 return NULL;
1467 }
1468 #endif
1469 #ifndef CONFIG_NUMA
1470 #define vm_policy vm_private_data
1471 #endif
1472
1473 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1474 struct shmem_inode_info *info, pgoff_t index)
1475 {
1476
1477 vma_init(vma, NULL);
1478
1479 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1480 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1481 }
1482
1483 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1484 {
1485
1486 mpol_cond_put(vma->vm_policy);
1487 }
1488
1489 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1490 struct shmem_inode_info *info, pgoff_t index)
1491 {
1492 struct vm_area_struct pvma;
1493 struct page *page;
1494 struct vm_fault vmf = {
1495 .vma = &pvma,
1496 };
1497
1498 shmem_pseudo_vma_init(&pvma, info, index);
1499 page = swap_cluster_readahead(swap, gfp, &vmf);
1500 shmem_pseudo_vma_destroy(&pvma);
1501
1502 return page;
1503 }
1504
1505
1506
1507
1508
1509 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1510 {
1511 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1512 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1513 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1514 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1515
1516
1517 result |= zoneflags;
1518
1519
1520
1521
1522
1523 result |= (limit_gfp & denyflags);
1524 result |= (huge_gfp & limit_gfp) & allowflags;
1525
1526 return result;
1527 }
1528
1529 static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
1530 struct shmem_inode_info *info, pgoff_t index)
1531 {
1532 struct vm_area_struct pvma;
1533 struct address_space *mapping = info->vfs_inode.i_mapping;
1534 pgoff_t hindex;
1535 struct folio *folio;
1536
1537 hindex = round_down(index, HPAGE_PMD_NR);
1538 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1539 XA_PRESENT))
1540 return NULL;
1541
1542 shmem_pseudo_vma_init(&pvma, info, hindex);
1543 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
1544 shmem_pseudo_vma_destroy(&pvma);
1545 if (!folio)
1546 count_vm_event(THP_FILE_FALLBACK);
1547 return folio;
1548 }
1549
1550 static struct folio *shmem_alloc_folio(gfp_t gfp,
1551 struct shmem_inode_info *info, pgoff_t index)
1552 {
1553 struct vm_area_struct pvma;
1554 struct folio *folio;
1555
1556 shmem_pseudo_vma_init(&pvma, info, index);
1557 folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
1558 shmem_pseudo_vma_destroy(&pvma);
1559
1560 return folio;
1561 }
1562
1563 static struct page *shmem_alloc_page(gfp_t gfp,
1564 struct shmem_inode_info *info, pgoff_t index)
1565 {
1566 return &shmem_alloc_folio(gfp, info, index)->page;
1567 }
1568
1569 static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
1570 pgoff_t index, bool huge)
1571 {
1572 struct shmem_inode_info *info = SHMEM_I(inode);
1573 struct folio *folio;
1574 int nr;
1575 int err = -ENOSPC;
1576
1577 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1578 huge = false;
1579 nr = huge ? HPAGE_PMD_NR : 1;
1580
1581 if (!shmem_inode_acct_block(inode, nr))
1582 goto failed;
1583
1584 if (huge)
1585 folio = shmem_alloc_hugefolio(gfp, info, index);
1586 else
1587 folio = shmem_alloc_folio(gfp, info, index);
1588 if (folio) {
1589 __folio_set_locked(folio);
1590 __folio_set_swapbacked(folio);
1591 return folio;
1592 }
1593
1594 err = -ENOMEM;
1595 shmem_inode_unacct_blocks(inode, nr);
1596 failed:
1597 return ERR_PTR(err);
1598 }
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1613 {
1614 return folio_zonenum(folio) > gfp_zone(gfp);
1615 }
1616
1617 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1618 struct shmem_inode_info *info, pgoff_t index)
1619 {
1620 struct page *oldpage, *newpage;
1621 struct folio *old, *new;
1622 struct address_space *swap_mapping;
1623 swp_entry_t entry;
1624 pgoff_t swap_index;
1625 int error;
1626
1627 oldpage = *pagep;
1628 entry.val = page_private(oldpage);
1629 swap_index = swp_offset(entry);
1630 swap_mapping = page_mapping(oldpage);
1631
1632
1633
1634
1635
1636 gfp &= ~GFP_CONSTRAINT_MASK;
1637 newpage = shmem_alloc_page(gfp, info, index);
1638 if (!newpage)
1639 return -ENOMEM;
1640
1641 get_page(newpage);
1642 copy_highpage(newpage, oldpage);
1643 flush_dcache_page(newpage);
1644
1645 __SetPageLocked(newpage);
1646 __SetPageSwapBacked(newpage);
1647 SetPageUptodate(newpage);
1648 set_page_private(newpage, entry.val);
1649 SetPageSwapCache(newpage);
1650
1651
1652
1653
1654
1655 xa_lock_irq(&swap_mapping->i_pages);
1656 error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1657 if (!error) {
1658 old = page_folio(oldpage);
1659 new = page_folio(newpage);
1660 mem_cgroup_migrate(old, new);
1661 __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
1662 __inc_lruvec_page_state(newpage, NR_SHMEM);
1663 __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
1664 __dec_lruvec_page_state(oldpage, NR_SHMEM);
1665 }
1666 xa_unlock_irq(&swap_mapping->i_pages);
1667
1668 if (unlikely(error)) {
1669
1670
1671
1672
1673
1674 oldpage = newpage;
1675 } else {
1676 lru_cache_add(newpage);
1677 *pagep = newpage;
1678 }
1679
1680 ClearPageSwapCache(oldpage);
1681 set_page_private(oldpage, 0);
1682
1683 unlock_page(oldpage);
1684 put_page(oldpage);
1685 put_page(oldpage);
1686 return error;
1687 }
1688
1689 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1690 struct folio *folio, swp_entry_t swap)
1691 {
1692 struct address_space *mapping = inode->i_mapping;
1693 struct shmem_inode_info *info = SHMEM_I(inode);
1694 swp_entry_t swapin_error;
1695 void *old;
1696
1697 swapin_error = make_swapin_error_entry(&folio->page);
1698 old = xa_cmpxchg_irq(&mapping->i_pages, index,
1699 swp_to_radix_entry(swap),
1700 swp_to_radix_entry(swapin_error), 0);
1701 if (old != swp_to_radix_entry(swap))
1702 return;
1703
1704 folio_wait_writeback(folio);
1705 delete_from_swap_cache(folio);
1706 spin_lock_irq(&info->lock);
1707
1708
1709
1710
1711
1712 info->alloced--;
1713 info->swapped--;
1714 shmem_recalc_inode(inode);
1715 spin_unlock_irq(&info->lock);
1716 swap_free(swap);
1717 }
1718
1719
1720
1721
1722
1723
1724
1725 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
1726 struct folio **foliop, enum sgp_type sgp,
1727 gfp_t gfp, struct vm_area_struct *vma,
1728 vm_fault_t *fault_type)
1729 {
1730 struct address_space *mapping = inode->i_mapping;
1731 struct shmem_inode_info *info = SHMEM_I(inode);
1732 struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1733 struct page *page;
1734 struct folio *folio = NULL;
1735 swp_entry_t swap;
1736 int error;
1737
1738 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
1739 swap = radix_to_swp_entry(*foliop);
1740 *foliop = NULL;
1741
1742 if (is_swapin_error_entry(swap))
1743 return -EIO;
1744
1745
1746 page = lookup_swap_cache(swap, NULL, 0);
1747 if (!page) {
1748
1749 if (fault_type) {
1750 *fault_type |= VM_FAULT_MAJOR;
1751 count_vm_event(PGMAJFAULT);
1752 count_memcg_event_mm(charge_mm, PGMAJFAULT);
1753 }
1754
1755 page = shmem_swapin(swap, gfp, info, index);
1756 if (!page) {
1757 error = -ENOMEM;
1758 goto failed;
1759 }
1760 }
1761 folio = page_folio(page);
1762
1763
1764 folio_lock(folio);
1765 if (!folio_test_swapcache(folio) ||
1766 folio_swap_entry(folio).val != swap.val ||
1767 !shmem_confirm_swap(mapping, index, swap)) {
1768 error = -EEXIST;
1769 goto unlock;
1770 }
1771 if (!folio_test_uptodate(folio)) {
1772 error = -EIO;
1773 goto failed;
1774 }
1775 folio_wait_writeback(folio);
1776
1777
1778
1779
1780
1781 arch_swap_restore(swap, folio);
1782
1783 if (shmem_should_replace_folio(folio, gfp)) {
1784 error = shmem_replace_page(&page, gfp, info, index);
1785 folio = page_folio(page);
1786 if (error)
1787 goto failed;
1788 }
1789
1790 error = shmem_add_to_page_cache(folio, mapping, index,
1791 swp_to_radix_entry(swap), gfp,
1792 charge_mm);
1793 if (error)
1794 goto failed;
1795
1796 spin_lock_irq(&info->lock);
1797 info->swapped--;
1798 shmem_recalc_inode(inode);
1799 spin_unlock_irq(&info->lock);
1800
1801 if (sgp == SGP_WRITE)
1802 folio_mark_accessed(folio);
1803
1804 delete_from_swap_cache(folio);
1805 folio_mark_dirty(folio);
1806 swap_free(swap);
1807
1808 *foliop = folio;
1809 return 0;
1810 failed:
1811 if (!shmem_confirm_swap(mapping, index, swap))
1812 error = -EEXIST;
1813 if (error == -EIO)
1814 shmem_set_folio_swapin_error(inode, index, folio, swap);
1815 unlock:
1816 if (folio) {
1817 folio_unlock(folio);
1818 folio_put(folio);
1819 }
1820
1821 return error;
1822 }
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1835 struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1836 struct vm_area_struct *vma, struct vm_fault *vmf,
1837 vm_fault_t *fault_type)
1838 {
1839 struct address_space *mapping = inode->i_mapping;
1840 struct shmem_inode_info *info = SHMEM_I(inode);
1841 struct shmem_sb_info *sbinfo;
1842 struct mm_struct *charge_mm;
1843 struct folio *folio;
1844 pgoff_t hindex = index;
1845 gfp_t huge_gfp;
1846 int error;
1847 int once = 0;
1848 int alloced = 0;
1849
1850 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1851 return -EFBIG;
1852 repeat:
1853 if (sgp <= SGP_CACHE &&
1854 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1855 return -EINVAL;
1856 }
1857
1858 sbinfo = SHMEM_SB(inode->i_sb);
1859 charge_mm = vma ? vma->vm_mm : NULL;
1860
1861 folio = __filemap_get_folio(mapping, index, FGP_ENTRY | FGP_LOCK, 0);
1862 if (folio && vma && userfaultfd_minor(vma)) {
1863 if (!xa_is_value(folio)) {
1864 folio_unlock(folio);
1865 folio_put(folio);
1866 }
1867 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1868 return 0;
1869 }
1870
1871 if (xa_is_value(folio)) {
1872 error = shmem_swapin_folio(inode, index, &folio,
1873 sgp, gfp, vma, fault_type);
1874 if (error == -EEXIST)
1875 goto repeat;
1876
1877 *pagep = &folio->page;
1878 return error;
1879 }
1880
1881 if (folio) {
1882 hindex = folio->index;
1883 if (sgp == SGP_WRITE)
1884 folio_mark_accessed(folio);
1885 if (folio_test_uptodate(folio))
1886 goto out;
1887
1888 if (sgp != SGP_READ)
1889 goto clear;
1890 folio_unlock(folio);
1891 folio_put(folio);
1892 }
1893
1894
1895
1896
1897
1898 *pagep = NULL;
1899 if (sgp == SGP_READ)
1900 return 0;
1901 if (sgp == SGP_NOALLOC)
1902 return -ENOENT;
1903
1904
1905
1906
1907
1908 if (vma && userfaultfd_missing(vma)) {
1909 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1910 return 0;
1911 }
1912
1913 if (!shmem_is_huge(vma, inode, index))
1914 goto alloc_nohuge;
1915
1916 huge_gfp = vma_thp_gfp_mask(vma);
1917 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1918 folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true);
1919 if (IS_ERR(folio)) {
1920 alloc_nohuge:
1921 folio = shmem_alloc_and_acct_folio(gfp, inode, index, false);
1922 }
1923 if (IS_ERR(folio)) {
1924 int retry = 5;
1925
1926 error = PTR_ERR(folio);
1927 folio = NULL;
1928 if (error != -ENOSPC)
1929 goto unlock;
1930
1931
1932
1933
1934 while (retry--) {
1935 int ret;
1936
1937 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1938 if (ret == SHRINK_STOP)
1939 break;
1940 if (ret)
1941 goto alloc_nohuge;
1942 }
1943 goto unlock;
1944 }
1945
1946 hindex = round_down(index, folio_nr_pages(folio));
1947
1948 if (sgp == SGP_WRITE)
1949 __folio_set_referenced(folio);
1950
1951 error = shmem_add_to_page_cache(folio, mapping, hindex,
1952 NULL, gfp & GFP_RECLAIM_MASK,
1953 charge_mm);
1954 if (error)
1955 goto unacct;
1956 folio_add_lru(folio);
1957
1958 spin_lock_irq(&info->lock);
1959 info->alloced += folio_nr_pages(folio);
1960 inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
1961 shmem_recalc_inode(inode);
1962 spin_unlock_irq(&info->lock);
1963 alloced = true;
1964
1965 if (folio_test_pmd_mappable(folio) &&
1966 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1967 hindex + HPAGE_PMD_NR - 1) {
1968
1969
1970
1971
1972 spin_lock(&sbinfo->shrinklist_lock);
1973
1974
1975
1976
1977 if (list_empty_careful(&info->shrinklist)) {
1978 list_add_tail(&info->shrinklist,
1979 &sbinfo->shrinklist);
1980 sbinfo->shrinklist_len++;
1981 }
1982 spin_unlock(&sbinfo->shrinklist_lock);
1983 }
1984
1985
1986
1987
1988 if (sgp == SGP_FALLOC)
1989 sgp = SGP_WRITE;
1990 clear:
1991
1992
1993
1994
1995
1996 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
1997 long i, n = folio_nr_pages(folio);
1998
1999 for (i = 0; i < n; i++)
2000 clear_highpage(folio_page(folio, i));
2001 flush_dcache_folio(folio);
2002 folio_mark_uptodate(folio);
2003 }
2004
2005
2006 if (sgp <= SGP_CACHE &&
2007 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2008 if (alloced) {
2009 folio_clear_dirty(folio);
2010 filemap_remove_folio(folio);
2011 spin_lock_irq(&info->lock);
2012 shmem_recalc_inode(inode);
2013 spin_unlock_irq(&info->lock);
2014 }
2015 error = -EINVAL;
2016 goto unlock;
2017 }
2018 out:
2019 *pagep = folio_page(folio, index - hindex);
2020 return 0;
2021
2022
2023
2024
2025 unacct:
2026 shmem_inode_unacct_blocks(inode, folio_nr_pages(folio));
2027
2028 if (folio_test_large(folio)) {
2029 folio_unlock(folio);
2030 folio_put(folio);
2031 goto alloc_nohuge;
2032 }
2033 unlock:
2034 if (folio) {
2035 folio_unlock(folio);
2036 folio_put(folio);
2037 }
2038 if (error == -ENOSPC && !once++) {
2039 spin_lock_irq(&info->lock);
2040 shmem_recalc_inode(inode);
2041 spin_unlock_irq(&info->lock);
2042 goto repeat;
2043 }
2044 if (error == -EEXIST)
2045 goto repeat;
2046 return error;
2047 }
2048
2049
2050
2051
2052
2053
2054 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2055 {
2056 int ret = default_wake_function(wait, mode, sync, key);
2057 list_del_init(&wait->entry);
2058 return ret;
2059 }
2060
2061 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2062 {
2063 struct vm_area_struct *vma = vmf->vma;
2064 struct inode *inode = file_inode(vma->vm_file);
2065 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2066 int err;
2067 vm_fault_t ret = VM_FAULT_LOCKED;
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086 if (unlikely(inode->i_private)) {
2087 struct shmem_falloc *shmem_falloc;
2088
2089 spin_lock(&inode->i_lock);
2090 shmem_falloc = inode->i_private;
2091 if (shmem_falloc &&
2092 shmem_falloc->waitq &&
2093 vmf->pgoff >= shmem_falloc->start &&
2094 vmf->pgoff < shmem_falloc->next) {
2095 struct file *fpin;
2096 wait_queue_head_t *shmem_falloc_waitq;
2097 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2098
2099 ret = VM_FAULT_NOPAGE;
2100 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2101 if (fpin)
2102 ret = VM_FAULT_RETRY;
2103
2104 shmem_falloc_waitq = shmem_falloc->waitq;
2105 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2106 TASK_UNINTERRUPTIBLE);
2107 spin_unlock(&inode->i_lock);
2108 schedule();
2109
2110
2111
2112
2113
2114
2115
2116
2117 spin_lock(&inode->i_lock);
2118 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2119 spin_unlock(&inode->i_lock);
2120
2121 if (fpin)
2122 fput(fpin);
2123 return ret;
2124 }
2125 spin_unlock(&inode->i_lock);
2126 }
2127
2128 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
2129 gfp, vma, vmf, &ret);
2130 if (err)
2131 return vmf_error(err);
2132 return ret;
2133 }
2134
2135 unsigned long shmem_get_unmapped_area(struct file *file,
2136 unsigned long uaddr, unsigned long len,
2137 unsigned long pgoff, unsigned long flags)
2138 {
2139 unsigned long (*get_area)(struct file *,
2140 unsigned long, unsigned long, unsigned long, unsigned long);
2141 unsigned long addr;
2142 unsigned long offset;
2143 unsigned long inflated_len;
2144 unsigned long inflated_addr;
2145 unsigned long inflated_offset;
2146
2147 if (len > TASK_SIZE)
2148 return -ENOMEM;
2149
2150 get_area = current->mm->get_unmapped_area;
2151 addr = get_area(file, uaddr, len, pgoff, flags);
2152
2153 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2154 return addr;
2155 if (IS_ERR_VALUE(addr))
2156 return addr;
2157 if (addr & ~PAGE_MASK)
2158 return addr;
2159 if (addr > TASK_SIZE - len)
2160 return addr;
2161
2162 if (shmem_huge == SHMEM_HUGE_DENY)
2163 return addr;
2164 if (len < HPAGE_PMD_SIZE)
2165 return addr;
2166 if (flags & MAP_FIXED)
2167 return addr;
2168
2169
2170
2171
2172
2173
2174 if (uaddr == addr)
2175 return addr;
2176
2177 if (shmem_huge != SHMEM_HUGE_FORCE) {
2178 struct super_block *sb;
2179
2180 if (file) {
2181 VM_BUG_ON(file->f_op != &shmem_file_operations);
2182 sb = file_inode(file)->i_sb;
2183 } else {
2184
2185
2186
2187
2188 if (IS_ERR(shm_mnt))
2189 return addr;
2190 sb = shm_mnt->mnt_sb;
2191 }
2192 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2193 return addr;
2194 }
2195
2196 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2197 if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2198 return addr;
2199 if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2200 return addr;
2201
2202 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2203 if (inflated_len > TASK_SIZE)
2204 return addr;
2205 if (inflated_len < len)
2206 return addr;
2207
2208 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2209 if (IS_ERR_VALUE(inflated_addr))
2210 return addr;
2211 if (inflated_addr & ~PAGE_MASK)
2212 return addr;
2213
2214 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2215 inflated_addr += offset - inflated_offset;
2216 if (inflated_offset > offset)
2217 inflated_addr += HPAGE_PMD_SIZE;
2218
2219 if (inflated_addr > TASK_SIZE - len)
2220 return addr;
2221 return inflated_addr;
2222 }
2223
2224 #ifdef CONFIG_NUMA
2225 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2226 {
2227 struct inode *inode = file_inode(vma->vm_file);
2228 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2229 }
2230
2231 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2232 unsigned long addr)
2233 {
2234 struct inode *inode = file_inode(vma->vm_file);
2235 pgoff_t index;
2236
2237 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2238 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2239 }
2240 #endif
2241
2242 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2243 {
2244 struct inode *inode = file_inode(file);
2245 struct shmem_inode_info *info = SHMEM_I(inode);
2246 int retval = -ENOMEM;
2247
2248
2249
2250
2251
2252
2253 if (lock && !(info->flags & VM_LOCKED)) {
2254 if (!user_shm_lock(inode->i_size, ucounts))
2255 goto out_nomem;
2256 info->flags |= VM_LOCKED;
2257 mapping_set_unevictable(file->f_mapping);
2258 }
2259 if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2260 user_shm_unlock(inode->i_size, ucounts);
2261 info->flags &= ~VM_LOCKED;
2262 mapping_clear_unevictable(file->f_mapping);
2263 }
2264 retval = 0;
2265
2266 out_nomem:
2267 return retval;
2268 }
2269
2270 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2271 {
2272 struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2273 int ret;
2274
2275 ret = seal_check_future_write(info->seals, vma);
2276 if (ret)
2277 return ret;
2278
2279
2280 vma->vm_flags |= VM_MTE_ALLOWED;
2281
2282 file_accessed(file);
2283 vma->vm_ops = &shmem_vm_ops;
2284 return 0;
2285 }
2286
2287 #ifdef CONFIG_TMPFS_XATTR
2288 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2289
2290
2291
2292
2293
2294 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2295 {
2296 unsigned int i_flags = 0;
2297
2298 if (fsflags & FS_NOATIME_FL)
2299 i_flags |= S_NOATIME;
2300 if (fsflags & FS_APPEND_FL)
2301 i_flags |= S_APPEND;
2302 if (fsflags & FS_IMMUTABLE_FL)
2303 i_flags |= S_IMMUTABLE;
2304
2305
2306
2307 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2308 }
2309 #else
2310 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2311 {
2312 }
2313 #define shmem_initxattrs NULL
2314 #endif
2315
2316 static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
2317 umode_t mode, dev_t dev, unsigned long flags)
2318 {
2319 struct inode *inode;
2320 struct shmem_inode_info *info;
2321 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2322 ino_t ino;
2323
2324 if (shmem_reserve_inode(sb, &ino))
2325 return NULL;
2326
2327 inode = new_inode(sb);
2328 if (inode) {
2329 inode->i_ino = ino;
2330 inode_init_owner(&init_user_ns, inode, dir, mode);
2331 inode->i_blocks = 0;
2332 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2333 inode->i_generation = prandom_u32();
2334 info = SHMEM_I(inode);
2335 memset(info, 0, (char *)inode - (char *)info);
2336 spin_lock_init(&info->lock);
2337 atomic_set(&info->stop_eviction, 0);
2338 info->seals = F_SEAL_SEAL;
2339 info->flags = flags & VM_NORESERVE;
2340 info->i_crtime = inode->i_mtime;
2341 info->fsflags = (dir == NULL) ? 0 :
2342 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2343 if (info->fsflags)
2344 shmem_set_inode_flags(inode, info->fsflags);
2345 INIT_LIST_HEAD(&info->shrinklist);
2346 INIT_LIST_HEAD(&info->swaplist);
2347 simple_xattrs_init(&info->xattrs);
2348 cache_no_acl(inode);
2349 mapping_set_large_folios(inode->i_mapping);
2350
2351 switch (mode & S_IFMT) {
2352 default:
2353 inode->i_op = &shmem_special_inode_operations;
2354 init_special_inode(inode, mode, dev);
2355 break;
2356 case S_IFREG:
2357 inode->i_mapping->a_ops = &shmem_aops;
2358 inode->i_op = &shmem_inode_operations;
2359 inode->i_fop = &shmem_file_operations;
2360 mpol_shared_policy_init(&info->policy,
2361 shmem_get_sbmpol(sbinfo));
2362 break;
2363 case S_IFDIR:
2364 inc_nlink(inode);
2365
2366 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2367 inode->i_op = &shmem_dir_inode_operations;
2368 inode->i_fop = &simple_dir_operations;
2369 break;
2370 case S_IFLNK:
2371
2372
2373
2374
2375 mpol_shared_policy_init(&info->policy, NULL);
2376 break;
2377 }
2378
2379 lockdep_annotate_inode_mutex_key(inode);
2380 } else
2381 shmem_free_inode(sb);
2382 return inode;
2383 }
2384
2385 #ifdef CONFIG_USERFAULTFD
2386 int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2387 pmd_t *dst_pmd,
2388 struct vm_area_struct *dst_vma,
2389 unsigned long dst_addr,
2390 unsigned long src_addr,
2391 bool zeropage, bool wp_copy,
2392 struct page **pagep)
2393 {
2394 struct inode *inode = file_inode(dst_vma->vm_file);
2395 struct shmem_inode_info *info = SHMEM_I(inode);
2396 struct address_space *mapping = inode->i_mapping;
2397 gfp_t gfp = mapping_gfp_mask(mapping);
2398 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2399 void *page_kaddr;
2400 struct folio *folio;
2401 struct page *page;
2402 int ret;
2403 pgoff_t max_off;
2404
2405 if (!shmem_inode_acct_block(inode, 1)) {
2406
2407
2408
2409
2410
2411 if (unlikely(*pagep)) {
2412 put_page(*pagep);
2413 *pagep = NULL;
2414 }
2415 return -ENOMEM;
2416 }
2417
2418 if (!*pagep) {
2419 ret = -ENOMEM;
2420 page = shmem_alloc_page(gfp, info, pgoff);
2421 if (!page)
2422 goto out_unacct_blocks;
2423
2424 if (!zeropage) {
2425 page_kaddr = kmap_atomic(page);
2426 ret = copy_from_user(page_kaddr,
2427 (const void __user *)src_addr,
2428 PAGE_SIZE);
2429 kunmap_atomic(page_kaddr);
2430
2431
2432 if (unlikely(ret)) {
2433 *pagep = page;
2434 ret = -ENOENT;
2435
2436 goto out_unacct_blocks;
2437 }
2438
2439 flush_dcache_page(page);
2440 } else {
2441 clear_user_highpage(page, dst_addr);
2442 }
2443 } else {
2444 page = *pagep;
2445 *pagep = NULL;
2446 }
2447
2448 VM_BUG_ON(PageLocked(page));
2449 VM_BUG_ON(PageSwapBacked(page));
2450 __SetPageLocked(page);
2451 __SetPageSwapBacked(page);
2452 __SetPageUptodate(page);
2453
2454 ret = -EFAULT;
2455 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2456 if (unlikely(pgoff >= max_off))
2457 goto out_release;
2458
2459 folio = page_folio(page);
2460 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
2461 gfp & GFP_RECLAIM_MASK, dst_mm);
2462 if (ret)
2463 goto out_release;
2464
2465 ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
2466 page, true, wp_copy);
2467 if (ret)
2468 goto out_delete_from_cache;
2469
2470 spin_lock_irq(&info->lock);
2471 info->alloced++;
2472 inode->i_blocks += BLOCKS_PER_PAGE;
2473 shmem_recalc_inode(inode);
2474 spin_unlock_irq(&info->lock);
2475
2476 unlock_page(page);
2477 return 0;
2478 out_delete_from_cache:
2479 delete_from_page_cache(page);
2480 out_release:
2481 unlock_page(page);
2482 put_page(page);
2483 out_unacct_blocks:
2484 shmem_inode_unacct_blocks(inode, 1);
2485 return ret;
2486 }
2487 #endif
2488
2489 #ifdef CONFIG_TMPFS
2490 static const struct inode_operations shmem_symlink_inode_operations;
2491 static const struct inode_operations shmem_short_symlink_operations;
2492
2493 static int
2494 shmem_write_begin(struct file *file, struct address_space *mapping,
2495 loff_t pos, unsigned len,
2496 struct page **pagep, void **fsdata)
2497 {
2498 struct inode *inode = mapping->host;
2499 struct shmem_inode_info *info = SHMEM_I(inode);
2500 pgoff_t index = pos >> PAGE_SHIFT;
2501 int ret = 0;
2502
2503
2504 if (unlikely(info->seals & (F_SEAL_GROW |
2505 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2506 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2507 return -EPERM;
2508 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2509 return -EPERM;
2510 }
2511
2512 ret = shmem_getpage(inode, index, pagep, SGP_WRITE);
2513
2514 if (ret)
2515 return ret;
2516
2517 if (PageHWPoison(*pagep)) {
2518 unlock_page(*pagep);
2519 put_page(*pagep);
2520 *pagep = NULL;
2521 return -EIO;
2522 }
2523
2524 return 0;
2525 }
2526
2527 static int
2528 shmem_write_end(struct file *file, struct address_space *mapping,
2529 loff_t pos, unsigned len, unsigned copied,
2530 struct page *page, void *fsdata)
2531 {
2532 struct inode *inode = mapping->host;
2533
2534 if (pos + copied > inode->i_size)
2535 i_size_write(inode, pos + copied);
2536
2537 if (!PageUptodate(page)) {
2538 struct page *head = compound_head(page);
2539 if (PageTransCompound(page)) {
2540 int i;
2541
2542 for (i = 0; i < HPAGE_PMD_NR; i++) {
2543 if (head + i == page)
2544 continue;
2545 clear_highpage(head + i);
2546 flush_dcache_page(head + i);
2547 }
2548 }
2549 if (copied < PAGE_SIZE) {
2550 unsigned from = pos & (PAGE_SIZE - 1);
2551 zero_user_segments(page, 0, from,
2552 from + copied, PAGE_SIZE);
2553 }
2554 SetPageUptodate(head);
2555 }
2556 set_page_dirty(page);
2557 unlock_page(page);
2558 put_page(page);
2559
2560 return copied;
2561 }
2562
2563 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2564 {
2565 struct file *file = iocb->ki_filp;
2566 struct inode *inode = file_inode(file);
2567 struct address_space *mapping = inode->i_mapping;
2568 pgoff_t index;
2569 unsigned long offset;
2570 int error = 0;
2571 ssize_t retval = 0;
2572 loff_t *ppos = &iocb->ki_pos;
2573
2574 index = *ppos >> PAGE_SHIFT;
2575 offset = *ppos & ~PAGE_MASK;
2576
2577 for (;;) {
2578 struct page *page = NULL;
2579 pgoff_t end_index;
2580 unsigned long nr, ret;
2581 loff_t i_size = i_size_read(inode);
2582
2583 end_index = i_size >> PAGE_SHIFT;
2584 if (index > end_index)
2585 break;
2586 if (index == end_index) {
2587 nr = i_size & ~PAGE_MASK;
2588 if (nr <= offset)
2589 break;
2590 }
2591
2592 error = shmem_getpage(inode, index, &page, SGP_READ);
2593 if (error) {
2594 if (error == -EINVAL)
2595 error = 0;
2596 break;
2597 }
2598 if (page) {
2599 unlock_page(page);
2600
2601 if (PageHWPoison(page)) {
2602 put_page(page);
2603 error = -EIO;
2604 break;
2605 }
2606 }
2607
2608
2609
2610
2611
2612 nr = PAGE_SIZE;
2613 i_size = i_size_read(inode);
2614 end_index = i_size >> PAGE_SHIFT;
2615 if (index == end_index) {
2616 nr = i_size & ~PAGE_MASK;
2617 if (nr <= offset) {
2618 if (page)
2619 put_page(page);
2620 break;
2621 }
2622 }
2623 nr -= offset;
2624
2625 if (page) {
2626
2627
2628
2629
2630
2631 if (mapping_writably_mapped(mapping))
2632 flush_dcache_page(page);
2633
2634
2635
2636 if (!offset)
2637 mark_page_accessed(page);
2638
2639
2640
2641
2642 ret = copy_page_to_iter(page, offset, nr, to);
2643 put_page(page);
2644
2645 } else if (user_backed_iter(to)) {
2646
2647
2648
2649
2650
2651 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
2652 } else {
2653
2654
2655
2656
2657
2658 ret = iov_iter_zero(nr, to);
2659 }
2660
2661 retval += ret;
2662 offset += ret;
2663 index += offset >> PAGE_SHIFT;
2664 offset &= ~PAGE_MASK;
2665
2666 if (!iov_iter_count(to))
2667 break;
2668 if (ret < nr) {
2669 error = -EFAULT;
2670 break;
2671 }
2672 cond_resched();
2673 }
2674
2675 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2676 file_accessed(file);
2677 return retval ? retval : error;
2678 }
2679
2680 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2681 {
2682 struct address_space *mapping = file->f_mapping;
2683 struct inode *inode = mapping->host;
2684
2685 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2686 return generic_file_llseek_size(file, offset, whence,
2687 MAX_LFS_FILESIZE, i_size_read(inode));
2688 if (offset < 0)
2689 return -ENXIO;
2690
2691 inode_lock(inode);
2692
2693 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2694 if (offset >= 0)
2695 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2696 inode_unlock(inode);
2697 return offset;
2698 }
2699
2700 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2701 loff_t len)
2702 {
2703 struct inode *inode = file_inode(file);
2704 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2705 struct shmem_inode_info *info = SHMEM_I(inode);
2706 struct shmem_falloc shmem_falloc;
2707 pgoff_t start, index, end, undo_fallocend;
2708 int error;
2709
2710 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2711 return -EOPNOTSUPP;
2712
2713 inode_lock(inode);
2714
2715 if (mode & FALLOC_FL_PUNCH_HOLE) {
2716 struct address_space *mapping = file->f_mapping;
2717 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2718 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2719 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2720
2721
2722 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2723 error = -EPERM;
2724 goto out;
2725 }
2726
2727 shmem_falloc.waitq = &shmem_falloc_waitq;
2728 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2729 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2730 spin_lock(&inode->i_lock);
2731 inode->i_private = &shmem_falloc;
2732 spin_unlock(&inode->i_lock);
2733
2734 if ((u64)unmap_end > (u64)unmap_start)
2735 unmap_mapping_range(mapping, unmap_start,
2736 1 + unmap_end - unmap_start, 0);
2737 shmem_truncate_range(inode, offset, offset + len - 1);
2738
2739
2740 spin_lock(&inode->i_lock);
2741 inode->i_private = NULL;
2742 wake_up_all(&shmem_falloc_waitq);
2743 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2744 spin_unlock(&inode->i_lock);
2745 error = 0;
2746 goto out;
2747 }
2748
2749
2750 error = inode_newsize_ok(inode, offset + len);
2751 if (error)
2752 goto out;
2753
2754 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2755 error = -EPERM;
2756 goto out;
2757 }
2758
2759 start = offset >> PAGE_SHIFT;
2760 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2761
2762 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2763 error = -ENOSPC;
2764 goto out;
2765 }
2766
2767 shmem_falloc.waitq = NULL;
2768 shmem_falloc.start = start;
2769 shmem_falloc.next = start;
2770 shmem_falloc.nr_falloced = 0;
2771 shmem_falloc.nr_unswapped = 0;
2772 spin_lock(&inode->i_lock);
2773 inode->i_private = &shmem_falloc;
2774 spin_unlock(&inode->i_lock);
2775
2776
2777
2778
2779
2780
2781 undo_fallocend = info->fallocend;
2782 if (info->fallocend < end)
2783 info->fallocend = end;
2784
2785 for (index = start; index < end; ) {
2786 struct page *page;
2787
2788
2789
2790
2791
2792 if (signal_pending(current))
2793 error = -EINTR;
2794 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2795 error = -ENOMEM;
2796 else
2797 error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2798 if (error) {
2799 info->fallocend = undo_fallocend;
2800
2801 if (index > start) {
2802 shmem_undo_range(inode,
2803 (loff_t)start << PAGE_SHIFT,
2804 ((loff_t)index << PAGE_SHIFT) - 1, true);
2805 }
2806 goto undone;
2807 }
2808
2809 index++;
2810
2811
2812
2813
2814
2815 if (PageTransCompound(page)) {
2816 index = round_up(index, HPAGE_PMD_NR);
2817
2818 if (!index)
2819 index--;
2820 }
2821
2822
2823
2824
2825
2826 if (!PageUptodate(page))
2827 shmem_falloc.nr_falloced += index - shmem_falloc.next;
2828 shmem_falloc.next = index;
2829
2830
2831
2832
2833
2834
2835
2836
2837 set_page_dirty(page);
2838 unlock_page(page);
2839 put_page(page);
2840 cond_resched();
2841 }
2842
2843 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2844 i_size_write(inode, offset + len);
2845 undone:
2846 spin_lock(&inode->i_lock);
2847 inode->i_private = NULL;
2848 spin_unlock(&inode->i_lock);
2849 out:
2850 if (!error)
2851 file_modified(file);
2852 inode_unlock(inode);
2853 return error;
2854 }
2855
2856 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2857 {
2858 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2859
2860 buf->f_type = TMPFS_MAGIC;
2861 buf->f_bsize = PAGE_SIZE;
2862 buf->f_namelen = NAME_MAX;
2863 if (sbinfo->max_blocks) {
2864 buf->f_blocks = sbinfo->max_blocks;
2865 buf->f_bavail =
2866 buf->f_bfree = sbinfo->max_blocks -
2867 percpu_counter_sum(&sbinfo->used_blocks);
2868 }
2869 if (sbinfo->max_inodes) {
2870 buf->f_files = sbinfo->max_inodes;
2871 buf->f_ffree = sbinfo->free_inodes;
2872 }
2873
2874
2875 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
2876
2877 return 0;
2878 }
2879
2880
2881
2882
2883 static int
2884 shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2885 struct dentry *dentry, umode_t mode, dev_t dev)
2886 {
2887 struct inode *inode;
2888 int error = -ENOSPC;
2889
2890 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2891 if (inode) {
2892 error = simple_acl_create(dir, inode);
2893 if (error)
2894 goto out_iput;
2895 error = security_inode_init_security(inode, dir,
2896 &dentry->d_name,
2897 shmem_initxattrs, NULL);
2898 if (error && error != -EOPNOTSUPP)
2899 goto out_iput;
2900
2901 error = 0;
2902 dir->i_size += BOGO_DIRENT_SIZE;
2903 dir->i_ctime = dir->i_mtime = current_time(dir);
2904 d_instantiate(dentry, inode);
2905 dget(dentry);
2906 }
2907 return error;
2908 out_iput:
2909 iput(inode);
2910 return error;
2911 }
2912
2913 static int
2914 shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2915 struct dentry *dentry, umode_t mode)
2916 {
2917 struct inode *inode;
2918 int error = -ENOSPC;
2919
2920 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2921 if (inode) {
2922 error = security_inode_init_security(inode, dir,
2923 NULL,
2924 shmem_initxattrs, NULL);
2925 if (error && error != -EOPNOTSUPP)
2926 goto out_iput;
2927 error = simple_acl_create(dir, inode);
2928 if (error)
2929 goto out_iput;
2930 d_tmpfile(dentry, inode);
2931 }
2932 return error;
2933 out_iput:
2934 iput(inode);
2935 return error;
2936 }
2937
2938 static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2939 struct dentry *dentry, umode_t mode)
2940 {
2941 int error;
2942
2943 if ((error = shmem_mknod(&init_user_ns, dir, dentry,
2944 mode | S_IFDIR, 0)))
2945 return error;
2946 inc_nlink(dir);
2947 return 0;
2948 }
2949
2950 static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2951 struct dentry *dentry, umode_t mode, bool excl)
2952 {
2953 return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
2954 }
2955
2956
2957
2958
2959 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2960 {
2961 struct inode *inode = d_inode(old_dentry);
2962 int ret = 0;
2963
2964
2965
2966
2967
2968
2969
2970
2971 if (inode->i_nlink) {
2972 ret = shmem_reserve_inode(inode->i_sb, NULL);
2973 if (ret)
2974 goto out;
2975 }
2976
2977 dir->i_size += BOGO_DIRENT_SIZE;
2978 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2979 inc_nlink(inode);
2980 ihold(inode);
2981 dget(dentry);
2982 d_instantiate(dentry, inode);
2983 out:
2984 return ret;
2985 }
2986
2987 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2988 {
2989 struct inode *inode = d_inode(dentry);
2990
2991 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2992 shmem_free_inode(inode->i_sb);
2993
2994 dir->i_size -= BOGO_DIRENT_SIZE;
2995 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2996 drop_nlink(inode);
2997 dput(dentry);
2998 return 0;
2999 }
3000
3001 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3002 {
3003 if (!simple_empty(dentry))
3004 return -ENOTEMPTY;
3005
3006 drop_nlink(d_inode(dentry));
3007 drop_nlink(dir);
3008 return shmem_unlink(dir, dentry);
3009 }
3010
3011 static int shmem_whiteout(struct user_namespace *mnt_userns,
3012 struct inode *old_dir, struct dentry *old_dentry)
3013 {
3014 struct dentry *whiteout;
3015 int error;
3016
3017 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3018 if (!whiteout)
3019 return -ENOMEM;
3020
3021 error = shmem_mknod(&init_user_ns, old_dir, whiteout,
3022 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3023 dput(whiteout);
3024 if (error)
3025 return error;
3026
3027
3028
3029
3030
3031
3032
3033
3034 d_rehash(whiteout);
3035 return 0;
3036 }
3037
3038
3039
3040
3041
3042
3043
3044 static int shmem_rename2(struct user_namespace *mnt_userns,
3045 struct inode *old_dir, struct dentry *old_dentry,
3046 struct inode *new_dir, struct dentry *new_dentry,
3047 unsigned int flags)
3048 {
3049 struct inode *inode = d_inode(old_dentry);
3050 int they_are_dirs = S_ISDIR(inode->i_mode);
3051
3052 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3053 return -EINVAL;
3054
3055 if (flags & RENAME_EXCHANGE)
3056 return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
3057
3058 if (!simple_empty(new_dentry))
3059 return -ENOTEMPTY;
3060
3061 if (flags & RENAME_WHITEOUT) {
3062 int error;
3063
3064 error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
3065 if (error)
3066 return error;
3067 }
3068
3069 if (d_really_is_positive(new_dentry)) {
3070 (void) shmem_unlink(new_dir, new_dentry);
3071 if (they_are_dirs) {
3072 drop_nlink(d_inode(new_dentry));
3073 drop_nlink(old_dir);
3074 }
3075 } else if (they_are_dirs) {
3076 drop_nlink(old_dir);
3077 inc_nlink(new_dir);
3078 }
3079
3080 old_dir->i_size -= BOGO_DIRENT_SIZE;
3081 new_dir->i_size += BOGO_DIRENT_SIZE;
3082 old_dir->i_ctime = old_dir->i_mtime =
3083 new_dir->i_ctime = new_dir->i_mtime =
3084 inode->i_ctime = current_time(old_dir);
3085 return 0;
3086 }
3087
3088 static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3089 struct dentry *dentry, const char *symname)
3090 {
3091 int error;
3092 int len;
3093 struct inode *inode;
3094 struct page *page;
3095
3096 len = strlen(symname) + 1;
3097 if (len > PAGE_SIZE)
3098 return -ENAMETOOLONG;
3099
3100 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3101 VM_NORESERVE);
3102 if (!inode)
3103 return -ENOSPC;
3104
3105 error = security_inode_init_security(inode, dir, &dentry->d_name,
3106 shmem_initxattrs, NULL);
3107 if (error && error != -EOPNOTSUPP) {
3108 iput(inode);
3109 return error;
3110 }
3111
3112 inode->i_size = len-1;
3113 if (len <= SHORT_SYMLINK_LEN) {
3114 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3115 if (!inode->i_link) {
3116 iput(inode);
3117 return -ENOMEM;
3118 }
3119 inode->i_op = &shmem_short_symlink_operations;
3120 } else {
3121 inode_nohighmem(inode);
3122 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3123 if (error) {
3124 iput(inode);
3125 return error;
3126 }
3127 inode->i_mapping->a_ops = &shmem_aops;
3128 inode->i_op = &shmem_symlink_inode_operations;
3129 memcpy(page_address(page), symname, len);
3130 SetPageUptodate(page);
3131 set_page_dirty(page);
3132 unlock_page(page);
3133 put_page(page);
3134 }
3135 dir->i_size += BOGO_DIRENT_SIZE;
3136 dir->i_ctime = dir->i_mtime = current_time(dir);
3137 d_instantiate(dentry, inode);
3138 dget(dentry);
3139 return 0;
3140 }
3141
3142 static void shmem_put_link(void *arg)
3143 {
3144 mark_page_accessed(arg);
3145 put_page(arg);
3146 }
3147
3148 static const char *shmem_get_link(struct dentry *dentry,
3149 struct inode *inode,
3150 struct delayed_call *done)
3151 {
3152 struct page *page = NULL;
3153 int error;
3154 if (!dentry) {
3155 page = find_get_page(inode->i_mapping, 0);
3156 if (!page)
3157 return ERR_PTR(-ECHILD);
3158 if (PageHWPoison(page) ||
3159 !PageUptodate(page)) {
3160 put_page(page);
3161 return ERR_PTR(-ECHILD);
3162 }
3163 } else {
3164 error = shmem_getpage(inode, 0, &page, SGP_READ);
3165 if (error)
3166 return ERR_PTR(error);
3167 if (!page)
3168 return ERR_PTR(-ECHILD);
3169 if (PageHWPoison(page)) {
3170 unlock_page(page);
3171 put_page(page);
3172 return ERR_PTR(-ECHILD);
3173 }
3174 unlock_page(page);
3175 }
3176 set_delayed_call(done, shmem_put_link, page);
3177 return page_address(page);
3178 }
3179
3180 #ifdef CONFIG_TMPFS_XATTR
3181
3182 static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3183 {
3184 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3185
3186 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3187
3188 return 0;
3189 }
3190
3191 static int shmem_fileattr_set(struct user_namespace *mnt_userns,
3192 struct dentry *dentry, struct fileattr *fa)
3193 {
3194 struct inode *inode = d_inode(dentry);
3195 struct shmem_inode_info *info = SHMEM_I(inode);
3196
3197 if (fileattr_has_fsx(fa))
3198 return -EOPNOTSUPP;
3199 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3200 return -EOPNOTSUPP;
3201
3202 info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3203 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
3204
3205 shmem_set_inode_flags(inode, info->fsflags);
3206 inode->i_ctime = current_time(inode);
3207 return 0;
3208 }
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220 static int shmem_initxattrs(struct inode *inode,
3221 const struct xattr *xattr_array,
3222 void *fs_info)
3223 {
3224 struct shmem_inode_info *info = SHMEM_I(inode);
3225 const struct xattr *xattr;
3226 struct simple_xattr *new_xattr;
3227 size_t len;
3228
3229 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3230 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3231 if (!new_xattr)
3232 return -ENOMEM;
3233
3234 len = strlen(xattr->name) + 1;
3235 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3236 GFP_KERNEL);
3237 if (!new_xattr->name) {
3238 kvfree(new_xattr);
3239 return -ENOMEM;
3240 }
3241
3242 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3243 XATTR_SECURITY_PREFIX_LEN);
3244 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3245 xattr->name, len);
3246
3247 simple_xattr_list_add(&info->xattrs, new_xattr);
3248 }
3249
3250 return 0;
3251 }
3252
3253 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3254 struct dentry *unused, struct inode *inode,
3255 const char *name, void *buffer, size_t size)
3256 {
3257 struct shmem_inode_info *info = SHMEM_I(inode);
3258
3259 name = xattr_full_name(handler, name);
3260 return simple_xattr_get(&info->xattrs, name, buffer, size);
3261 }
3262
3263 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3264 struct user_namespace *mnt_userns,
3265 struct dentry *unused, struct inode *inode,
3266 const char *name, const void *value,
3267 size_t size, int flags)
3268 {
3269 struct shmem_inode_info *info = SHMEM_I(inode);
3270
3271 name = xattr_full_name(handler, name);
3272 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3273 }
3274
3275 static const struct xattr_handler shmem_security_xattr_handler = {
3276 .prefix = XATTR_SECURITY_PREFIX,
3277 .get = shmem_xattr_handler_get,
3278 .set = shmem_xattr_handler_set,
3279 };
3280
3281 static const struct xattr_handler shmem_trusted_xattr_handler = {
3282 .prefix = XATTR_TRUSTED_PREFIX,
3283 .get = shmem_xattr_handler_get,
3284 .set = shmem_xattr_handler_set,
3285 };
3286
3287 static const struct xattr_handler *shmem_xattr_handlers[] = {
3288 #ifdef CONFIG_TMPFS_POSIX_ACL
3289 &posix_acl_access_xattr_handler,
3290 &posix_acl_default_xattr_handler,
3291 #endif
3292 &shmem_security_xattr_handler,
3293 &shmem_trusted_xattr_handler,
3294 NULL
3295 };
3296
3297 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3298 {
3299 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3300 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3301 }
3302 #endif
3303
3304 static const struct inode_operations shmem_short_symlink_operations = {
3305 .getattr = shmem_getattr,
3306 .get_link = simple_get_link,
3307 #ifdef CONFIG_TMPFS_XATTR
3308 .listxattr = shmem_listxattr,
3309 #endif
3310 };
3311
3312 static const struct inode_operations shmem_symlink_inode_operations = {
3313 .getattr = shmem_getattr,
3314 .get_link = shmem_get_link,
3315 #ifdef CONFIG_TMPFS_XATTR
3316 .listxattr = shmem_listxattr,
3317 #endif
3318 };
3319
3320 static struct dentry *shmem_get_parent(struct dentry *child)
3321 {
3322 return ERR_PTR(-ESTALE);
3323 }
3324
3325 static int shmem_match(struct inode *ino, void *vfh)
3326 {
3327 __u32 *fh = vfh;
3328 __u64 inum = fh[2];
3329 inum = (inum << 32) | fh[1];
3330 return ino->i_ino == inum && fh[0] == ino->i_generation;
3331 }
3332
3333
3334 static struct dentry *shmem_find_alias(struct inode *inode)
3335 {
3336 struct dentry *alias = d_find_alias(inode);
3337
3338 return alias ?: d_find_any_alias(inode);
3339 }
3340
3341
3342 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3343 struct fid *fid, int fh_len, int fh_type)
3344 {
3345 struct inode *inode;
3346 struct dentry *dentry = NULL;
3347 u64 inum;
3348
3349 if (fh_len < 3)
3350 return NULL;
3351
3352 inum = fid->raw[2];
3353 inum = (inum << 32) | fid->raw[1];
3354
3355 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3356 shmem_match, fid->raw);
3357 if (inode) {
3358 dentry = shmem_find_alias(inode);
3359 iput(inode);
3360 }
3361
3362 return dentry;
3363 }
3364
3365 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3366 struct inode *parent)
3367 {
3368 if (*len < 3) {
3369 *len = 3;
3370 return FILEID_INVALID;
3371 }
3372
3373 if (inode_unhashed(inode)) {
3374
3375
3376
3377
3378
3379 static DEFINE_SPINLOCK(lock);
3380 spin_lock(&lock);
3381 if (inode_unhashed(inode))
3382 __insert_inode_hash(inode,
3383 inode->i_ino + inode->i_generation);
3384 spin_unlock(&lock);
3385 }
3386
3387 fh[0] = inode->i_generation;
3388 fh[1] = inode->i_ino;
3389 fh[2] = ((__u64)inode->i_ino) >> 32;
3390
3391 *len = 3;
3392 return 1;
3393 }
3394
3395 static const struct export_operations shmem_export_ops = {
3396 .get_parent = shmem_get_parent,
3397 .encode_fh = shmem_encode_fh,
3398 .fh_to_dentry = shmem_fh_to_dentry,
3399 };
3400
3401 enum shmem_param {
3402 Opt_gid,
3403 Opt_huge,
3404 Opt_mode,
3405 Opt_mpol,
3406 Opt_nr_blocks,
3407 Opt_nr_inodes,
3408 Opt_size,
3409 Opt_uid,
3410 Opt_inode32,
3411 Opt_inode64,
3412 };
3413
3414 static const struct constant_table shmem_param_enums_huge[] = {
3415 {"never", SHMEM_HUGE_NEVER },
3416 {"always", SHMEM_HUGE_ALWAYS },
3417 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3418 {"advise", SHMEM_HUGE_ADVISE },
3419 {}
3420 };
3421
3422 const struct fs_parameter_spec shmem_fs_parameters[] = {
3423 fsparam_u32 ("gid", Opt_gid),
3424 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
3425 fsparam_u32oct("mode", Opt_mode),
3426 fsparam_string("mpol", Opt_mpol),
3427 fsparam_string("nr_blocks", Opt_nr_blocks),
3428 fsparam_string("nr_inodes", Opt_nr_inodes),
3429 fsparam_string("size", Opt_size),
3430 fsparam_u32 ("uid", Opt_uid),
3431 fsparam_flag ("inode32", Opt_inode32),
3432 fsparam_flag ("inode64", Opt_inode64),
3433 {}
3434 };
3435
3436 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3437 {
3438 struct shmem_options *ctx = fc->fs_private;
3439 struct fs_parse_result result;
3440 unsigned long long size;
3441 char *rest;
3442 int opt;
3443
3444 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3445 if (opt < 0)
3446 return opt;
3447
3448 switch (opt) {
3449 case Opt_size:
3450 size = memparse(param->string, &rest);
3451 if (*rest == '%') {
3452 size <<= PAGE_SHIFT;
3453 size *= totalram_pages();
3454 do_div(size, 100);
3455 rest++;
3456 }
3457 if (*rest)
3458 goto bad_value;
3459 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3460 ctx->seen |= SHMEM_SEEN_BLOCKS;
3461 break;
3462 case Opt_nr_blocks:
3463 ctx->blocks = memparse(param->string, &rest);
3464 if (*rest || ctx->blocks > S64_MAX)
3465 goto bad_value;
3466 ctx->seen |= SHMEM_SEEN_BLOCKS;
3467 break;
3468 case Opt_nr_inodes:
3469 ctx->inodes = memparse(param->string, &rest);
3470 if (*rest)
3471 goto bad_value;
3472 ctx->seen |= SHMEM_SEEN_INODES;
3473 break;
3474 case Opt_mode:
3475 ctx->mode = result.uint_32 & 07777;
3476 break;
3477 case Opt_uid:
3478 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3479 if (!uid_valid(ctx->uid))
3480 goto bad_value;
3481 break;
3482 case Opt_gid:
3483 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3484 if (!gid_valid(ctx->gid))
3485 goto bad_value;
3486 break;
3487 case Opt_huge:
3488 ctx->huge = result.uint_32;
3489 if (ctx->huge != SHMEM_HUGE_NEVER &&
3490 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3491 has_transparent_hugepage()))
3492 goto unsupported_parameter;
3493 ctx->seen |= SHMEM_SEEN_HUGE;
3494 break;
3495 case Opt_mpol:
3496 if (IS_ENABLED(CONFIG_NUMA)) {
3497 mpol_put(ctx->mpol);
3498 ctx->mpol = NULL;
3499 if (mpol_parse_str(param->string, &ctx->mpol))
3500 goto bad_value;
3501 break;
3502 }
3503 goto unsupported_parameter;
3504 case Opt_inode32:
3505 ctx->full_inums = false;
3506 ctx->seen |= SHMEM_SEEN_INUMS;
3507 break;
3508 case Opt_inode64:
3509 if (sizeof(ino_t) < 8) {
3510 return invalfc(fc,
3511 "Cannot use inode64 with <64bit inums in kernel\n");
3512 }
3513 ctx->full_inums = true;
3514 ctx->seen |= SHMEM_SEEN_INUMS;
3515 break;
3516 }
3517 return 0;
3518
3519 unsupported_parameter:
3520 return invalfc(fc, "Unsupported parameter '%s'", param->key);
3521 bad_value:
3522 return invalfc(fc, "Bad value for '%s'", param->key);
3523 }
3524
3525 static int shmem_parse_options(struct fs_context *fc, void *data)
3526 {
3527 char *options = data;
3528
3529 if (options) {
3530 int err = security_sb_eat_lsm_opts(options, &fc->security);
3531 if (err)
3532 return err;
3533 }
3534
3535 while (options != NULL) {
3536 char *this_char = options;
3537 for (;;) {
3538
3539
3540
3541
3542
3543 options = strchr(options, ',');
3544 if (options == NULL)
3545 break;
3546 options++;
3547 if (!isdigit(*options)) {
3548 options[-1] = '\0';
3549 break;
3550 }
3551 }
3552 if (*this_char) {
3553 char *value = strchr(this_char, '=');
3554 size_t len = 0;
3555 int err;
3556
3557 if (value) {
3558 *value++ = '\0';
3559 len = strlen(value);
3560 }
3561 err = vfs_parse_fs_string(fc, this_char, value, len);
3562 if (err < 0)
3563 return err;
3564 }
3565 }
3566 return 0;
3567 }
3568
3569
3570
3571
3572
3573
3574
3575
3576 static int shmem_reconfigure(struct fs_context *fc)
3577 {
3578 struct shmem_options *ctx = fc->fs_private;
3579 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3580 unsigned long inodes;
3581 struct mempolicy *mpol = NULL;
3582 const char *err;
3583
3584 raw_spin_lock(&sbinfo->stat_lock);
3585 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3586
3587 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3588 if (!sbinfo->max_blocks) {
3589 err = "Cannot retroactively limit size";
3590 goto out;
3591 }
3592 if (percpu_counter_compare(&sbinfo->used_blocks,
3593 ctx->blocks) > 0) {
3594 err = "Too small a size for current use";
3595 goto out;
3596 }
3597 }
3598 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3599 if (!sbinfo->max_inodes) {
3600 err = "Cannot retroactively limit inodes";
3601 goto out;
3602 }
3603 if (ctx->inodes < inodes) {
3604 err = "Too few inodes for current use";
3605 goto out;
3606 }
3607 }
3608
3609 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3610 sbinfo->next_ino > UINT_MAX) {
3611 err = "Current inum too high to switch to 32-bit inums";
3612 goto out;
3613 }
3614
3615 if (ctx->seen & SHMEM_SEEN_HUGE)
3616 sbinfo->huge = ctx->huge;
3617 if (ctx->seen & SHMEM_SEEN_INUMS)
3618 sbinfo->full_inums = ctx->full_inums;
3619 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3620 sbinfo->max_blocks = ctx->blocks;
3621 if (ctx->seen & SHMEM_SEEN_INODES) {
3622 sbinfo->max_inodes = ctx->inodes;
3623 sbinfo->free_inodes = ctx->inodes - inodes;
3624 }
3625
3626
3627
3628
3629 if (ctx->mpol) {
3630 mpol = sbinfo->mpol;
3631 sbinfo->mpol = ctx->mpol;
3632 ctx->mpol = NULL;
3633 }
3634 raw_spin_unlock(&sbinfo->stat_lock);
3635 mpol_put(mpol);
3636 return 0;
3637 out:
3638 raw_spin_unlock(&sbinfo->stat_lock);
3639 return invalfc(fc, "%s", err);
3640 }
3641
3642 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3643 {
3644 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3645
3646 if (sbinfo->max_blocks != shmem_default_max_blocks())
3647 seq_printf(seq, ",size=%luk",
3648 sbinfo->max_blocks << (PAGE_SHIFT - 10));
3649 if (sbinfo->max_inodes != shmem_default_max_inodes())
3650 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3651 if (sbinfo->mode != (0777 | S_ISVTX))
3652 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3653 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3654 seq_printf(seq, ",uid=%u",
3655 from_kuid_munged(&init_user_ns, sbinfo->uid));
3656 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3657 seq_printf(seq, ",gid=%u",
3658 from_kgid_munged(&init_user_ns, sbinfo->gid));
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3681 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3682 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3683
3684 if (sbinfo->huge)
3685 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3686 #endif
3687 shmem_show_mpol(seq, sbinfo->mpol);
3688 return 0;
3689 }
3690
3691 #endif
3692
3693 static void shmem_put_super(struct super_block *sb)
3694 {
3695 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3696
3697 free_percpu(sbinfo->ino_batch);
3698 percpu_counter_destroy(&sbinfo->used_blocks);
3699 mpol_put(sbinfo->mpol);
3700 kfree(sbinfo);
3701 sb->s_fs_info = NULL;
3702 }
3703
3704 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3705 {
3706 struct shmem_options *ctx = fc->fs_private;
3707 struct inode *inode;
3708 struct shmem_sb_info *sbinfo;
3709
3710
3711 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3712 L1_CACHE_BYTES), GFP_KERNEL);
3713 if (!sbinfo)
3714 return -ENOMEM;
3715
3716 sb->s_fs_info = sbinfo;
3717
3718 #ifdef CONFIG_TMPFS
3719
3720
3721
3722
3723
3724 if (!(sb->s_flags & SB_KERNMOUNT)) {
3725 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3726 ctx->blocks = shmem_default_max_blocks();
3727 if (!(ctx->seen & SHMEM_SEEN_INODES))
3728 ctx->inodes = shmem_default_max_inodes();
3729 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3730 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3731 } else {
3732 sb->s_flags |= SB_NOUSER;
3733 }
3734 sb->s_export_op = &shmem_export_ops;
3735 sb->s_flags |= SB_NOSEC;
3736 #else
3737 sb->s_flags |= SB_NOUSER;
3738 #endif
3739 sbinfo->max_blocks = ctx->blocks;
3740 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3741 if (sb->s_flags & SB_KERNMOUNT) {
3742 sbinfo->ino_batch = alloc_percpu(ino_t);
3743 if (!sbinfo->ino_batch)
3744 goto failed;
3745 }
3746 sbinfo->uid = ctx->uid;
3747 sbinfo->gid = ctx->gid;
3748 sbinfo->full_inums = ctx->full_inums;
3749 sbinfo->mode = ctx->mode;
3750 sbinfo->huge = ctx->huge;
3751 sbinfo->mpol = ctx->mpol;
3752 ctx->mpol = NULL;
3753
3754 raw_spin_lock_init(&sbinfo->stat_lock);
3755 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3756 goto failed;
3757 spin_lock_init(&sbinfo->shrinklist_lock);
3758 INIT_LIST_HEAD(&sbinfo->shrinklist);
3759
3760 sb->s_maxbytes = MAX_LFS_FILESIZE;
3761 sb->s_blocksize = PAGE_SIZE;
3762 sb->s_blocksize_bits = PAGE_SHIFT;
3763 sb->s_magic = TMPFS_MAGIC;
3764 sb->s_op = &shmem_ops;
3765 sb->s_time_gran = 1;
3766 #ifdef CONFIG_TMPFS_XATTR
3767 sb->s_xattr = shmem_xattr_handlers;
3768 #endif
3769 #ifdef CONFIG_TMPFS_POSIX_ACL
3770 sb->s_flags |= SB_POSIXACL;
3771 #endif
3772 uuid_gen(&sb->s_uuid);
3773
3774 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3775 if (!inode)
3776 goto failed;
3777 inode->i_uid = sbinfo->uid;
3778 inode->i_gid = sbinfo->gid;
3779 sb->s_root = d_make_root(inode);
3780 if (!sb->s_root)
3781 goto failed;
3782 return 0;
3783
3784 failed:
3785 shmem_put_super(sb);
3786 return -ENOMEM;
3787 }
3788
3789 static int shmem_get_tree(struct fs_context *fc)
3790 {
3791 return get_tree_nodev(fc, shmem_fill_super);
3792 }
3793
3794 static void shmem_free_fc(struct fs_context *fc)
3795 {
3796 struct shmem_options *ctx = fc->fs_private;
3797
3798 if (ctx) {
3799 mpol_put(ctx->mpol);
3800 kfree(ctx);
3801 }
3802 }
3803
3804 static const struct fs_context_operations shmem_fs_context_ops = {
3805 .free = shmem_free_fc,
3806 .get_tree = shmem_get_tree,
3807 #ifdef CONFIG_TMPFS
3808 .parse_monolithic = shmem_parse_options,
3809 .parse_param = shmem_parse_one,
3810 .reconfigure = shmem_reconfigure,
3811 #endif
3812 };
3813
3814 static struct kmem_cache *shmem_inode_cachep;
3815
3816 static struct inode *shmem_alloc_inode(struct super_block *sb)
3817 {
3818 struct shmem_inode_info *info;
3819 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
3820 if (!info)
3821 return NULL;
3822 return &info->vfs_inode;
3823 }
3824
3825 static void shmem_free_in_core_inode(struct inode *inode)
3826 {
3827 if (S_ISLNK(inode->i_mode))
3828 kfree(inode->i_link);
3829 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3830 }
3831
3832 static void shmem_destroy_inode(struct inode *inode)
3833 {
3834 if (S_ISREG(inode->i_mode))
3835 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3836 }
3837
3838 static void shmem_init_inode(void *foo)
3839 {
3840 struct shmem_inode_info *info = foo;
3841 inode_init_once(&info->vfs_inode);
3842 }
3843
3844 static void shmem_init_inodecache(void)
3845 {
3846 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3847 sizeof(struct shmem_inode_info),
3848 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3849 }
3850
3851 static void shmem_destroy_inodecache(void)
3852 {
3853 kmem_cache_destroy(shmem_inode_cachep);
3854 }
3855
3856
3857 static int shmem_error_remove_page(struct address_space *mapping,
3858 struct page *page)
3859 {
3860 return 0;
3861 }
3862
3863 const struct address_space_operations shmem_aops = {
3864 .writepage = shmem_writepage,
3865 .dirty_folio = noop_dirty_folio,
3866 #ifdef CONFIG_TMPFS
3867 .write_begin = shmem_write_begin,
3868 .write_end = shmem_write_end,
3869 #endif
3870 #ifdef CONFIG_MIGRATION
3871 .migrate_folio = migrate_folio,
3872 #endif
3873 .error_remove_page = shmem_error_remove_page,
3874 };
3875 EXPORT_SYMBOL(shmem_aops);
3876
3877 static const struct file_operations shmem_file_operations = {
3878 .mmap = shmem_mmap,
3879 .get_unmapped_area = shmem_get_unmapped_area,
3880 #ifdef CONFIG_TMPFS
3881 .llseek = shmem_file_llseek,
3882 .read_iter = shmem_file_read_iter,
3883 .write_iter = generic_file_write_iter,
3884 .fsync = noop_fsync,
3885 .splice_read = generic_file_splice_read,
3886 .splice_write = iter_file_splice_write,
3887 .fallocate = shmem_fallocate,
3888 #endif
3889 };
3890
3891 static const struct inode_operations shmem_inode_operations = {
3892 .getattr = shmem_getattr,
3893 .setattr = shmem_setattr,
3894 #ifdef CONFIG_TMPFS_XATTR
3895 .listxattr = shmem_listxattr,
3896 .set_acl = simple_set_acl,
3897 .fileattr_get = shmem_fileattr_get,
3898 .fileattr_set = shmem_fileattr_set,
3899 #endif
3900 };
3901
3902 static const struct inode_operations shmem_dir_inode_operations = {
3903 #ifdef CONFIG_TMPFS
3904 .getattr = shmem_getattr,
3905 .create = shmem_create,
3906 .lookup = simple_lookup,
3907 .link = shmem_link,
3908 .unlink = shmem_unlink,
3909 .symlink = shmem_symlink,
3910 .mkdir = shmem_mkdir,
3911 .rmdir = shmem_rmdir,
3912 .mknod = shmem_mknod,
3913 .rename = shmem_rename2,
3914 .tmpfile = shmem_tmpfile,
3915 #endif
3916 #ifdef CONFIG_TMPFS_XATTR
3917 .listxattr = shmem_listxattr,
3918 .fileattr_get = shmem_fileattr_get,
3919 .fileattr_set = shmem_fileattr_set,
3920 #endif
3921 #ifdef CONFIG_TMPFS_POSIX_ACL
3922 .setattr = shmem_setattr,
3923 .set_acl = simple_set_acl,
3924 #endif
3925 };
3926
3927 static const struct inode_operations shmem_special_inode_operations = {
3928 .getattr = shmem_getattr,
3929 #ifdef CONFIG_TMPFS_XATTR
3930 .listxattr = shmem_listxattr,
3931 #endif
3932 #ifdef CONFIG_TMPFS_POSIX_ACL
3933 .setattr = shmem_setattr,
3934 .set_acl = simple_set_acl,
3935 #endif
3936 };
3937
3938 static const struct super_operations shmem_ops = {
3939 .alloc_inode = shmem_alloc_inode,
3940 .free_inode = shmem_free_in_core_inode,
3941 .destroy_inode = shmem_destroy_inode,
3942 #ifdef CONFIG_TMPFS
3943 .statfs = shmem_statfs,
3944 .show_options = shmem_show_options,
3945 #endif
3946 .evict_inode = shmem_evict_inode,
3947 .drop_inode = generic_delete_inode,
3948 .put_super = shmem_put_super,
3949 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3950 .nr_cached_objects = shmem_unused_huge_count,
3951 .free_cached_objects = shmem_unused_huge_scan,
3952 #endif
3953 };
3954
3955 static const struct vm_operations_struct shmem_vm_ops = {
3956 .fault = shmem_fault,
3957 .map_pages = filemap_map_pages,
3958 #ifdef CONFIG_NUMA
3959 .set_policy = shmem_set_policy,
3960 .get_policy = shmem_get_policy,
3961 #endif
3962 };
3963
3964 int shmem_init_fs_context(struct fs_context *fc)
3965 {
3966 struct shmem_options *ctx;
3967
3968 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3969 if (!ctx)
3970 return -ENOMEM;
3971
3972 ctx->mode = 0777 | S_ISVTX;
3973 ctx->uid = current_fsuid();
3974 ctx->gid = current_fsgid();
3975
3976 fc->fs_private = ctx;
3977 fc->ops = &shmem_fs_context_ops;
3978 return 0;
3979 }
3980
3981 static struct file_system_type shmem_fs_type = {
3982 .owner = THIS_MODULE,
3983 .name = "tmpfs",
3984 .init_fs_context = shmem_init_fs_context,
3985 #ifdef CONFIG_TMPFS
3986 .parameters = shmem_fs_parameters,
3987 #endif
3988 .kill_sb = kill_litter_super,
3989 .fs_flags = FS_USERNS_MOUNT,
3990 };
3991
3992 void __init shmem_init(void)
3993 {
3994 int error;
3995
3996 shmem_init_inodecache();
3997
3998 error = register_filesystem(&shmem_fs_type);
3999 if (error) {
4000 pr_err("Could not register tmpfs\n");
4001 goto out2;
4002 }
4003
4004 shm_mnt = kern_mount(&shmem_fs_type);
4005 if (IS_ERR(shm_mnt)) {
4006 error = PTR_ERR(shm_mnt);
4007 pr_err("Could not kern_mount tmpfs\n");
4008 goto out1;
4009 }
4010
4011 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4012 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
4013 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4014 else
4015 shmem_huge = SHMEM_HUGE_NEVER;
4016 #endif
4017 return;
4018
4019 out1:
4020 unregister_filesystem(&shmem_fs_type);
4021 out2:
4022 shmem_destroy_inodecache();
4023 shm_mnt = ERR_PTR(error);
4024 }
4025
4026 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4027 static ssize_t shmem_enabled_show(struct kobject *kobj,
4028 struct kobj_attribute *attr, char *buf)
4029 {
4030 static const int values[] = {
4031 SHMEM_HUGE_ALWAYS,
4032 SHMEM_HUGE_WITHIN_SIZE,
4033 SHMEM_HUGE_ADVISE,
4034 SHMEM_HUGE_NEVER,
4035 SHMEM_HUGE_DENY,
4036 SHMEM_HUGE_FORCE,
4037 };
4038 int len = 0;
4039 int i;
4040
4041 for (i = 0; i < ARRAY_SIZE(values); i++) {
4042 len += sysfs_emit_at(buf, len,
4043 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
4044 i ? " " : "",
4045 shmem_format_huge(values[i]));
4046 }
4047
4048 len += sysfs_emit_at(buf, len, "\n");
4049
4050 return len;
4051 }
4052
4053 static ssize_t shmem_enabled_store(struct kobject *kobj,
4054 struct kobj_attribute *attr, const char *buf, size_t count)
4055 {
4056 char tmp[16];
4057 int huge;
4058
4059 if (count + 1 > sizeof(tmp))
4060 return -EINVAL;
4061 memcpy(tmp, buf, count);
4062 tmp[count] = '\0';
4063 if (count && tmp[count - 1] == '\n')
4064 tmp[count - 1] = '\0';
4065
4066 huge = shmem_parse_huge(tmp);
4067 if (huge == -EINVAL)
4068 return -EINVAL;
4069 if (!has_transparent_hugepage() &&
4070 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4071 return -EINVAL;
4072
4073 shmem_huge = huge;
4074 if (shmem_huge > SHMEM_HUGE_DENY)
4075 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4076 return count;
4077 }
4078
4079 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
4080 #endif
4081
4082 #else
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093 static struct file_system_type shmem_fs_type = {
4094 .name = "tmpfs",
4095 .init_fs_context = ramfs_init_fs_context,
4096 .parameters = ramfs_fs_parameters,
4097 .kill_sb = kill_litter_super,
4098 .fs_flags = FS_USERNS_MOUNT,
4099 };
4100
4101 void __init shmem_init(void)
4102 {
4103 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4104
4105 shm_mnt = kern_mount(&shmem_fs_type);
4106 BUG_ON(IS_ERR(shm_mnt));
4107 }
4108
4109 int shmem_unuse(unsigned int type)
4110 {
4111 return 0;
4112 }
4113
4114 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4115 {
4116 return 0;
4117 }
4118
4119 void shmem_unlock_mapping(struct address_space *mapping)
4120 {
4121 }
4122
4123 #ifdef CONFIG_MMU
4124 unsigned long shmem_get_unmapped_area(struct file *file,
4125 unsigned long addr, unsigned long len,
4126 unsigned long pgoff, unsigned long flags)
4127 {
4128 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4129 }
4130 #endif
4131
4132 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4133 {
4134 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4135 }
4136 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4137
4138 #define shmem_vm_ops generic_file_vm_ops
4139 #define shmem_file_operations ramfs_file_operations
4140 #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
4141 #define shmem_acct_size(flags, size) 0
4142 #define shmem_unacct_size(flags, size) do {} while (0)
4143
4144 #endif
4145
4146
4147
4148 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4149 unsigned long flags, unsigned int i_flags)
4150 {
4151 struct inode *inode;
4152 struct file *res;
4153
4154 if (IS_ERR(mnt))
4155 return ERR_CAST(mnt);
4156
4157 if (size < 0 || size > MAX_LFS_FILESIZE)
4158 return ERR_PTR(-EINVAL);
4159
4160 if (shmem_acct_size(flags, size))
4161 return ERR_PTR(-ENOMEM);
4162
4163 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4164 flags);
4165 if (unlikely(!inode)) {
4166 shmem_unacct_size(flags, size);
4167 return ERR_PTR(-ENOSPC);
4168 }
4169 inode->i_flags |= i_flags;
4170 inode->i_size = size;
4171 clear_nlink(inode);
4172 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4173 if (!IS_ERR(res))
4174 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4175 &shmem_file_operations);
4176 if (IS_ERR(res))
4177 iput(inode);
4178 return res;
4179 }
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4192 {
4193 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4194 }
4195
4196
4197
4198
4199
4200
4201
4202 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4203 {
4204 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4205 }
4206 EXPORT_SYMBOL_GPL(shmem_file_setup);
4207
4208
4209
4210
4211
4212
4213
4214
4215 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4216 loff_t size, unsigned long flags)
4217 {
4218 return __shmem_file_setup(mnt, name, size, flags, 0);
4219 }
4220 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4221
4222
4223
4224
4225
4226 int shmem_zero_setup(struct vm_area_struct *vma)
4227 {
4228 struct file *file;
4229 loff_t size = vma->vm_end - vma->vm_start;
4230
4231
4232
4233
4234
4235
4236
4237 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4238 if (IS_ERR(file))
4239 return PTR_ERR(file);
4240
4241 if (vma->vm_file)
4242 fput(vma->vm_file);
4243 vma->vm_file = file;
4244 vma->vm_ops = &shmem_vm_ops;
4245
4246 return 0;
4247 }
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4265 pgoff_t index, gfp_t gfp)
4266 {
4267 #ifdef CONFIG_SHMEM
4268 struct inode *inode = mapping->host;
4269 struct page *page;
4270 int error;
4271
4272 BUG_ON(!shmem_mapping(mapping));
4273 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4274 gfp, NULL, NULL, NULL);
4275 if (error)
4276 return ERR_PTR(error);
4277
4278 unlock_page(page);
4279 if (PageHWPoison(page)) {
4280 put_page(page);
4281 return ERR_PTR(-EIO);
4282 }
4283
4284 return page;
4285 #else
4286
4287
4288
4289 return read_cache_page_gfp(mapping, index, gfp);
4290 #endif
4291 }
4292 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);