0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #include <linux/ratelimit.h>
0019 #include <linux/string.h>
0020 #include <linux/mm.h>
0021 #include <linux/fs.h>
0022 #include <linux/fscrypt.h>
0023 #include <linux/fsnotify.h>
0024 #include <linux/slab.h>
0025 #include <linux/init.h>
0026 #include <linux/hash.h>
0027 #include <linux/cache.h>
0028 #include <linux/export.h>
0029 #include <linux/security.h>
0030 #include <linux/seqlock.h>
0031 #include <linux/memblock.h>
0032 #include <linux/bit_spinlock.h>
0033 #include <linux/rculist_bl.h>
0034 #include <linux/list_lru.h>
0035 #include "internal.h"
0036 #include "mount.h"
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
/* Tunable: relative weight applied when reclaiming dcache memory (default 100). */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

/*
 * Seqlock taken (write side) around rename-style topology changes;
 * lockless path walkers sample the read side to detect concurrent
 * renames and retry.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

/* Slab cache backing all struct dentry allocations. */
static struct kmem_cache *dentry_cache __read_mostly;

/* Shared constant qstrs for the empty name, "/" and "..". */
const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
const struct qstr dotdot_name = QSTR_INIT("..", 2);
EXPORT_SYMBOL(dotdot_name);
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
/*
 * Global dentry hash table.  Sized at boot; only the shift is needed at
 * runtime because buckets are selected by the high bits of the hash.
 */
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

/* Map a full name hash to its bucket in the global hash table. */
static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> d_hash_shift);
}
0107
/* Small fixed-size hash of dentries with an in-flight ("in lookup") lookup. */
#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

/*
 * Bucket for in-lookup dentries, keyed by (parent, name hash).  Dividing
 * the parent pointer by L1_CACHE_BYTES discards the always-zero low
 * alignment bits before mixing it into the hash.
 */
static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
0117
/*
 * Layout of the /proc/sys/fs/dentry-state report (six longs).  Only
 * nr_dentry, nr_unused and nr_negative are computed at read time; the
 * remaining fields are never updated in this file and appear to be kept
 * for ABI compatibility.
 */
struct dentry_stat_t {
	long nr_dentry;		/* total dentries allocated */
	long nr_unused;		/* dentries on LRU lists */
	long age_limit;		/* historical; fixed at init */
	long want_pages;	/* historical; always 0 here */
	long nr_negative;	/* unused negative dentries */
	long dummy;		/* reserved; always 0 here */
};

/*
 * Per-cpu counters, summed on demand.  Individual per-cpu values (and
 * even transient sums) may be negative since updates are unsynchronized.
 */
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);
0130
0131 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
0132
/* Snapshot exported via /proc/sys/fs/dentry-state; refreshed in proc_nr_dentry(). */
static struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149 static long get_nr_dentry(void)
0150 {
0151 int i;
0152 long sum = 0;
0153 for_each_possible_cpu(i)
0154 sum += per_cpu(nr_dentry, i);
0155 return sum < 0 ? 0 : sum;
0156 }
0157
0158 static long get_nr_dentry_unused(void)
0159 {
0160 int i;
0161 long sum = 0;
0162 for_each_possible_cpu(i)
0163 sum += per_cpu(nr_dentry_unused, i);
0164 return sum < 0 ? 0 : sum;
0165 }
0166
0167 static long get_nr_dentry_negative(void)
0168 {
0169 int i;
0170 long sum = 0;
0171
0172 for_each_possible_cpu(i)
0173 sum += per_cpu(nr_dentry_negative, i);
0174 return sum < 0 ? 0 : sum;
0175 }
0176
/*
 * Sysctl handler for fs/dentry-state: refresh the aggregate counters
 * from the per-cpu deltas, then hand off to the generic long-vector
 * handler for formatting (the table is registered read-only, 0444).
 */
static int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	dentry_stat.nr_negative = get_nr_dentry_negative();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
0185
/* /proc/sys/fs/dentry-state: six longs, read-only, refreshed on read. */
static struct ctl_table fs_dcache_sysctls[] = {
	{
		.procname = "dentry-state",
		.data = &dentry_stat,
		.maxlen = 6*sizeof(long),
		.mode = 0444,
		.proc_handler = proc_nr_dentry,
	},
	{ }	/* sentinel */
};
0196
/* Register the fs/dentry-state sysctl at fs_initcall time. */
static int __init init_fs_dcache_sysctls(void)
{
	register_sysctl_init("fs", fs_dcache_sysctls);
	return 0;
}
fs_initcall(init_fs_dcache_sysctls);
0203 #endif
0204
0205
0206
0207
0208
0209 #ifdef CONFIG_DCACHE_WORD_ACCESS
0210
0211 #include <asm/word-at-a-time.h>
0212
0213
0214
0215
0216
0217
0218
0219
0220
/*
 * Compare a dentry name with a candidate name, a word at a time.
 * Returns 0 on match, non-zero otherwise.
 *
 * 'cs' is the dentry name, read with read_word_at_a_time() (the dentry
 * name storage is safe to over-read word-wise).  'ct' is the candidate
 * and may not be padded, so it is read with load_unaligned_zeropad(),
 * which tolerates running off the end into an unmapped page.  The final
 * partial word is compared under a byte mask built from the remaining
 * count.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		/* fewer than a word left: fall through to the masked compare */
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
0241
0242 #else
0243
/*
 * Byte-at-a-time fallback name comparison (no word-access support).
 * Compares tcount bytes; returns 0 on match, 1 at the first mismatch.
 * tcount is expected to be non-zero (names have non-zero length).
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	for (;;) {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		if (!--tcount)
			return 0;
	}
}
0255
0256 #endif
0257
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename: the name pointer
	 * must be fetched exactly once (READ_ONCE), because a concurrent
	 * rename can switch d_name.name between inline and external
	 * storage.  Even if the length we were given no longer matches
	 * the name now stored there, the bytes themselves remain safe to
	 * read (external names are only freed after an RCU grace period
	 * — see __d_free_external()); a stale mismatch is resolved by the
	 * caller's seqcount validation.
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
0280
/*
 * Out-of-line storage for names too long for the inline d_iname array.
 * The refcount and RCU head share storage: count is used while the name
 * is live, head only after the final reference has been dropped.
 */
struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];	/* flexible array holding the NUL-terminated name */
};

/*
 * Recover the external_name container from dentry->d_name.name.
 * Only meaningful when dname_external(dentry) is true.
 */
static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}
0293
/* RCU callback: free a dentry whose name is stored inline. */
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

/* RCU callback: free a dentry together with its external name. */
static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}
0307
/* True if the name lives in a separate allocation rather than d_iname. */
static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
0312
/*
 * Capture a stable copy of a dentry's name.  Under d_lock the name
 * cannot change: an external name is pinned by bumping its refcount,
 * while an inline name (including the NUL terminator, hence len + 1)
 * is copied into the snapshot's inline buffer.  Release with
 * release_dentry_name_snapshot().
 */
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	name->name = dentry->d_name;
	if (unlikely(dname_external(dentry))) {
		atomic_inc(&external_name(dentry)->u.count);
	} else {
		memcpy(name->inline_name, dentry->d_iname,
		       dentry->d_name.len + 1);
		name->name.name = name->inline_name;
	}
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
0327
/*
 * Drop a snapshot taken by take_dentry_name_snapshot().  A copied inline
 * name needs no cleanup; a pinned external name has its refcount dropped
 * and, on the last reference, is freed after an RCU grace period.
 */
void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name.name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name.name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			kfree_rcu(p, u.head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
0338
/*
 * Publish d_inode and the entry-type bits together.  The release store
 * to d_flags orders the d_inode assignment before the type-flag update,
 * so a lockless reader that observes the new type bits is guaranteed to
 * also see the inode pointer.
 */
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	smp_store_release(&dentry->d_flags, flags);
}
0351
/*
 * Turn a dentry negative: clear the entry-type bits and the inode
 * pointer.  If the dentry is on an LRU list it now counts as an unused
 * negative dentry, so bump that statistic.
 */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_inc(nr_dentry_negative);
}
0362
/*
 * Release a dentry's memory.  For an external name, only the dropper of
 * the last name reference frees name and dentry together (RCU-delayed);
 * otherwise someone still holds a name snapshot and will free the name
 * later, so only the dentry itself is freed here.  DCACHE_NORCU dentries
 * are freed synchronously (presumably never visible to RCU walkers —
 * NOTE(review): confirm against the allocation sites); all others wait
 * for a grace period.
 */
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (dentry->d_flags & DCACHE_NORCU)
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
0379
0380
0381
0382
0383
/*
 * Detach the inode from a positive dentry, making it negative, then
 * release the inode via the filesystem's ->d_iput() if provided, or
 * plain iput() otherwise.  The d_seq write section lets lockless
 * walkers detect the transition.  Called with both locks held; drops
 * them before touching the inode so the iput can sleep.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
/*
 * d_lru list-state helpers.  dentry->d_lru is in one of three states,
 * encoded by d_flags:
 *   neither flag          - not on any list
 *   DCACHE_LRU_LIST only  - on the superblock LRU
 *   LRU_LIST|SHRINK_LIST  - on (or moving to) a private shrink list
 * D_FLAG_VERIFY asserts the expected state on entry; all helpers are
 * called with the dentry's d_lock held.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
/* Put an unlisted dentry on its superblock's LRU, updating statistics. */
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_inc(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
0433
/* Remove a dentry from its superblock's LRU, updating statistics. */
static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}
0443
/* Take a dentry off a private shrink list (plain list ops, no list_lru). */
static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

/* Put an unlisted dentry on a private shrink list. */
static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}
0459
0460
0461
0462
0463
0464
0465
/*
 * Variants used from inside list_lru walk callbacks, where the lru lock
 * is already held and the entry must be manipulated through the
 * list_lru isolate API rather than d_lru_add/del.
 */
/* Drop a dentry from the LRU while walking it (stays off all lists). */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate(lru, &dentry->d_lru);
}

/*
 * Move a dentry from the LRU to a private shrink list while walking.
 * DCACHE_LRU_LIST stays set (see the shrink-list state encoding above
 * d_lru_add); only nr_dentry_negative needs adjusting here since the
 * entry still counts as unused.
 */
static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
0485
/*
 * Remove the dentry from its hash chain under the chain's bit lock.
 * Anonymous (IS_ROOT) dentries hang off the superblock's s_roots list
 * instead of the main hash table.  Does not mark the dentry unhashed —
 * that is __d_drop()'s job.
 */
static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;

	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}

/*
 * Unhash a dentry: remove it from lookup, mark it unhashed
 * (pprev = NULL is the d_unhashed() condition) and invalidate d_seq so
 * in-progress RCU walkers retry.  Caller holds d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531
/*
 * d_drop - unhash a dentry so it can no longer be found by lookup.
 * Locked wrapper around __d_drop(); the dentry itself is not freed.
 */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
0539
/*
 * Detach a dying dentry from its parent's d_subdirs list.  The dentry
 * is marked DCACHE_DENTRY_KILLED first, so lockless list walkers can
 * recognize it.  The removal deliberately leaves dentry->d_child.next
 * usable (it is __list_del_entry, not list_del_init): readdir cursors
 * that were pointing here must still be able to advance.
 */
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Skip our stale d_child.next past any cursors that immediately
	 * follow us, so a cursor moving past this dead entry lands on a
	 * real sibling (or the list head) rather than another cursor.
	 * Cursors themselves only move under the parent's lock, which
	 * the caller holds here.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}
0577
/*
 * Actually tear down a dentry.  Entered with dentry->d_lock, the
 * parent's d_lock (if any) and the inode's i_lock (if positive) all
 * held; all are dropped along the way.  The lockref is marked dead
 * first so concurrent lockless dget attempts fail, then the dentry is
 * unhashed, unlinked from its parent and (if positive) detached from
 * its inode.  If the dentry still sits on someone's shrink list the
 * list owner is responsible for the final free: we only set
 * DCACHE_MAY_FREE and leave it (see shrink_dentry_list()).
 */
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/* Fail all lockless ref acquisition attempts from here on. */
	lockref_mark_dead(&dentry->d_lockref);

	/* Give the filesystem a last look while the dentry is still intact. */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		/* Shrink-list entries are unlinked by their list owner. */
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);	/* drops d_lock and i_lock */
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* Recheck under the lock: a shrink-list holder may still see us. */
	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	cond_resched();
}
0624
/*
 * Slow path for lock_parent(): dentry->d_lock is dropped, the parent's
 * lock taken, and the parent re-validated (a concurrent rename may have
 * reparented us in the unlocked window).  RCU keeps the sampled parent's
 * memory alive while unlocked.  Returns the locked parent with
 * dentry->d_lock retaken (nested), or NULL if the dentry turned out to
 * be its own parent.  Note: the dentry may have been killed or
 * reparented while fully unlocked — callers must recheck state.
 */
static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * The parent's lock only stabilizes d_parent if it still matches;
	 * otherwise a rename moved us and we must chase the new parent.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

/*
 * Lock the parent of a dentry whose d_lock we already hold.  Fast path
 * is a trylock (parent-before-child is the normal lock order, so a
 * plain spin_lock here could deadlock); on contention fall back to
 * __lock_parent().  Returns NULL for root dentries.
 */
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}
0662
/*
 * Called on the last-reference path with d_lock held: decide whether
 * the dentry should stay cached.  Returns false (without touching the
 * refcount) if it must be killed: unhashed, disconnected, rejected by
 * the filesystem's ->d_delete(), or marked don't-cache.  Otherwise
 * drops the reference, ensures the dentry is on the LRU and gives it a
 * DCACHE_REFERENCED second chance, then returns true.
 */
static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}

	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
		return false;

	/* retain; LRU fodder */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}
0690
/*
 * d_mark_dontcache - flag an inode and all its dentry aliases so they
 * are not retained in the caches once the last reference is dropped
 * (DCACHE_DONTCACHE makes retain_dentry() refuse them; I_DONTCACHE is
 * the inode-side equivalent).
 */
void d_mark_dontcache(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&de->d_lock);
		de->d_flags |= DCACHE_DONTCACHE;
		spin_unlock(&de->d_lock);
	}
	inode->i_state |= I_DONTCACHE;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);
0705
0706
0707
0708
0709
0710
/*
 * Finish off a dentry we've decided to kill.  Called with d_lock held;
 * returns with it released.  __dentry_kill() also needs the inode's
 * i_lock (if positive) and the parent's d_lock, both of which nest
 * *outside* d_lock — so they are acquired by trylock first, and on
 * contention we drop/retake in the correct order and then revalidate
 * everything (refcount may have been raised, the dentry reparented or
 * made negative while unlocked).  Returns the parent so the caller can
 * continue killing up the tree, or NULL when there is nothing more to
 * do.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			/*
			 * __lock_parent() dropped our lock: the dentry
			 * may have acquired an inode meanwhile; if so,
			 * restart via the slow positive path.
			 */
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative that became positive */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	/* Drop d_lock and take i_lock first to respect lock ordering. */
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	/* Revalidate: someone may have grabbed a reference while unlocked. */
	if (unlikely(dentry->d_lockref.count != 1)) {
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* we are keeping it, after all */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}
0756
0757
0758
0759
0760
0761
0762
0763
0764
/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, with the dentry lock held and the
 * refcount left at 1 — the caller must then decide (retain vs kill) and
 * drop the lock.  Called under RCU (see dput()), which keeps the dentry
 * memory valid across the unlocked flag checks below.
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we can't just drop the
	 * count lockless — d_delete() must be consulted under the lock.
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the lockref
	 * optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed.  We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;	/* count == 1, lock held: slow path */
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Careful, careful.  The reference count went down to zero, but
	 * anybody else who has seen it zero may already be reusing or
	 * killing the dentry.  What we care about is whether the dentry
	 * looks retainable *without* taking the lock: hashed, on the LRU
	 * with the REFERENCED bit set, and none of the "must die" flags.
	 * If so, dropping to zero is the normal cached state and we can
	 * just return.
	 *
	 * The ordering below matters: the lockref decrement above acts
	 * as our point of no return, and the smp_rmb() orders the
	 * subsequent d_flags read against it.  NOTE(review): the exact
	 * pairing is with the flag updates done under d_lock by the
	 * retain/kill paths — confirm against lockref's cmpxchg
	 * semantics.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST |
			DCACHE_DISCONNECTED | DCACHE_DONTCACHE;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/*
	 * Not the fast normal case?  Get the lock.  We've already
	 * decremented the refcount, but we'll need to re-check the
	 * situation once we have it.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference in the meantime?  If so,
	 * the dentry is no longer ours to kill — we're done.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Re-get the reference we optimistically dropped: the slow path
	 * (retain_dentry()/dentry_kill()) expects count == 1 with the
	 * lock held.
	 */
	dentry->d_lockref.count = 1;
	return false;
}
0866
0867
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
0893
/**
 * dput - release a dentry
 * @dentry: dentry to release (may be NULL)
 *
 * Drop a reference.  The common case is handled locklessly by
 * fast_dput() under RCU (the RCU read section keeps the dentry valid
 * across fast_dput()'s unlocked window).  Otherwise the dentry is
 * either retained on the LRU or killed; dentry_kill() hands back the
 * parent, whose reference we then drop in turn, walking up the tree.
 * May sleep (cond_resched() in __dentry_kill()).
 */
void dput(struct dentry *dentry)
{
	while (dentry) {
		might_sleep();

		rcu_read_lock();
		if (likely(fast_dput(dentry))) {
			rcu_read_unlock();
			return;
		}

		/* Slow case: now with the dentry lock held */
		rcu_read_unlock();

		if (likely(retain_dentry(dentry))) {
			spin_unlock(&dentry->d_lock);
			return;
		}

		dentry = dentry_kill(dentry);
	}
}
EXPORT_SYMBOL(dput);
0917
/*
 * Drop a reference with d_lock held; if that was the last one, defer
 * the kill by putting the dentry on the caller's shrink list instead of
 * killing it inline.  Entries already on some shrink list are left
 * where they are — their list owner will dispose of them.
 */
static void __dput_to_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		/* let the owner of the list it's on deal with it */
		--dentry->d_lockref.count;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!--dentry->d_lockref.count)
			d_shrink_add(dentry, list);
	}
}

/*
 * dput() variant that collects victims on @list for the caller to
 * dispose of (via shrink_dentry_list()) instead of killing inline.
 * Same fast path as dput().
 */
void dput_to_list(struct dentry *dentry, struct list_head *list)
{
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	if (!retain_dentry(dentry))
		__dput_to_list(dentry, list);
	spin_unlock(&dentry->d_lock);
}
0944
0945
/* Acquire a reference; caller already holds dentry->d_lock. */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

/* Acquire a reference via the lockref (takes the lock only on contention). */
static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
0955
/*
 * dget_parent - acquire a reference to the parent of @dentry.
 *
 * Fast path: under RCU, sample d_parent and try a lockless
 * lockref_get_not_zero(); the d_seq seqcount read validates that no
 * rename changed the parent in between (if it did, drop the ref we
 * grabbed and fall back).  Slow path: take the candidate parent's
 * d_lock and loop until d_parent is stable under it, then bump the
 * count directly.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;
	unsigned seq;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	seq = raw_seqcount_begin(&dentry->d_seq);
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (!read_seqcount_retry(&dentry->d_seq, seq))
			return ret;
		dput(ret);	/* raced with rename: retry the slow way */
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct
	 * under the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
0997
/*
 * Return the first alias of @inode with a new reference, or NULL if the
 * inode has no dentries.  Caller must hold inode->i_lock.
 */
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}
1008
1009
1010
1011
1012
1013
1014
1015
/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any alias exists for the given inode, take and return a reference
 * to it (hashed or not).  Otherwise return NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
1026
/*
 * Core of d_find_alias(); caller holds inode->i_lock.  For directories
 * any alias will do (presumably at most one exists — confirm against
 * d_instantiate rules); for other inodes, only a still-hashed alias is
 * returned, with a reference taken under its d_lock.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060 struct dentry *d_find_alias(struct inode *inode)
1061 {
1062 struct dentry *de = NULL;
1063
1064 if (!hlist_empty(&inode->i_dentry)) {
1065 spin_lock(&inode->i_lock);
1066 de = __d_find_alias(inode);
1067 spin_unlock(&inode->i_lock);
1068 }
1069 return de;
1070 }
1071 EXPORT_SYMBOL(d_find_alias);
1072
1073
1074
1075
1076
/*
 * Like d_find_alias(), but *without* taking a reference on the result:
 * the returned dentry is only guaranteed to stay valid as long as the
 * caller's protection does (NOTE(review): callers presumably rely on
 * RCU or the inode staying live — confirm at call sites).  Inodes being
 * freed (I_FREEING) yield NULL.
 */
struct dentry *d_find_alias_rcu(struct inode *inode)
{
	struct hlist_head *l = &inode->i_dentry;
	struct dentry *de = NULL;

	spin_lock(&inode->i_lock);
	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
	// used without having I_FREEING set, which means no aliases left
	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
		if (S_ISDIR(inode->i_mode)) {
			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
		} else {
			hlist_for_each_entry(de, l, d_u.d_alias)
				if (!d_unhashed(de))
					break;
		}
	}
	spin_unlock(&inode->i_lock);
	return de;
}
1097
1098
1099
1100
1101
/*
 * d_prune_aliases - kill all unreferenced aliases of an inode.
 * Aliases with a non-zero refcount are left alone.  Each kill drops
 * i_lock (via __dentry_kill()/dentry_unlink_inode()), so the walk
 * restarts from scratch after every victim.  The zero-count check is
 * repeated after lock_parent() because that helper may drop d_lock.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
/*
 * Lock ->d_inode->i_lock and the parent's ->d_lock for a dentry taken
 * off a shrink list, given ->d_lock already held.  Uses trylocks first
 * (those locks nest outside d_lock); on contention, d_lock is dropped
 * and everything reacquired in the proper order, after which the state
 * (refcount, inode, parent) must be revalidated.  The caller holds
 * rcu_read_lock(), which keeps the dentry and the sampled inode/parent
 * pointers safe to lock across the unlocked windows.  Returns false —
 * with only d_lock held — if the dentry turned out to be busy.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* changed inode means that somebody had grabbed it */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}
1176
/*
 * Dispose of every dentry on a private shrink list.  A dentry that
 * turns out to be busy (shrink_lock_dentry() fails) is simply dropped
 * from the list — unless a racing __dentry_kill() already ran and left
 * it for us (negative refcount from lockref_mark_dead() plus
 * DCACHE_MAY_FREE), in which case we do the deferred free.  Killing a
 * dentry may drop the last reference on its parent; that is routed back
 * onto this same list via __dput_to_list() so the loop also consumes
 * newly-orphaned ancestors.
 */
void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();	/* for shrink_lock_dentry() */
		if (!shrink_lock_dentry(dentry)) {
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			/* negative count => lockref was marked dead by a killer */
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		if (parent != dentry)
			__dput_to_list(parent, list);
		__dentry_kill(dentry);
	}
}
1204
/* list_lru walk callback for prune_dcache_sb(): pick reclaim victims. */
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * We are inverting the lru lock/dentry->d_lock here, so use a
	 * trylock.  If we fail to get the lock, just skip it — the LRU
	 * walk will revisit the entry later.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use; LRU removal is done
	 * lazily, so a dentry can regain a reference while still listed.
	 * Drop it from the LRU here rather than reclaiming it.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * Second-chance aging: DCACHE_REFERENCED was set on a
		 * recent dput(), so rotate the dentry to the tail of the
		 * LRU instead of reclaiming it.  Only a dentry that
		 * comes around again with the bit still clear gets
		 * moved to the dispose list below.  Clearing the bit
		 * here without reacquiring any list lock is fine: the
		 * entry stays on the LRU either way, and the worst a
		 * racing dput() can do is set the bit again, delaying
		 * reclaim by one more pass.
		 */
		return LRU_ROTATE;
	}

	/* Unreferenced and aged out: move to the caller's dispose list. */
	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
/**
 * prune_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock to shrink
 * @sc: shrink control, holding the number of entries to scan
 *
 * Walk the superblock's dentry LRU, isolating unreferenced, aged-out
 * dentries onto a private list (see dentry_lru_isolate()) and then
 * killing them.  Returns the number of dentries removed from the LRU.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
1285
/*
 * list_lru walk callback for shrink_dcache_sb(): unconditionally move
 * every entry we can lock to the dispose list (no referenced/second-
 * chance filtering — the whole superblock is going away).
 */
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * We are inverting the lru lock/dentry->d_lock here, so use a
	 * trylock.  Skipped entries are retried on the next walk pass.
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
1305
1306
1307
1308
1309
1310
1311
1312
1313
/**
 * shrink_dcache_sb - shrink the dcache for a superblock
 * @sb: superblock
 *
 * Release all unreferenced dentries of the superblock, in batches of up
 * to 1024, repeating until the LRU is empty (new entries may appear, or
 * trylock failures leave some behind, between passes).
 */
void shrink_dcache_sb(struct super_block *sb)
{
	do {
		LIST_HEAD(dispose);

		list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
1325
1326
1327
1328
1329
1330
1331
1332
/*
 * Return values for the d_walk() enter() callback, controlling the
 * descend-and-ascend traversal of a dentry subtree.
 */
enum d_walk_ret {
	D_WALK_CONTINUE,	/* keep walking */
	D_WALK_QUIT,		/* abort the whole walk */
	D_WALK_NORETRY,		/* finish, but don't retry on rename race */
	D_WALK_SKIP,		/* don't descend into this dentry */
};
1339
1340
1341
1342
1343
1344
1345
1346
1347
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() on each dentry
 * @enter:	callback when first entering a dentry
 *
 * Pre-order walk of @parent's subtree with per-dentry locking.  Renames
 * are detected via rename_lock: the first attempt samples the seqcount;
 * if a rename intervened (and @enter never returned D_WALK_NORETRY),
 * the walk restarts with seq = 1, which makes read_seqbegin_or_lock()
 * take the lock exclusively so the retry cannot race again.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		/* readdir cursors are not real children */
		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			/*
			 * Descend: the child's lock becomes the new
			 * "parent" lock.  The release/acquire pair only
			 * re-tags the already-held lock for lockdep
			 * (nested -> outer); no real unlock happens.
			 */
			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 * RCU protects the child pointers we traverse while moving from
	 * the finished child's lock to the parent's.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);	/* can only race on the lockless first pass */
	if (!retry)
		return;
	seq = 1;		/* retry with rename_lock held exclusively */
	goto again;
}
1453
/* d_walk() state for path_has_submounts(): the mount to check and the result. */
struct check_mount {
	struct vfsmount *mnt;
	unsigned int mounted;
};

/*
 * d_walk() callback: quit the walk as soon as some dentry in the tree
 * is a mountpoint within info->mnt.
 */
static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
1472
1473
1474
1475
1476
1477
1478
1479
1480
/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.  mount_lock is held
 * (read-side, exclusively) so the mount table is stable for the
 * duration of the walk.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
1492
1493
1494
1495
1496
1497
1498
1499
1500
/*
 * Called by mount code to set a mountpoint and check if the mountpoint
 * is reachable (e.g. NFS can unhash a directory dentry and then the
 * complete subtree can become unreachable).
 *
 * Walks up the ancestor chain under rename_lock (write side, so no
 * rename can change the chain mid-walk): returns -ENOENT if any
 * ancestor is unhashed or the dentry itself is unlinked, -EBUSY if it
 * is already a mountpoint, 0 on success (DCACHE_MOUNTED set).
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
/*
 * Shared state for the shrink_dcache_parent() walks.  select_collect()
 * uses 'found' (count of busy/queued entries); select_collect2() uses
 * 'victim' (a blocked shrink-list entry to evict directly) — hence the
 * union.  'dispose' collects evictable dentries for shrink_dentry_list().
 */
struct select_data {
	struct dentry *start;
	union {
		long found;
		struct dentry *victim;
	};
	struct list_head dispose;
};
1552
/*
 * d_walk() callback for shrink_dcache_parent(): queue every unreferenced
 * dentry on data->dispose; count (in data->found) both those and any
 * entries already on someone else's shrink list, so the caller knows
 * work remains even when nothing could be queued this pass.
 */
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress).  We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
1581
/*
 * d_walk() callback, pass 2 of shrink_dcache_parent(): like
 * select_collect(), but when an unreferenced dentry is found that is
 * already on some shrink list, pick it as data->victim for the caller
 * to kill directly instead of merely counting it.
 */
static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	/* Never collect the root of the subtree we were asked to shrink. */
	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		if (!dentry->d_lockref.count) {
			/*
			 * Take rcu_read_lock() before quitting the walk so
			 * the victim cannot be freed out from under the
			 * caller; the caller drops it after locking/killing.
			 */
			rcu_read_lock();
			data->victim = dentry;
			return D_WALK_QUIT;
		}
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count)
			d_shrink_add(dentry, &data->dispose);
	}

	/*
	 * Avoid walk restarts once the dispose list is non-empty; quit
	 * early when a reschedule is due (caller loops and retries).
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
1612
1613
1614
1615
1616
1617
1618
/**
 * shrink_dcache_parent - prune the dcache below a given parent
 * @parent: root of the subtree to prune
 *
 * Repeatedly walks the subtree, disposing of every unreferenced dentry,
 * until nothing collectable remains.  Dentries that sit on somebody
 * else's shrink list are handled in a second pass (select_collect2)
 * which hands back one victim at a time to be killed here.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data = {.start = parent};

		INIT_LIST_HEAD(&data.dispose);
		d_walk(parent, &data, select_collect);

		if (!list_empty(&data.dispose)) {
			shrink_dentry_list(&data.dispose);
			/* Made progress — rescan before the slow pass. */
			continue;
		}

		cond_resched();
		if (!data.found)
			break;
		/* Only cross-shrink-list dentries remain: pick one victim. */
		data.victim = NULL;
		d_walk(parent, &data, select_collect2);
		if (data.victim) {
			/* NB: shadows the function parameter on purpose. */
			struct dentry *parent;
			spin_lock(&data.victim->d_lock);
			if (!shrink_lock_dentry(data.victim)) {
				spin_unlock(&data.victim->d_lock);
				/* Pairs with rcu_read_lock() in select_collect2. */
				rcu_read_unlock();
			} else {
				rcu_read_unlock();
				parent = data.victim->d_parent;
				if (parent != data.victim)
					__dput_to_list(parent, &data.dispose);
				__dentry_kill(data.victim);
			}
		}
		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
1656
/*
 * d_walk() callback for umount: complain loudly about any leaf dentry
 * that is still referenced when the filesystem is being torn down.
 * Purely diagnostic — always continues the walk.
 */
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* Only leaves are interesting; busy interior nodes follow from them. */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* The walk root (passed as _data) legitimately holds one ref. */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
1679
/*
 * Tear down one dentry tree during umount: shrink it, warn about
 * anything still busy, then unhash and drop the root.  Consumes the
 * caller's reference on @dentry.
 */
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check);
	d_drop(dentry);
	dput(dentry);
}
1687
1688
1689
1690
/*
 * Destroy the dentries attached to a superblock on unmounting: the
 * s_root tree first, then every remaining anonymous root on s_roots.
 * Caller must hold s_umount (asserted via the trylock WARN below).
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	/* trylock succeeding would mean nobody held s_umount — a bug. */
	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	/* NFS-exported / disconnected trees hang off sb->s_roots. */
	while (!hlist_bl_empty(&sb->s_roots)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
1706
/*
 * d_walk() callback for d_invalidate(): grab a reference to the first
 * mountpoint found in the subtree and report it through *_data.
 */
static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
	struct dentry **victim = _data;
	if (d_mountpoint(dentry)) {
		/* d_walk holds d_lock here, so the lockless dget variant. */
		__dget_dlock(dentry);
		*victim = dentry;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}
1717
1718
1719
1720
1721
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate (aka drop)
 *
 * Unhashes @dentry and, if it has an inode, prunes its subtree and
 * detaches any mounts found below it, looping until no submounts
 * remain.  A final shrink pass runs if any mounts were detached.
 * No-op (beyond the check) if the dentry was already unhashed.
 */
void d_invalidate(struct dentry *dentry)
{
	bool had_submounts = false;
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries: nothing below to shrink or unmount. */
	if (!dentry->d_inode)
		return;

	shrink_dcache_parent(dentry);
	for (;;) {
		struct dentry *victim = NULL;
		d_walk(dentry, &victim, find_submount);
		if (!victim) {
			if (had_submounts)
				shrink_dcache_parent(dentry);
			return;
		}
		had_submounts = true;
		detach_mounts(victim);
		dput(victim);	/* ref taken by find_submount() */
	}
}
EXPORT_SYMBOL(d_invalidate);
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
/*
 * __d_alloc - allocate a dcache entry
 * @sb:   filesystem it will belong to
 * @name: name of the entry; NULL means an anonymous entry named "/"
 *
 * Allocates and initialises a dentry with one reference held by the
 * caller.  The result is unhashed, has no inode, and is its own parent
 * (IS_ROOT); callers attach it as needed.  Returns NULL on allocation
 * failure or if the filesystem's d_init() op fails.
 */
static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru,
				      GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * Pre-terminate the inline name buffer at its very end; the
	 * actual name written below is NUL-terminated too, presumably
	 * this guards lockless readers that overcopy — TODO confirm
	 * against dentry_cmp()/prepend_name() users.
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;		/* anonymous: named "/" */
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		/* Too long for the inline buffer: refcounted external copy. */
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT |
						  __GFP_RECLAIMABLE);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);	/* sole owner so far */
		dname = p->name;
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Publish d_name.name only after the bytes are fully written. */
	smp_store_release(&dentry->d_name.name, dname);

	dentry->d_lockref.count = 1;	/* the caller's reference */
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;	/* IS_ROOT until attached */
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			/* Roll back name + dentry allocation on failure. */
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name:   qstr of the name
 *
 * Allocates a dentry and hooks it into @parent's list of children,
 * taking a reference on the parent.  The entry is still unhashed and
 * has no inode.  Returns NULL on allocation failure.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	spin_lock(&parent->d_lock);
	/*
	 * Grab the parent reference under its d_lock — this also makes
	 * the child visible on d_subdirs atomically with the ref.
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
1865
/* Allocate an unattached, anonymous (IS_ROOT, "/"-named) dentry for @sb. */
struct dentry *d_alloc_anon(struct super_block *sb)
{
	return __d_alloc(sb, NULL);
}
EXPORT_SYMBOL(d_alloc_anon);
1871
/*
 * Allocate a directory-cursor dentry (used by readdir on some
 * filesystems).  Marked DCACHE_DENTRY_CURSOR; holds a reference on
 * @parent but is NOT put on the parent's d_subdirs list here.
 */
struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = d_alloc_anon(parent->d_sb);
	if (dentry) {
		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
/**
 * d_alloc_pseudo - allocate a dentry for a pseudo filesystem
 * @sb:   superblock of the pseudo filesystem
 * @name: name of the dentry
 *
 * Like d_alloc_anon() with a name, but marked DCACHE_NORCU: such
 * dentries bypass RCU-delayed freeing, so they must never be reachable
 * by RCU-walk lookups.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (likely(dentry))
		dentry->d_flags |= DCACHE_NORCU;
	return dentry;
}
1904
1905 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1906 {
1907 struct qstr q;
1908
1909 q.name = name;
1910 q.hash_len = hashlen_string(parent, name);
1911 return d_alloc(parent, &q);
1912 }
1913 EXPORT_SYMBOL(d_alloc_name);
1914
/*
 * Install dentry operations on @dentry and cache which ops exist as
 * DCACHE_OP_* bits in d_flags, so hot paths can test a flag instead of
 * chasing d_op pointers.  May only be called once per dentry, before
 * any of the flagged ops could be needed (WARNs otherwise).
 */
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;

}
EXPORT_SYMBOL(d_set_d_op);
1944
1945
1946
1947
1948
1949
1950
1951
1952
/**
 * d_set_fallthru - mark a dentry as falling through to a lower layer
 * @dentry: dentry to mark
 *
 * Sets DCACHE_FALLTHRU under d_lock.  Used by union/overlay-style
 * filesystems — the flag's consumers are elsewhere; this is just the
 * setter.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);
1960
/*
 * Compute the DCACHE_*_TYPE flags a dentry should carry for @inode:
 * MISS (negative), DIRECTORY/AUTODIR, SYMLINK, REGULAR or SPECIAL,
 * plus NEED_AUTOMOUNT for automount points.  As a side effect, caches
 * IOP_LOOKUP / IOP_NOFOLLOW in inode->i_opflags so later calls can
 * skip the i_op dereferences.
 */
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;	/* negative dentry */

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			/* No ->lookup: a directory you can't look up in. */
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;	/* cache the negative */
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
1995
/*
 * Attach @inode to @dentry.  Caller holds inode->i_lock and must have
 * verified the dentry is not mid-lookup (WARNed on).  The inode/type
 * update is bracketed by d_seq writes so lockless readers see it
 * atomically.
 */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	/*
	 * The dentry stops being negative here; if it sits on an LRU
	 * list it was counted as a negative one, so fix the counter.
	 */
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_dec(nr_dentry_negative);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry, may be NULL
 *
 * Attaches @inode to @entry (no-op when @inode is NULL, leaving the
 * dentry negative).  The dentry must not already have an inode alias
 * (BUG otherwise).  Runs security_d_instantiate() before publishing.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);
2041
2042
2043
2044
2045
2046
2047
/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related parts of unlock_new_inode() done here: the
 * I_NEW (and I_CREATING) bits are cleared and waiters woken under the
 * same i_lock hold that instantiates the dentry.  @inode must be
 * non-NULL and still I_NEW (WARNed on).
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	/* Order the i_state write before waking __wait_on_bit sleepers. */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
2063
2064 struct dentry *d_make_root(struct inode *root_inode)
2065 {
2066 struct dentry *res = NULL;
2067
2068 if (root_inode) {
2069 res = d_alloc_anon(root_inode->i_sb);
2070 if (res)
2071 d_instantiate(res, root_inode);
2072 else
2073 iput(root_inode);
2074 }
2075 return res;
2076 }
2077 EXPORT_SYMBOL(d_make_root);
2078
/*
 * Instantiate @dentry with @inode unless the inode already has an
 * alias, in which case that alias wins: @dentry is dropped and the
 * existing alias returned (consuming the inode reference).
 *
 * @disconnected selects DCACHE_DISCONNECTED (anonymous, to be
 * reconnected later) versus hashing the dentry on sb->s_roots as a
 * proper root.  On the success path the inode reference is transferred
 * into the dentry.
 */
static struct dentry *__d_instantiate_anon(struct dentry *dentry,
					   struct inode *inode,
					   bool disconnected)
{
	struct dentry *res;
	unsigned add_flags;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		/* Somebody beat us to it — use their alias instead. */
		spin_unlock(&inode->i_lock);
		dput(dentry);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&dentry->d_lock);
	__d_set_inode_and_type(dentry, inode, add_flags);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	if (!disconnected) {
		/* A real root: make it findable on the sb's root list. */
		hlist_bl_lock(&dentry->d_sb->s_roots);
		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
		hlist_bl_unlock(&dentry->d_sb->s_roots);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);

	return dentry;

 out_iput:
	iput(inode);
	return res;
}
2118
/* Instantiate @dentry as a DCACHE_DISCONNECTED alias of @inode. */
struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
{
	return __d_instantiate_anon(dentry, inode, true);
}
EXPORT_SYMBOL(d_instantiate_anon);
2124
/*
 * Common engine for d_obtain_alias()/d_obtain_root(): find or create a
 * dentry for @inode, consuming the inode reference in all cases.
 * Returns an existing alias if one is found; otherwise allocates an
 * anonymous dentry and instantiates it (disconnected or not per the
 * flag).  ERR_PTR on NULL/error inode or allocation failure.
 */
static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc_anon(inode->i_sb);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	/* Handles the race against a concurrent alias internally. */
	return __d_instantiate_anon(tmp, inode, disconnected);

out_iput:
	iput(inode);
	return res;
}
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
/*
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for an inode
 * @inode: inode to allocate the dentry for; reference is consumed.
 *
 * Typically used by exportfs-style lookups where no path to the inode
 * is known yet — TODO confirm callers; returns ERR_PTR on failure.
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, true);
}
EXPORT_SYMBOL(d_obtain_alias);
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
/*
 * d_obtain_root - find or allocate a (non-disconnected) root dentry
 * @inode: inode to allocate the dentry for; reference is consumed.
 *
 * Like d_obtain_alias() but the result is hashed on sb->s_roots rather
 * than marked DCACHE_DISCONNECTED.
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
/**
 * d_add_ci - lookup or allocate a new dentry with a case-exact name
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @inode:  the inode case-insensitive lookup has found (consumed)
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * Used by case-insensitive filesystems: the on-disk name may differ in
 * case from what the user typed, so a dentry carrying the exact name is
 * looked up or created before splicing @inode in.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		/* Join the in-progress parallel lookup under this name. */
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		/* An alias took over; discard our freshly made dentry. */
		d_lookup_done(found);
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259 bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
2260 const struct qstr *name)
2261 {
2262 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2263 if (dentry->d_name.len != name->len)
2264 return false;
2265 return dentry_cmp(dentry, name->name, name->len) == 0;
2266 }
2267 return parent->d_op->d_compare(dentry,
2268 dentry->d_name.len, dentry->d_name.name,
2269 name) == 0;
2270 }
2271 EXPORT_SYMBOL_GPL(d_same_name);
2272
2273
2274
2275
2276
/*
 * Slow-path of __d_lookup_rcu() for parents with a d_compare op: the
 * name bytes must be sampled under d_seq and revalidated before the
 * filesystem's comparison callback may look at them.  noinline keeps
 * this off the common lookup path.
 */
static noinline struct dentry *__d_lookup_rcu_op_compare(
	const struct dentry *parent,
	const struct qstr *name,
	unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		int tlen;
		const char *tname;
		unsigned seq;

seqretry:
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		if (dentry->d_name.hash != hashlen_hash(hashlen))
			continue;
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		/*
		 * Revalidate the snapshot before letting d_compare touch
		 * tname: a concurrent rename could have swapped the name
		 * buffer out from under us.
		 */
		if (read_seqcount_retry(&dentry->d_seq, seq)) {
			cpu_relax();
			goto seqretry;
		}
		if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
			continue;
		*seqp = seq;	/* caller re-checks d_seq against this */
		return dentry;
	}
	return NULL;
}
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free, RCU-walk)
 * @parent: parent dentry
 * @name:   qstr of name we wish to find
 * @seqp:   returns d_seq value at which the result was sampled
 *
 * Lockless hash lookup: no stores, no refcount changes.  The caller
 * must hold rcu_read_lock() and must validate *seqp (and the parent's
 * own sequence) before trusting the result — entries found here can be
 * renamed or killed at any moment.  Returns NULL when nothing matches.
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Filesystems with a custom d_compare need a slower protocol
	 * (name snapshot + revalidation); keep that out of line.
	 */
	if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
		return __d_lookup_rcu_op_compare(parent, name, seqp);

	/*
	 * Common case: memcmp-style comparison.  hash_len lets hash and
	 * length be rejected with a single 64-bit compare.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

		/*
		 * raw_seqcount_begin() deliberately does not spin on an
		 * odd (write-in-progress) count: the fields read below
		 * are re-validated by the caller via *seqp, so sampling
		 * mid-update only costs a retry, never a wrong answer.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		if (dentry->d_name.hash_len != hashlen)
			continue;
		if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
			continue;
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2424 {
2425 struct dentry *dentry;
2426 unsigned seq;
2427
2428 do {
2429 seq = read_seqbegin(&rename_lock);
2430 dentry = __d_lookup(parent, name);
2431 if (dentry)
2432 break;
2433 } while (read_seqretry(&rename_lock, seq));
2434 return dentry;
2435 }
2436 EXPORT_SYMBOL(d_lookup);
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name:   qstr of name we wish to find
 *
 * Hash-chain walk under RCU, with per-candidate d_lock for the final
 * checks and refcount bump.  Unlike d_lookup(), this does NOT guard
 * against concurrent renames: a false-negative is possible, so callers
 * must either tolerate that or use d_lookup() which retries under
 * rename_lock.  Returns the dentry with d_lockref.count elevated, or
 * NULL.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * RCU keeps the chain nodes alive while we walk; all decisive
	 * checks (parent, hashed, name) happen under the candidate's
	 * d_lock, so anything we return is genuinely a current match.
	 */
	rcu_read_lock();
	
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		/* Cheap lockless filter before taking d_lock. */
		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		dentry->d_lockref.count++;	/* hand a ref to the caller */
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
 	}
 	rcu_read_unlock();

 	return found;
}
2508
2509
2510
2511
2512
2513
2514
2515
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir:  Directory to search in
 * @name: qstr of name we wish to find
 *
 * Fills in name->hash (via full_name_hash(), then the filesystem's
 * d_hash op if one exists) before delegating to d_lookup().
 * On I/O error from d_hash, returns ERR_PTR; otherwise the dentry or
 * NULL.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise remove
 * it from the hash queues so it can be deleted later.  Which path is
 * taken depends on whether we hold the last reference.
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;

	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	/*
	 * Are we the only user?  Then make it negative in place;
	 * dentry_unlink_inode() drops both locks for us.
	 */
	if (dentry->d_lockref.count == 1) {
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
	} else {
		__d_drop(dentry);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_delete);
2573
/*
 * Insert @entry into its hash chain (chosen by d_name.hash).
 * Caller holds entry->d_lock; the chain's bit-spinlock is taken here.
 */
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);

	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
2582
2583
2584
2585
2586
2587
2588
2589
/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Locked wrapper around __d_rehash().
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
2597
/*
 * Begin a directory-add section: spin until i_dir_seq can be bumped
 * from even to odd with cmpxchg.  The odd value acts as a lock —
 * d_alloc_parallel() retries while it observes it.  Returns the even
 * value for end_dir_add().
 *
 * On PREEMPT_RT, disable preemption for the (short) critical section
 * so the holder cannot be preempted while others spin on i_dir_seq.
 */
static inline unsigned start_dir_add(struct inode *dir)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}
2616
/*
 * End a directory-add section started by start_dir_add(): publish the
 * next even sequence value (release pairs with the acquire load in
 * d_alloc_parallel()) and wake anyone waiting on the lookup.
 */
static inline void end_dir_add(struct inode *dir, unsigned int n,
			       wait_queue_head_t *d_wait)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	wake_up_all(d_wait);
}
2625
/*
 * Sleep (uninterruptibly) until @dentry leaves the in-lookup state.
 * Called and returns with dentry->d_lock held; the lock is dropped
 * around each schedule().
 */
static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
2639
/*
 * d_alloc_parallel - arbitrate parallel lookups of the same name
 * @parent: directory being searched
 * @name:   name being looked up
 * @wq:     waitqueue the new in-lookup dentry should use
 *
 * Either returns an existing (or just-completed) dentry for the name
 * with a reference held, or inserts a freshly allocated dentry into
 * the in-lookup hash (DCACHE_PAR_LOOKUP set) and returns it so the
 * caller may perform the actual filesystem lookup.  Callers check
 * d_in_lookup() on the result to tell the two cases apart.
 */
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	/* Acquire pairs with the release in end_dir_add(). */
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		/* Already hashed: try to pin it and validate the sample. */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);	/* lost the race; ours is surplus */
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		/* A rename raced with our miss — can't trust it. */
		rcu_read_unlock();
		goto retry;
	}

	if (unlikely(seq & 1)) {
		/* Odd i_dir_seq: a directory add is in flight. */
		rcu_read_unlock();
		goto retry;
	}

	hlist_bl_lock(b);
	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
		/* An add completed between our sample and the lock. */
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}

	/*
	 * With the in-lookup chain locked and i_dir_seq unchanged, any
	 * competing lookup of this name must already be on this chain.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* Found a lookup in flight: pin it and wait it out. */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * Wait for the other lookup to finish, then re-verify
		 * the dentry still matches — it may have been moved,
		 * unhashed or renamed while we slept.
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* No competitor: publish our dentry as the in-flight lookup. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);
2749
2750
2751
2752
2753
2754
/*
 * Take @dentry out of the in-lookup hash, clear DCACHE_PAR_LOOKUP and
 * detach its waitqueue.  Re-initialises the d_u.d_alias / d_lru links
 * that the in-lookup state had repurposed.  Caller holds d_lock and is
 * responsible for waking the returned waitqueue (after any further
 * state updates it wants waiters to observe).
 */
static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
{
	wait_queue_head_t *d_wait;
	struct hlist_bl_head *b;

	lockdep_assert_held(&dentry->d_lock);

	b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	d_wait = dentry->d_wait;
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
	return d_wait;
}
2773
/*
 * End an in-progress lookup on @dentry and wake all waiters.
 * Convenience wrapper: lock, unhash from the in-lookup table, wake.
 */
void __d_lookup_unhash_wake(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	wake_up_all(__d_lookup_unhash(dentry));
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(__d_lookup_unhash_wake);
2781
2782
2783
/*
 * Core of d_add()/d_splice_alias()'s fast path: optionally attach
 * @inode (caller already holds inode->i_lock in that case, dropped
 * here), finish any in-progress parallel lookup, and hash the dentry.
 * The i_dir_seq bump brackets the whole update so d_alloc_parallel()
 * retries instead of racing with us.
 */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	wait_queue_head_t *d_wait;
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		d_wait = __d_lookup_unhash(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		/* d_seq write section: inode+type appear atomic to readers. */
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n, d_wait);	/* also wakes lookup waiters */
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry, may be NULL
 *
 * Attaches the inode (if any) and hashes the dentry.  The security
 * hook runs before i_lock is taken and the dentry is published.
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to clone
 * @inode: the inode entry points to
 *
 * If an unhashed dentry with the same name and parent as @entry already
 * aliases @inode, rehash it, grab a reference and return it.  Returns
 * NULL when no such alias exists (or the matching alias is already
 * hashed, in which case re-using it is not our business).
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Name/parent fields are stable enough to compare here:
		 * i_lock keeps the alias from being freed, and a rename
		 * of it would need its d_lock which we take before use.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			/* Already live in the hash — nothing to resurrect. */
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
2875
/*
 * Exchange the names of two dentries (used by __d_move for the
 * exchange case).  Four combinations of inline/external storage:
 * external names swap by pointer; an inline name is copied into the
 * other dentry's inline buffer; two inline names are swapped word by
 * word.  hash_len always swaps at the end.
 */
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/* Both external: just exchange the pointers. */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:inline, target:external.  Copy dentry's
			 * inline bytes over, hand its external name to
			 * dentry, and point target at its own buffer.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
				dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/* dentry:external, target:inline — mirror case. */
			memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both inline: swap the buffers a long at a time
			 * (DNAME_INLINE_LEN is asserted long-aligned).
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
2918
/*
 * Give @dentry @target's name (used by __d_move for plain moves).
 * External target names are shared by bumping their refcount; inline
 * ones are copied.  Dentry's previous external name, if any, is
 * released (RCU-deferred, since lockless readers may still hold it).
 */
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
/*
 * __d_move - move a dentry
 * @dentry:   entry to move
 * @target:   new dentry
 * @exchange: exchange the two dentries instead of displacing @target
 *
 * Update the dcache to reflect the move of a file name.  Negative
 * dcache entries should not be moved in this way (WARNed).  Caller
 * holds rename_lock for write.  Lock order for the up-to-four d_locks
 * is fixed by ancestry (never lock a descendant before its ancestor)
 * with explicit nesting levels for lockdep.
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct dentry *old_parent, *p;
	wait_queue_head_t *d_wait;
	struct inode *dir = NULL;
	unsigned n;

	WARN_ON(!dentry->d_inode);
	if (WARN_ON(dentry == target))
		return;

	BUG_ON(d_ancestor(target, dentry));
	old_parent = dentry->d_parent;
	p = d_ancestor(old_parent, target);
	if (IS_ROOT(dentry)) {
		BUG_ON(p);
		spin_lock(&target->d_parent->d_lock);
	} else if (!p) {
		/* target's parent can't be a descendant of dentry's */
		spin_lock(&target->d_parent->d_lock);
		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		BUG_ON(p == dentry);
		spin_lock(&old_parent->d_lock);
		if (p != target)
			spin_lock_nested(&target->d_parent->d_lock,
					DENTRY_D_LOCK_NESTED);
	}
	spin_lock_nested(&dentry->d_lock, 2);
	spin_lock_nested(&target->d_lock, 3);

	if (unlikely(d_in_lookup(target))) {
		/* Finish target's parallel lookup; waiters woken below. */
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		d_wait = __d_lookup_unhash(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	if (!d_unhashed(dentry))
		___d_drop(dentry);
	if (!d_unhashed(target))
		___d_drop(target);

	/* ... and switch them in the tree */
	dentry->d_parent = target->d_parent;
	if (!exchange) {
		copy_name(dentry, target);
		target->d_hash.pprev = NULL;	/* target stays unhashed */
		dentry->d_parent->d_lockref.count++;
		if (dentry != old_parent) /* wasn't IS_ROOT */
			WARN_ON(!--old_parent->d_lockref.count);
	} else {
		target->d_parent = old_parent;
		swap_names(dentry, target);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		__d_rehash(target);
		fsnotify_update_flags(target);
	}
	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	__d_rehash(dentry);
	fsnotify_update_flags(dentry);
	fscrypt_handle_d_move(dentry);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n, d_wait);

	/* Drop locks in reverse order, skipping duplicates. */
	if (dentry->d_parent != old_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (dentry != old_parent)
		spin_unlock(&old_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name.  Takes
 * rename_lock for write around __d_move(); callers with the lock
 * already held use __d_move() directly.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
3045
3046
3047
3048
3049
3050
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 *
 * Both must be positive and non-root (WARNed).  Used by cross-rename;
 * delegates to __d_move() with exchange semantics under rename_lock.
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
3074 {
3075 struct dentry *p;
3076
3077 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
3078 if (p->d_parent == p1)
3079 return p;
3080 }
3081 return NULL;
3082 }
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
/*
 * Helper for d_splice_alias(): move the existing directory alias
 * @alias under @dentry's parent with the new name.  When the parents
 * differ this is effectively a rename, so s_vfs_rename_mutex and the
 * alias's parent i_rwsem (shared) are trylocked — we cannot block here
 * without inverting the usual lock order, so contention yields -ESTALE
 * and the caller's lookup fails transiently.  Caller holds rename_lock.
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative, unhashed dentry to splice @inode into
 *
 * If @inode is a directory that already has an alias elsewhere in the
 * tree, move that alias into @dentry's place instead of creating a
 * second directory alias.  Otherwise attach @inode to @dentry with
 * __d_add().
 *
 * Returns the (possibly pre-existing) dentry the caller should use,
 * NULL when @dentry itself was used, or an ERR_PTR:
 *   -ELOOP  - splicing would have created a directory loop
 *   -ESTALE - propagated from __d_unalias() lock failure
 *
 * On the alias path this function consumes the inode reference
 * (iput()); on the __d_add() path the reference is transferred to the
 * new dentry.  @inode may also be an ERR_PTR, which is passed through.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
if (IS_ERR(inode))
return ERR_CAST(inode);

BUG_ON(!d_unhashed(dentry));

if (!inode)
goto out;

security_d_instantiate(dentry, inode);
spin_lock(&inode->i_lock);
if (S_ISDIR(inode->i_mode)) {
struct dentry *new = __d_find_any_alias(inode);
if (unlikely(new)) {
/* The reference to new ensures it remains an alias */
spin_unlock(&inode->i_lock);
write_seqlock(&rename_lock);
if (unlikely(d_ancestor(new, dentry))) {
/* Moving new under dentry would loop the tree. */
write_sequnlock(&rename_lock);
dput(new);
new = ERR_PTR(-ELOOP);
pr_warn_ratelimited(
"VFS: Lookup of '%s' in %s %s"
" would have caused loop\n",
dentry->d_name.name,
inode->i_sb->s_type->name,
inode->i_sb->s_id);
} else if (!IS_ROOT(new)) {
/* Pin old parent: __d_unalias may reparent new. */
struct dentry *old_parent = dget(new->d_parent);
int err = __d_unalias(inode, dentry, new);
write_sequnlock(&rename_lock);
if (err) {
dput(new);
new = ERR_PTR(err);
}
dput(old_parent);
} else {
/* Disconnected root alias: just move it in place. */
__d_move(new, dentry, false);
write_sequnlock(&rename_lock);
}
iput(inode);
return new;
}
}
out:
__d_add(dentry, inode);
return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3213 {
3214 bool result;
3215 unsigned seq;
3216
3217 if (new_dentry == old_dentry)
3218 return true;
3219
3220 do {
3221
3222 seq = read_seqbegin(&rename_lock);
3223
3224
3225
3226
3227 rcu_read_lock();
3228 if (d_ancestor(old_dentry, new_dentry))
3229 result = true;
3230 else
3231 result = false;
3232 rcu_read_unlock();
3233 } while (read_seqretry(&rename_lock, seq));
3234
3235 return result;
3236 }
3237 EXPORT_SYMBOL(is_subdir);
3238
3239 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3240 {
3241 struct dentry *root = data;
3242 if (dentry != root) {
3243 if (d_unhashed(dentry) || !dentry->d_inode)
3244 return D_WALK_SKIP;
3245
3246 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3247 dentry->d_flags |= DCACHE_GENOCIDE;
3248 dentry->d_lockref.count--;
3249 }
3250 }
3251 return D_WALK_CONTINUE;
3252 }
3253
/*
 * Walk the subtree rooted at @parent and drop one reference from each
 * hashed, positive dentry found (see d_genocide_kill above).
 */
void d_genocide(struct dentry *parent)
{
d_walk(parent, parent, d_genocide_kill);
}

EXPORT_SYMBOL(d_genocide);
3260
/*
 * d_tmpfile - attach an inode to a dentry created for O_TMPFILE
 * @dentry: an unlinked, negative dentry whose name is still inline
 * @inode:  inode to attach; its link count is dropped to reflect that
 *          the file has no directory entry
 *
 * Rewrites the dentry's inline name to "#<ino>" under the parent's and
 * the dentry's own lock (nested ordering: parent first), then
 * instantiates the dentry with the inode.
 */
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
inode_dec_link_count(inode);
BUG_ON(dentry->d_name.name != dentry->d_iname ||
!hlist_unhashed(&dentry->d_u.d_alias) ||
!d_unlinked(dentry));
spin_lock(&dentry->d_parent->d_lock);
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
(unsigned long long)inode->i_ino);
spin_unlock(&dentry->d_lock);
spin_unlock(&dentry->d_parent->d_lock);
d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
3276
3277 static __initdata unsigned long dhash_entries;
3278 static int __init set_dhash_entries(char *str)
3279 {
3280 if (!str)
3281 return 0;
3282 dhash_entries = simple_strtoul(str, &str, 0);
3283 return 1;
3284 }
3285 __setup("dhash_entries=", set_dhash_entries);
3286
static void __init dcache_init_early(void)
{
/*
 * If hashes are distributed across NUMA nodes, defer the
 * hash allocation until vmalloc space is available.
 */
if (hashdist)
return;

dentry_hashtable =
alloc_large_system_hash("Dentry cache",
sizeof(struct hlist_bl_head),
dhash_entries,
13,	/* default: 2^13 buckets per megabyte scaling */
HASH_EARLY | HASH_ZERO,
&d_hash_shift,
NULL,
0,
0);
/* d_hash() shifts the hash right, so invert the bucket-bits count. */
d_hash_shift = 32 - d_hash_shift;
}
3307
static void __init dcache_init(void)
{
/*
 * A constructor could be added for stable state like the lists,
 * but it is probably not worth it because of the cache nature
 * of the dcache.  The d_iname usercopy region allows the inline
 * name to be copied to/from user space.
 */
dentry_cache = KMEM_CACHE_USERCOPY(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
d_iname);

/* Hash may have been set up in dcache_init_early */
if (!hashdist)
return;

dentry_hashtable =
alloc_large_system_hash("Dentry cache",
sizeof(struct hlist_bl_head),
dhash_entries,
13,	/* default: 2^13 buckets per megabyte scaling */
HASH_ZERO,
&d_hash_shift,
NULL,
0,
0);
/* d_hash() shifts the hash right, so invert the bucket-bits count. */
d_hash_shift = 32 - d_hash_shift;
}
3335
3336
3337 struct kmem_cache *names_cachep __read_mostly;
3338 EXPORT_SYMBOL(names_cachep);
3339
/* Early (pre-mm) setup of the VFS caches: in-lookup table and dcache/inode. */
void __init vfs_caches_init_early(void)
{
int i;

for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

dcache_init_early();
inode_init_early();
}
3350
/* Main VFS cache initialization, run once slab allocation is available. */
void __init vfs_caches_init(void)
{
/* PATH_MAX-sized name buffers; whole object may be copied to user. */
names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

dcache_init();
inode_init();
files_init();
files_maxfiles_init();
mnt_init();
bdev_cache_init();
chrdev_init();
}