0001 // SPDX-License-Identifier: GPL-2.0
0002 #include <linux/ceph/ceph_debug.h>
0003 
0004 #include <linux/spinlock.h>
0005 #include <linux/namei.h>
0006 #include <linux/slab.h>
0007 #include <linux/sched.h>
0008 #include <linux/xattr.h>
0009 
0010 #include "super.h"
0011 #include "mds_client.h"
0012 
0013 /*
0014  * Directory operations: readdir, lookup, create, link, unlink,
0015  * rename, etc.
0016  */
0017 
0018 /*
0019  * Ceph MDS operations are specified in terms of a base ino and
0020  * relative path.  Thus, the client can specify an operation on a
0021  * specific inode (e.g., a getattr due to fstat(2)), or as a path
0022  * relative to, say, the root directory.
0023  *
0024  * Normally, we limit ourselves to strict inode ops (no path component)
0025  * or dentry operations (a single path component relative to an ino).  The
0026  * exception to this is open_root_dentry(), which will open the mount
0027  * point by name.
0028  */
0029 
0030 const struct dentry_operations ceph_dentry_ops;
0031 
0032 static bool __dentry_lease_is_valid(struct ceph_dentry_info *di);
0033 static int __dir_lease_try_check(const struct dentry *dentry);
0034 
0035 /*
0036  * Initialize ceph dentry state.
0037  */
0038 static int ceph_d_init(struct dentry *dentry)
0039 {
0040     struct ceph_dentry_info *di;
0041     struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dentry->d_sb);
0042 
0043     di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
0044     if (!di)
0045         return -ENOMEM;          /* oh well */
0046 
0047     di->dentry = dentry;
0048     di->lease_session = NULL;
0049     di->time = jiffies;
0050     dentry->d_fsdata = di;
0051     INIT_LIST_HEAD(&di->lease_list);
0052 
0053     atomic64_inc(&mdsc->metric.total_dentries);
0054 
0055     return 0;
0056 }
0057 
0058 /*
0059  * for f_pos for readdir:
0060  * - hash order:
0061  *  (0xff << 52) | ((24 bits hash) << 28) |
0062  *  (the nth entry has hash collision);
0063  * - frag+name order;
0064  *  ((frag value) << 28) | (the nth entry in frag);
0065  */
0066 #define OFFSET_BITS 28
0067 #define OFFSET_MASK ((1 << OFFSET_BITS) - 1)
0068 #define HASH_ORDER  (0xffull << (OFFSET_BITS + 24))
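/*
 * A worked example (illustrative only): with OFFSET_BITS == 28, the
 * hash-order position for 24-bit hash 0x00abcd at collision index 5 is
 *   HASH_ORDER | ((loff_t)0x00abcd << OFFSET_BITS) | 5
 * and fpos_hash()/fpos_off() below recover 0x00abcd and 5 from it.
 */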
0069 loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
0070 {
0071     loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
0072     if (hash_order)
0073         fpos |= HASH_ORDER;
0074     return fpos;
0075 }
0076 
0077 static bool is_hash_order(loff_t p)
0078 {
0079     return (p & HASH_ORDER) == HASH_ORDER;
0080 }
0081 
0082 static unsigned fpos_frag(loff_t p)
0083 {
0084     return p >> OFFSET_BITS;
0085 }
0086 
0087 static unsigned fpos_hash(loff_t p)
0088 {
0089     return ceph_frag_value(fpos_frag(p));
0090 }
0091 
0092 static unsigned fpos_off(loff_t p)
0093 {
0094     return p & OFFSET_MASK;
0095 }
0096 
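/* compare two readdir positions: frag part first, then offset within the frag */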
0097 static int fpos_cmp(loff_t l, loff_t r)
0098 {
0099     int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
0100     if (v)
0101         return v;
0102     return (int)(fpos_off(l) - fpos_off(r));
0103 }
0104 
0105 /*
0106  * make note of the last dentry we read, so we can
0107  * continue at the same lexicographical point,
0108  * regardless of what dir changes take place on the
0109  * server.
0110  */
0111 static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
0112                     int len, unsigned next_offset)
0113 {
0114     char *buf = kmalloc(len+1, GFP_KERNEL);
0115     if (!buf)
0116         return -ENOMEM;
0117     kfree(dfi->last_name);
0118     dfi->last_name = buf;
0119     memcpy(dfi->last_name, name, len);
0120     dfi->last_name[len] = 0;
0121     dfi->next_offset = next_offset;
0122     dout("note_last_dentry '%s'\n", dfi->last_name);
0123     return 0;
0124 }
0125 
0126 
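/*
 * Fetch the idx-th cached dentry pointer for 'parent' from the readdir
 * cache (pages in the dir's page cache) and take a reference if it is
 * still live.  Returns NULL past the end of the cache, or
 * ERR_PTR(-EAGAIN) if the cached entry cannot be used.
 */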
0127 static struct dentry *
0128 __dcache_find_get_entry(struct dentry *parent, u64 idx,
0129             struct ceph_readdir_cache_control *cache_ctl)
0130 {
0131     struct inode *dir = d_inode(parent);
0132     struct dentry *dentry;
0133     unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
0134     loff_t ptr_pos = idx * sizeof(struct dentry *);
0135     pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;
0136 
0137     if (ptr_pos >= i_size_read(dir))
0138         return NULL;
0139 
0140     if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
0141         ceph_readdir_cache_release(cache_ctl);
0142         cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
0143         if (!cache_ctl->page) {
0144             dout(" page %lu not found\n", ptr_pgoff);
0145             return ERR_PTR(-EAGAIN);
0146         }
0147         /* reading/filling the cache are serialized by
0148            i_rwsem, no need to use page lock */
0149         unlock_page(cache_ctl->page);
0150         cache_ctl->dentries = kmap(cache_ctl->page);
0151     }
0152 
0153     cache_ctl->index = idx & idx_mask;
0154 
0155     rcu_read_lock();
0156     spin_lock(&parent->d_lock);
0157     /* check i_size again here, because empty directory can be
0158      * marked as complete while not holding the i_rwsem. */
0159     if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
0160         dentry = cache_ctl->dentries[cache_ctl->index];
0161     else
0162         dentry = NULL;
0163     spin_unlock(&parent->d_lock);
0164     if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
0165         dentry = NULL;
0166     rcu_read_unlock();
0167     return dentry ? : ERR_PTR(-EAGAIN);
0168 }
0169 
0170 /*
0171  * When possible, we try to satisfy a readdir by peeking at the
0172  * dcache.  We make this work by carefully ordering dentries on
0173  * d_child when we initially get results back from the MDS, and
0174  * falling back to a "normal" sync readdir if any dentries in the dir
0175  * are dropped.
0176  *
0177  * Complete dir indicates that we have all dentries in the dir.  It is
0178  * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
0179  * the MDS if/when the directory is modified).
0180  */
0181 static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
0182                 int shared_gen)
0183 {
0184     struct ceph_dir_file_info *dfi = file->private_data;
0185     struct dentry *parent = file->f_path.dentry;
0186     struct inode *dir = d_inode(parent);
0187     struct dentry *dentry, *last = NULL;
0188     struct ceph_dentry_info *di;
0189     struct ceph_readdir_cache_control cache_ctl = {};
0190     u64 idx = 0;
0191     int err = 0;
0192 
0193     dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos);
0194 
0195     /* search start position */
0196     if (ctx->pos > 2) {
0197         u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
0198         while (count > 0) {
0199             u64 step = count >> 1;
0200             dentry = __dcache_find_get_entry(parent, idx + step,
0201                              &cache_ctl);
0202             if (!dentry) {
0203                 /* use linear search */
0204                 idx = 0;
0205                 break;
0206             }
0207             if (IS_ERR(dentry)) {
0208                 err = PTR_ERR(dentry);
0209                 goto out;
0210             }
0211             di = ceph_dentry(dentry);
0212             spin_lock(&dentry->d_lock);
0213             if (fpos_cmp(di->offset, ctx->pos) < 0) {
0214                 idx += step + 1;
0215                 count -= step + 1;
0216             } else {
0217                 count = step;
0218             }
0219             spin_unlock(&dentry->d_lock);
0220             dput(dentry);
0221         }
0222 
0223         dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
0224     }
0225 
0226 
0227     for (;;) {
0228         bool emit_dentry = false;
0229         dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
0230         if (!dentry) {
0231             dfi->file_info.flags |= CEPH_F_ATEND;
0232             err = 0;
0233             break;
0234         }
0235         if (IS_ERR(dentry)) {
0236             err = PTR_ERR(dentry);
0237             goto out;
0238         }
0239 
0240         spin_lock(&dentry->d_lock);
0241         di = ceph_dentry(dentry);
0242         if (d_unhashed(dentry) ||
0243             d_really_is_negative(dentry) ||
0244             di->lease_shared_gen != shared_gen) {
0245             spin_unlock(&dentry->d_lock);
0246             dput(dentry);
0247             err = -EAGAIN;
0248             goto out;
0249         }
0250         if (fpos_cmp(ctx->pos, di->offset) <= 0) {
0251             __ceph_dentry_dir_lease_touch(di);
0252             emit_dentry = true;
0253         }
0254         spin_unlock(&dentry->d_lock);
0255 
0256         if (emit_dentry) {
0257             dout(" %llx dentry %p %pd %p\n", di->offset,
0258                  dentry, dentry, d_inode(dentry));
0259             ctx->pos = di->offset;
0260             if (!dir_emit(ctx, dentry->d_name.name,
0261                       dentry->d_name.len, ceph_present_inode(d_inode(dentry)),
0262                       d_inode(dentry)->i_mode >> 12)) {
0263                 dput(dentry);
0264                 err = 0;
0265                 break;
0266             }
0267             ctx->pos++;
0268 
0269             if (last)
0270                 dput(last);
0271             last = dentry;
0272         } else {
0273             dput(dentry);
0274         }
0275     }
0276 out:
0277     ceph_readdir_cache_release(&cache_ctl);
0278     if (last) {
0279         int ret;
0280         di = ceph_dentry(last);
0281         ret = note_last_dentry(dfi, last->d_name.name, last->d_name.len,
0282                        fpos_off(di->offset) + 1);
0283         if (ret < 0)
0284             err = ret;
0285         dput(last);
0286         /* last_name no longer matches the cache index */
0287         if (dfi->readdir_cache_idx >= 0) {
0288             dfi->readdir_cache_idx = -1;
0289             dfi->dir_release_count = 0;
0290         }
0291     }
0292     return err;
0293 }
0294 
0295 static bool need_send_readdir(struct ceph_dir_file_info *dfi, loff_t pos)
0296 {
0297     if (!dfi->last_readdir)
0298         return true;
0299     if (is_hash_order(pos))
0300         return !ceph_frag_contains_value(dfi->frag, fpos_hash(pos));
0301     else
0302         return dfi->frag != fpos_frag(pos);
0303 }
0304 
0305 static int ceph_readdir(struct file *file, struct dir_context *ctx)
0306 {
0307     struct ceph_dir_file_info *dfi = file->private_data;
0308     struct inode *inode = file_inode(file);
0309     struct ceph_inode_info *ci = ceph_inode(inode);
0310     struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
0311     struct ceph_mds_client *mdsc = fsc->mdsc;
0312     int i;
0313     int err;
0314     unsigned frag = -1;
0315     struct ceph_mds_reply_info_parsed *rinfo;
0316 
0317     dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
0318     if (dfi->file_info.flags & CEPH_F_ATEND)
0319         return 0;
0320 
0321     /* always start with . and .. */
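    /* i_mode >> 12 is the DT_* file type value that dir_emit() expects */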
0322     if (ctx->pos == 0) {
0323         dout("readdir off 0 -> '.'\n");
0324         if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode),
0325                 inode->i_mode >> 12))
0326             return 0;
0327         ctx->pos = 1;
0328     }
0329     if (ctx->pos == 1) {
0330         u64 ino;
0331         struct dentry *dentry = file->f_path.dentry;
0332 
0333         spin_lock(&dentry->d_lock);
0334         ino = ceph_present_inode(dentry->d_parent->d_inode);
0335         spin_unlock(&dentry->d_lock);
0336 
0337         dout("readdir off 1 -> '..'\n");
0338         if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12))
0339             return 0;
0340         ctx->pos = 2;
0341     }
0342 
0343     spin_lock(&ci->i_ceph_lock);
0344     /* request Fx cap. If we have Fx, we don't need to release the
0345      * Fs cap for a later create/unlink. */
0346     __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_WR);
0347     /* can we use the dcache? */
0348     if (ceph_test_mount_opt(fsc, DCACHE) &&
0349         !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
0350         ceph_snap(inode) != CEPH_SNAPDIR &&
0351         __ceph_dir_is_complete_ordered(ci) &&
0352         __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
0353         int shared_gen = atomic_read(&ci->i_shared_gen);
0354 
0355         spin_unlock(&ci->i_ceph_lock);
0356         err = __dcache_readdir(file, ctx, shared_gen);
0357         if (err != -EAGAIN)
0358             return err;
0359     } else {
0360         spin_unlock(&ci->i_ceph_lock);
0361     }
0362 
0363     /* proceed with a normal readdir */
0364 more:
0365     /* do we have the correct frag content buffered? */
0366     if (need_send_readdir(dfi, ctx->pos)) {
0367         struct ceph_mds_request *req;
0368         int op = ceph_snap(inode) == CEPH_SNAPDIR ?
0369             CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
0370 
0371         /* discard old result, if any */
0372         if (dfi->last_readdir) {
0373             ceph_mdsc_put_request(dfi->last_readdir);
0374             dfi->last_readdir = NULL;
0375         }
0376 
0377         if (is_hash_order(ctx->pos)) {
0378             /* fragtree isn't always accurate. choose frag
0379              * based on previous reply when possible. */
0380             if (frag == (unsigned)-1)
0381                 frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
0382                             NULL, NULL);
0383         } else {
0384             frag = fpos_frag(ctx->pos);
0385         }
0386 
0387         dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
0388              ceph_vinop(inode), frag, dfi->last_name);
0389         req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
0390         if (IS_ERR(req))
0391             return PTR_ERR(req);
0392         err = ceph_alloc_readdir_reply_buffer(req, inode);
0393         if (err) {
0394             ceph_mdsc_put_request(req);
0395             return err;
0396         }
0397         /* hints to request -> mds selection code */
0398         req->r_direct_mode = USE_AUTH_MDS;
0399         if (op == CEPH_MDS_OP_READDIR) {
0400             req->r_direct_hash = ceph_frag_value(frag);
0401             __set_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
0402             req->r_inode_drop = CEPH_CAP_FILE_EXCL;
0403         }
0404         if (dfi->last_name) {
0405             req->r_path2 = kstrdup(dfi->last_name, GFP_KERNEL);
0406             if (!req->r_path2) {
0407                 ceph_mdsc_put_request(req);
0408                 return -ENOMEM;
0409             }
0410         } else if (is_hash_order(ctx->pos)) {
0411             req->r_args.readdir.offset_hash =
0412                 cpu_to_le32(fpos_hash(ctx->pos));
0413         }
0414 
0415         req->r_dir_release_cnt = dfi->dir_release_count;
0416         req->r_dir_ordered_cnt = dfi->dir_ordered_count;
0417         req->r_readdir_cache_idx = dfi->readdir_cache_idx;
0418         req->r_readdir_offset = dfi->next_offset;
0419         req->r_args.readdir.frag = cpu_to_le32(frag);
0420         req->r_args.readdir.flags =
0421                 cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);
0422 
0423         req->r_inode = inode;
0424         ihold(inode);
0425         req->r_dentry = dget(file->f_path.dentry);
0426         err = ceph_mdsc_do_request(mdsc, NULL, req);
0427         if (err < 0) {
0428             ceph_mdsc_put_request(req);
0429             return err;
0430         }
0431         dout("readdir got and parsed readdir result=%d on "
0432              "frag %x, end=%d, complete=%d, hash_order=%d\n",
0433              err, frag,
0434              (int)req->r_reply_info.dir_end,
0435              (int)req->r_reply_info.dir_complete,
0436              (int)req->r_reply_info.hash_order);
0437 
0438         rinfo = &req->r_reply_info;
0439         if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
0440             frag = le32_to_cpu(rinfo->dir_dir->frag);
0441             if (!rinfo->hash_order) {
0442                 dfi->next_offset = req->r_readdir_offset;
0443                 /* adjust ctx->pos to beginning of frag */
0444                 ctx->pos = ceph_make_fpos(frag,
0445                               dfi->next_offset,
0446                               false);
0447             }
0448         }
0449 
0450         dfi->frag = frag;
0451         dfi->last_readdir = req;
0452 
0453         if (test_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags)) {
0454             dfi->readdir_cache_idx = req->r_readdir_cache_idx;
0455             if (dfi->readdir_cache_idx < 0) {
0456                 /* preclude from marking dir ordered */
0457                 dfi->dir_ordered_count = 0;
0458             } else if (ceph_frag_is_leftmost(frag) &&
0459                    dfi->next_offset == 2) {
0460                 /* note dir version at start of readdir so
0461                  * we can tell if any dentries get dropped */
0462                 dfi->dir_release_count = req->r_dir_release_cnt;
0463                 dfi->dir_ordered_count = req->r_dir_ordered_cnt;
0464             }
0465         } else {
0466             dout("readdir !did_prepopulate\n");
0467             /* disable readdir cache */
0468             dfi->readdir_cache_idx = -1;
0469             /* preclude from marking dir complete */
0470             dfi->dir_release_count = 0;
0471         }
0472 
0473         /* note next offset and last dentry name */
0474         if (rinfo->dir_nr > 0) {
0475             struct ceph_mds_reply_dir_entry *rde =
0476                     rinfo->dir_entries + (rinfo->dir_nr-1);
0477             unsigned next_offset = req->r_reply_info.dir_end ?
0478                     2 : (fpos_off(rde->offset) + 1);
0479             err = note_last_dentry(dfi, rde->name, rde->name_len,
0480                            next_offset);
0481             if (err) {
0482                 ceph_mdsc_put_request(dfi->last_readdir);
0483                 dfi->last_readdir = NULL;
0484                 return err;
0485             }
0486         } else if (req->r_reply_info.dir_end) {
0487             dfi->next_offset = 2;
0488             /* keep last name */
0489         }
0490     }
0491 
0492     rinfo = &dfi->last_readdir->r_reply_info;
0493     dout("readdir frag %x num %d pos %llx chunk first %llx\n",
0494          dfi->frag, rinfo->dir_nr, ctx->pos,
0495          rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
0496 
0497     i = 0;
0498     /* search start position */
0499     if (rinfo->dir_nr > 0) {
0500         int step, nr = rinfo->dir_nr;
0501         while (nr > 0) {
0502             step = nr >> 1;
0503             if (rinfo->dir_entries[i + step].offset < ctx->pos) {
0504                 i +=  step + 1;
0505                 nr -= step + 1;
0506             } else {
0507                 nr = step;
0508             }
0509         }
0510     }
0511     for (; i < rinfo->dir_nr; i++) {
0512         struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
0513 
0514         BUG_ON(rde->offset < ctx->pos);
0515 
0516         ctx->pos = rde->offset;
0517         dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
0518              i, rinfo->dir_nr, ctx->pos,
0519              rde->name_len, rde->name, &rde->inode.in);
0520 
0521         BUG_ON(!rde->inode.in);
0522 
0523         if (!dir_emit(ctx, rde->name, rde->name_len,
0524                   ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
0525                   le32_to_cpu(rde->inode.in->mode) >> 12)) {
0526             /*
0527              * NOTE: no need to put 'dfi->last_readdir' here, because
0528              * when dir_emit stops us it most likely just ran out of
0529              * buffer space. The next readdir call will continue from
0530              * the buffered result.
0531              */
0532             dout("filldir stopping us...\n");
0533             return 0;
0534         }
0535         ctx->pos++;
0536     }
0537 
0538     ceph_mdsc_put_request(dfi->last_readdir);
0539     dfi->last_readdir = NULL;
0540 
0541     if (dfi->next_offset > 2) {
0542         frag = dfi->frag;
0543         goto more;
0544     }
0545 
0546     /* more frags? */
0547     if (!ceph_frag_is_rightmost(dfi->frag)) {
0548         frag = ceph_frag_next(dfi->frag);
0549         if (is_hash_order(ctx->pos)) {
0550             loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
0551                             dfi->next_offset, true);
0552             if (new_pos > ctx->pos)
0553                 ctx->pos = new_pos;
0554             /* keep last_name */
0555         } else {
0556             ctx->pos = ceph_make_fpos(frag, dfi->next_offset,
0557                             false);
0558             kfree(dfi->last_name);
0559             dfi->last_name = NULL;
0560         }
0561         dout("readdir next frag is %x\n", frag);
0562         goto more;
0563     }
0564     dfi->file_info.flags |= CEPH_F_ATEND;
0565 
0566     /*
0567      * if dir_release_count still matches the dir, no dentries
0568      * were released during the whole readdir, and we should have
0569      * the complete dir contents in our cache.
0570      */
0571     if (atomic64_read(&ci->i_release_count) ==
0572                     dfi->dir_release_count) {
0573         spin_lock(&ci->i_ceph_lock);
0574         if (dfi->dir_ordered_count ==
0575                 atomic64_read(&ci->i_ordered_count)) {
0576             dout(" marking %p complete and ordered\n", inode);
0577             /* use i_size to track number of entries in
0578              * readdir cache */
0579             BUG_ON(dfi->readdir_cache_idx < 0);
0580             i_size_write(inode, dfi->readdir_cache_idx *
0581                      sizeof(struct dentry*));
0582         } else {
0583             dout(" marking %p complete\n", inode);
0584         }
0585         __ceph_dir_set_complete(ci, dfi->dir_release_count,
0586                     dfi->dir_ordered_count);
0587         spin_unlock(&ci->i_ceph_lock);
0588     }
0589 
0590     dout("readdir %p file %p done.\n", inode, file);
0591     return 0;
0592 }
0593 
0594 static void reset_readdir(struct ceph_dir_file_info *dfi)
0595 {
0596     if (dfi->last_readdir) {
0597         ceph_mdsc_put_request(dfi->last_readdir);
0598         dfi->last_readdir = NULL;
0599     }
0600     kfree(dfi->last_name);
0601     dfi->last_name = NULL;
0602     dfi->dir_release_count = 0;
0603     dfi->readdir_cache_idx = -1;
0604     dfi->next_offset = 2;  /* compensate for . and .. */
0605     dfi->file_info.flags &= ~CEPH_F_ATEND;
0606 }
0607 
0608 /*
0609  * discard buffered readdir content on seekdir(0), or seek to new frag,
0610  * or seek prior to current chunk
0611  */
0612 static bool need_reset_readdir(struct ceph_dir_file_info *dfi, loff_t new_pos)
0613 {
0614     struct ceph_mds_reply_info_parsed *rinfo;
0615     loff_t chunk_offset;
0616     if (new_pos == 0)
0617         return true;
0618     if (is_hash_order(new_pos)) {
0619         /* no need to reset last_name for a forward seek when
0620          * dentries are sorted in hash order */
0621     } else if (dfi->frag != fpos_frag(new_pos)) {
0622         return true;
0623     }
0624     rinfo = dfi->last_readdir ? &dfi->last_readdir->r_reply_info : NULL;
0625     if (!rinfo || !rinfo->dir_nr)
0626         return true;
0627     chunk_offset = rinfo->dir_entries[0].offset;
0628     return new_pos < chunk_offset ||
0629            is_hash_order(new_pos) != is_hash_order(chunk_offset);
0630 }
0631 
0632 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
0633 {
0634     struct ceph_dir_file_info *dfi = file->private_data;
0635     struct inode *inode = file->f_mapping->host;
0636     loff_t retval;
0637 
0638     inode_lock(inode);
0639     retval = -EINVAL;
0640     switch (whence) {
0641     case SEEK_CUR:
0642         offset += file->f_pos;
0643         break;
0644     case SEEK_SET:
0645         break;
0646     case SEEK_END:
0647         retval = -EOPNOTSUPP;
0648         goto out;
0649     default:
0650         goto out;
0651     }
0652 
0653     if (offset >= 0) {
0654         if (need_reset_readdir(dfi, offset)) {
0655             dout("dir_llseek dropping %p content\n", file);
0656             reset_readdir(dfi);
0657         } else if (is_hash_order(offset) && offset > file->f_pos) {
0658             /* for hash offset, we don't know if a forward seek
0659              * is within same frag */
0660             dfi->dir_release_count = 0;
0661             dfi->readdir_cache_idx = -1;
0662         }
0663 
0664         if (offset != file->f_pos) {
0665             file->f_pos = offset;
0666             file->f_version = 0;
0667             dfi->file_info.flags &= ~CEPH_F_ATEND;
0668         }
0669         retval = offset;
0670     }
0671 out:
0672     inode_unlock(inode);
0673     return retval;
0674 }
0675 
0676 /*
0677  * Handle lookups for the hidden .snap directory.
0678  */
0679 struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
0680                    struct dentry *dentry)
0681 {
0682     struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
0683     struct inode *parent = d_inode(dentry->d_parent); /* we hold i_rwsem */
0684 
0685     /* .snap dir? */
0686     if (ceph_snap(parent) == CEPH_NOSNAP &&
0687         strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) {
0688         struct dentry *res;
0689         struct inode *inode = ceph_get_snapdir(parent);
0690 
0691         res = d_splice_alias(inode, dentry);
0692         dout("ENOENT on snapdir %p '%pd', linking to snapdir %p. Spliced dentry %p\n",
0693              dentry, dentry, inode, res);
0694         if (res)
0695             dentry = res;
0696     }
0697     return dentry;
0698 }
0699 
0700 /*
0701  * Figure out final result of a lookup/open request.
0702  *
0703  * Mainly, make sure we return the final req->r_dentry (if it already
0704  * existed) in place of the original VFS-provided dentry when they
0705  * differ.
0706  *
0707  * Gracefully handle the case where the MDS replies with -ENOENT and
0708  * no trace (which it may do, at its discretion, e.g., if it doesn't
0709  * care to issue a lease on the negative dentry).
0710  */
0711 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
0712                   struct dentry *dentry, int err)
0713 {
0714     if (err == -ENOENT) {
0715         /* no trace? */
0716         err = 0;
0717         if (!req->r_reply_info.head->is_dentry) {
0718             dout("ENOENT and no trace, dentry %p inode %p\n",
0719                  dentry, d_inode(dentry));
0720             if (d_really_is_positive(dentry)) {
0721                 d_drop(dentry);
0722                 err = -ENOENT;
0723             } else {
0724                 d_add(dentry, NULL);
0725             }
0726         }
0727     }
0728     if (err)
0729         dentry = ERR_PTR(err);
0730     else if (dentry != req->r_dentry)
0731         dentry = dget(req->r_dentry);   /* we got spliced */
0732     else
0733         dentry = NULL;
0734     return dentry;
0735 }
0736 
0737 static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
0738 {
0739     return ceph_ino(inode) == CEPH_INO_ROOT &&
0740         strncmp(dentry->d_name.name, ".ceph", 5) == 0;
0741 }
0742 
0743 /*
0744  * Look up a single dir entry.  If there is a lookup intent, inform
0745  * the MDS so that it gets our 'caps wanted' value in a single op.
0746  */
0747 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
0748                   unsigned int flags)
0749 {
0750     struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
0751     struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
0752     struct ceph_mds_request *req;
0753     int op;
0754     int mask;
0755     int err;
0756 
0757     dout("lookup %p dentry %p '%pd'\n",
0758          dir, dentry, dentry);
0759 
0760     if (dentry->d_name.len > NAME_MAX)
0761         return ERR_PTR(-ENAMETOOLONG);
0762 
0763     /* can we conclude ENOENT locally? */
0764     if (d_really_is_negative(dentry)) {
0765         struct ceph_inode_info *ci = ceph_inode(dir);
0766         struct ceph_dentry_info *di = ceph_dentry(dentry);
0767 
0768         spin_lock(&ci->i_ceph_lock);
0769         dout(" dir %p flags are 0x%lx\n", dir, ci->i_ceph_flags);
0770         if (strncmp(dentry->d_name.name,
0771                 fsc->mount_options->snapdir_name,
0772                 dentry->d_name.len) &&
0773             !is_root_ceph_dentry(dir, dentry) &&
0774             ceph_test_mount_opt(fsc, DCACHE) &&
0775             __ceph_dir_is_complete(ci) &&
0776             __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
0777             __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
0778             spin_unlock(&ci->i_ceph_lock);
0779             dout(" dir %p complete, -ENOENT\n", dir);
0780             d_add(dentry, NULL);
0781             di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
0782             return NULL;
0783         }
0784         spin_unlock(&ci->i_ceph_lock);
0785     }
0786 
0787     op = ceph_snap(dir) == CEPH_SNAPDIR ?
0788         CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
0789     req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
0790     if (IS_ERR(req))
0791         return ERR_CAST(req);
0792     req->r_dentry = dget(dentry);
0793     req->r_num_caps = 2;
0794 
0795     mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
0796     if (ceph_security_xattr_wanted(dir))
0797         mask |= CEPH_CAP_XATTR_SHARED;
0798     req->r_args.getattr.mask = cpu_to_le32(mask);
0799 
0800     ihold(dir);
0801     req->r_parent = dir;
0802     set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
0803     err = ceph_mdsc_do_request(mdsc, NULL, req);
0804     if (err == -ENOENT) {
0805         struct dentry *res;
0806 
0807         res = ceph_handle_snapdir(req, dentry);
0808         if (IS_ERR(res)) {
0809             err = PTR_ERR(res);
0810         } else {
0811             dentry = res;
0812             err = 0;
0813         }
0814     }
0815     dentry = ceph_finish_lookup(req, dentry, err);
0816     ceph_mdsc_put_request(req);  /* will dput(dentry) */
0817     dout("lookup result=%p\n", dentry);
0818     return dentry;
0819 }
0820 
0821 /*
0822  * If we do a create but get no trace back from the MDS, follow up with
0823  * a lookup (the VFS expects us to link up the provided dentry).
0824  */
0825 int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
0826 {
0827     struct dentry *result = ceph_lookup(dir, dentry, 0);
0828 
0829     if (result && !IS_ERR(result)) {
0830         /*
0831          * We created the item, then did a lookup, and found
0832          * it was already linked to another inode we already
0833          * had in our cache (and thus got spliced). To not
0834          * confuse VFS (especially when inode is a directory),
0835          * we don't link our dentry to that inode, return an
0836          * error instead.
0837          *
0838          * This event should be rare; it happens only when we talk
0839          * to an old MDS. A recent MDS does not send a traceless
0840          * reply for a request that creates a new inode.
0841          */
0842         d_drop(result);
0843         return -ESTALE;
0844     }
0845     return PTR_ERR(result);
0846 }
0847 
0848 static int ceph_mknod(struct user_namespace *mnt_userns, struct inode *dir,
0849               struct dentry *dentry, umode_t mode, dev_t rdev)
0850 {
0851     struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
0852     struct ceph_mds_request *req;
0853     struct ceph_acl_sec_ctx as_ctx = {};
0854     int err;
0855 
0856     if (ceph_snap(dir) != CEPH_NOSNAP)
0857         return -EROFS;
0858 
0859     err = ceph_wait_on_conflict_unlink(dentry);
0860     if (err)
0861         return err;
0862 
0863     if (ceph_quota_is_max_files_exceeded(dir)) {
0864         err = -EDQUOT;
0865         goto out;
0866     }
0867 
0868     err = ceph_pre_init_acls(dir, &mode, &as_ctx);
0869     if (err < 0)
0870         goto out;
0871     err = ceph_security_init_secctx(dentry, mode, &as_ctx);
0872     if (err < 0)
0873         goto out;
0874 
0875     dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
0876          dir, dentry, mode, rdev);
0877     req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
0878     if (IS_ERR(req)) {
0879         err = PTR_ERR(req);
0880         goto out;
0881     }
0882     req->r_dentry = dget(dentry);
0883     req->r_num_caps = 2;
0884     req->r_parent = dir;
0885     ihold(dir);
0886     set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
0887     req->r_args.mknod.mode = cpu_to_le32(mode);
0888     req->r_args.mknod.rdev = cpu_to_le32(rdev);
0889     req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
0890     req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
0891     if (as_ctx.pagelist) {
0892         req->r_pagelist = as_ctx.pagelist;
0893         as_ctx.pagelist = NULL;
0894     }
0895     err = ceph_mdsc_do_request(mdsc, dir, req);
0896     if (!err && !req->r_reply_info.head->is_dentry)
0897         err = ceph_handle_notrace_create(dir, dentry);
0898     ceph_mdsc_put_request(req);
0899 out:
0900     if (!err)
0901         ceph_init_inode_acls(d_inode(dentry), &as_ctx);
0902     else
0903         d_drop(dentry);
0904     ceph_release_acl_sec_ctx(&as_ctx);
0905     return err;
0906 }
0907 
0908 static int ceph_create(struct user_namespace *mnt_userns, struct inode *dir,
0909                struct dentry *dentry, umode_t mode, bool excl)
0910 {
0911     return ceph_mknod(mnt_userns, dir, dentry, mode, 0);
0912 }
0913 
0914 static int ceph_symlink(struct user_namespace *mnt_userns, struct inode *dir,
0915             struct dentry *dentry, const char *dest)
0916 {
0917     struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
0918     struct ceph_mds_request *req;
0919     struct ceph_acl_sec_ctx as_ctx = {};
0920     int err;
0921 
0922     if (ceph_snap(dir) != CEPH_NOSNAP)
0923         return -EROFS;
0924 
0925     err = ceph_wait_on_conflict_unlink(dentry);
0926     if (err)
0927         return err;
0928 
0929     if (ceph_quota_is_max_files_exceeded(dir)) {
0930         err = -EDQUOT;
0931         goto out;
0932     }
0933 
0934     err = ceph_security_init_secctx(dentry, S_IFLNK | 0777, &as_ctx);
0935     if (err < 0)
0936         goto out;
0937 
0938     dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
0939     req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
0940     if (IS_ERR(req)) {
0941         err = PTR_ERR(req);
0942         goto out;
0943     }
0944     req->r_path2 = kstrdup(dest, GFP_KERNEL);
0945     if (!req->r_path2) {
0946         err = -ENOMEM;
0947         ceph_mdsc_put_request(req);
0948         goto out;
0949     }
0950     req->r_parent = dir;
0951     ihold(dir);
0952 
0953     set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
0954     req->r_dentry = dget(dentry);
0955     req->r_num_caps = 2;
0956     req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
0957     req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
0958     if (as_ctx.pagelist) {
0959         req->r_pagelist = as_ctx.pagelist;
0960         as_ctx.pagelist = NULL;
0961     }
0962     err = ceph_mdsc_do_request(mdsc, dir, req);
0963     if (!err && !req->r_reply_info.head->is_dentry)
0964         err = ceph_handle_notrace_create(dir, dentry);
0965     ceph_mdsc_put_request(req);
0966 out:
0967     if (err)
0968         d_drop(dentry);
0969     ceph_release_acl_sec_ctx(&as_ctx);
0970     return err;
0971 }
0972 
0973 static int ceph_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
0974               struct dentry *dentry, umode_t mode)
0975 {
0976     struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
0977     struct ceph_mds_request *req;
0978     struct ceph_acl_sec_ctx as_ctx = {};
0979     int err;
0980     int op;
0981 
0982     err = ceph_wait_on_conflict_unlink(dentry);
0983     if (err)
0984         return err;
0985 
0986     if (ceph_snap(dir) == CEPH_SNAPDIR) {
0987         /* mkdir .snap/foo is a MKSNAP */
0988         op = CEPH_MDS_OP_MKSNAP;
0989         dout("mksnap dir %p snap '%pd' dn %p\n", dir,
0990              dentry, dentry);
0991     } else if (ceph_snap(dir) == CEPH_NOSNAP) {
0992         dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
0993         op = CEPH_MDS_OP_MKDIR;
0994     } else {
0995         err = -EROFS;
0996         goto out;
0997     }
0998 
0999     if (op == CEPH_MDS_OP_MKDIR &&
1000         ceph_quota_is_max_files_exceeded(dir)) {
1001         err = -EDQUOT;
1002         goto out;
1003     }
1004 
1005     mode |= S_IFDIR;
1006     err = ceph_pre_init_acls(dir, &mode, &as_ctx);
1007     if (err < 0)
1008         goto out;
1009     err = ceph_security_init_secctx(dentry, mode, &as_ctx);
1010     if (err < 0)
1011         goto out;
1012 
1013     req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1014     if (IS_ERR(req)) {
1015         err = PTR_ERR(req);
1016         goto out;
1017     }
1018 
1019     req->r_dentry = dget(dentry);
1020     req->r_num_caps = 2;
1021     req->r_parent = dir;
1022     ihold(dir);
1023     set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
1024     req->r_args.mkdir.mode = cpu_to_le32(mode);
1025     req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
1026     req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1027     if (as_ctx.pagelist) {
1028         req->r_pagelist = as_ctx.pagelist;
1029         as_ctx.pagelist = NULL;
1030     }
1031     err = ceph_mdsc_do_request(mdsc, dir, req);
1032     if (!err &&
1033         !req->r_reply_info.head->is_target &&
1034         !req->r_reply_info.head->is_dentry)
1035         err = ceph_handle_notrace_create(dir, dentry);
1036     ceph_mdsc_put_request(req);
1037 out:
1038     if (!err)
1039         ceph_init_inode_acls(d_inode(dentry), &as_ctx);
1040     else
1041         d_drop(dentry);
1042     ceph_release_acl_sec_ctx(&as_ctx);
1043     return err;
1044 }
1045 
1046 static int ceph_link(struct dentry *old_dentry, struct inode *dir,
1047              struct dentry *dentry)
1048 {
1049     struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
1050     struct ceph_mds_request *req;
1051     int err;
1052 
1053     err = ceph_wait_on_conflict_unlink(dentry);
1054     if (err)
1055         return err;
1056 
1057     if (ceph_snap(dir) != CEPH_NOSNAP)
1058         return -EROFS;
1059 
1060     dout("link in dir %p old_dentry %p dentry %p\n", dir,
1061          old_dentry, dentry);
1062     req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
1063     if (IS_ERR(req)) {
1064         d_drop(dentry);
1065         return PTR_ERR(req);
1066     }
1067     req->r_dentry = dget(dentry);
1068     req->r_num_caps = 2;
1069     req->r_old_dentry = dget(old_dentry);
1070     req->r_parent = dir;
1071     ihold(dir);
1072     set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
1073     req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
1074     req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1075     /* release LINK_SHARED on source inode (mds will lock it) */
1076     req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
1077     err = ceph_mdsc_do_request(mdsc, dir, req);
1078     if (err) {
1079         d_drop(dentry);
1080     } else if (!req->r_reply_info.head->is_dentry) {
1081         ihold(d_inode(old_dentry));
1082         d_instantiate(dentry, d_inode(old_dentry));
1083     }
1084     ceph_mdsc_put_request(req);
1085     return err;
1086 }
1087 
1088 static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
1089                  struct ceph_mds_request *req)
1090 {
1091     struct dentry *dentry = req->r_dentry;
1092     struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
1093     struct ceph_dentry_info *di = ceph_dentry(dentry);
1094     int result = req->r_err ? req->r_err :
1095             le32_to_cpu(req->r_reply_info.head->result);
1096 
1097     if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
1098         pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
1099             __func__, dentry, dentry);
1100 
1101     spin_lock(&fsc->async_unlink_conflict_lock);
1102     hash_del_rcu(&di->hnode);
1103     spin_unlock(&fsc->async_unlink_conflict_lock);
1104 
1105     spin_lock(&dentry->d_lock);
1106     di->flags &= ~CEPH_DENTRY_ASYNC_UNLINK;
1107     wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT);
1108     spin_unlock(&dentry->d_lock);
1109 
1110     synchronize_rcu();
1111 
1112     if (result == -EJUKEBOX)
1113         goto out;
1114 
1115     /* If op failed, mark everyone involved for errors */
1116     if (result) {
1117         int pathlen = 0;
1118         u64 base = 0;
1119         char *path = ceph_mdsc_build_path(dentry, &pathlen,
1120                           &base, 0);
1121 
1122         /* mark error on parent + clear complete */
1123         mapping_set_error(req->r_parent->i_mapping, result);
1124         ceph_dir_clear_complete(req->r_parent);
1125 
1126         /* drop the dentry -- we don't know its status */
1127         if (!d_unhashed(dentry))
1128             d_drop(dentry);
1129 
1130         /* mark inode itself for an error (since metadata is bogus) */
1131         mapping_set_error(req->r_old_inode->i_mapping, result);
1132 
1133         pr_warn("async unlink failure path=(%llx)%s result=%d!\n",
1134             base, IS_ERR(path) ? "<<bad>>" : path, result);
1135         ceph_mdsc_free_path(path, pathlen);
1136     }
1137 out:
1138     iput(req->r_old_inode);
1139     ceph_mdsc_release_dir_caps(req);
1140 }
1141 
1142 static int get_caps_for_async_unlink(struct inode *dir, struct dentry *dentry)
1143 {
1144     struct ceph_inode_info *ci = ceph_inode(dir);
1145     struct ceph_dentry_info *di;
1146     int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_UNLINK;
1147 
1148     spin_lock(&ci->i_ceph_lock);
1149     if ((__ceph_caps_issued(ci, NULL) & want) == want) {
1150         ceph_take_cap_refs(ci, want, false);
1151         got = want;
1152     }
1153     spin_unlock(&ci->i_ceph_lock);
1154 
1155     /* If we didn't get anything, return 0 */
1156     if (!got)
1157         return 0;
1158 
1159     spin_lock(&dentry->d_lock);
1160     di = ceph_dentry(dentry);
1161     /*
1162      * - We are holding Fx, which implies Fs caps.
1163      * - Only support async unlink for primary linkage
1164      */
1165     if (atomic_read(&ci->i_shared_gen) != di->lease_shared_gen ||
1166         !(di->flags & CEPH_DENTRY_PRIMARY_LINK))
1167         want = 0;
1168     spin_unlock(&dentry->d_lock);
1169 
1170     /* Do we still want what we've got? */
1171     if (want == got)
1172         return got;
1173 
1174     ceph_put_cap_refs(ci, got);
1175     return 0;
1176 }
1177 
1178 /*
1179  * rmdir and unlink differ only by the metadata op code
1180  */
1181 static int ceph_unlink(struct inode *dir, struct dentry *dentry)
1182 {
1183     struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
1184     struct ceph_mds_client *mdsc = fsc->mdsc;
1185     struct inode *inode = d_inode(dentry);
1186     struct ceph_mds_request *req;
1187     bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
1188     int err = -EROFS;
1189     int op;
1190 
1191     if (ceph_snap(dir) == CEPH_SNAPDIR) {
1192         /* rmdir .snap/foo is RMSNAP */
1193         dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
1194         op = CEPH_MDS_OP_RMSNAP;
1195     } else if (ceph_snap(dir) == CEPH_NOSNAP) {
1196         dout("unlink/rmdir dir %p dn %p inode %p\n",
1197              dir, dentry, inode);
1198         op = d_is_dir(dentry) ?
1199             CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
1200     } else
1201         goto out;
1202 retry:
1203     req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1204     if (IS_ERR(req)) {
1205         err = PTR_ERR(req);
1206         goto out;
1207     }
1208     req->r_dentry = dget(dentry);
1209     req->r_num_caps = 2;
1210     req->r_parent = dir;
1211     ihold(dir);
1212     req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
1213     req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1214     req->r_inode_drop = ceph_drop_caps_for_unlink(inode);
1215 
1216     if (try_async && op == CEPH_MDS_OP_UNLINK &&
1217         (req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) {
1218         struct ceph_dentry_info *di = ceph_dentry(dentry);
1219 
1220         dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir),
1221              dentry->d_name.len, dentry->d_name.name,
1222              ceph_cap_string(req->r_dir_caps));
1223         set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
1224         req->r_callback = ceph_async_unlink_cb;
1225         req->r_old_inode = d_inode(dentry);
1226         ihold(req->r_old_inode);
1227 
1228         spin_lock(&dentry->d_lock);
1229         di->flags |= CEPH_DENTRY_ASYNC_UNLINK;
1230         spin_unlock(&dentry->d_lock);
1231 
1232         spin_lock(&fsc->async_unlink_conflict_lock);
1233         hash_add_rcu(fsc->async_unlink_conflict, &di->hnode,
1234                  dentry->d_name.hash);
1235         spin_unlock(&fsc->async_unlink_conflict_lock);
1236 
1237         err = ceph_mdsc_submit_request(mdsc, dir, req);
1238         if (!err) {
1239             /*
1240              * We have enough caps, so we assume that the unlink
1241              * will succeed. Fix up the target inode and dcache.
1242              */
1243             drop_nlink(inode);
1244             d_delete(dentry);
1245         } else {
1246             spin_lock(&fsc->async_unlink_conflict_lock);
1247             hash_del_rcu(&di->hnode);
1248             spin_unlock(&fsc->async_unlink_conflict_lock);
1249 
1250             spin_lock(&dentry->d_lock);
1251             di->flags &= ~CEPH_DENTRY_ASYNC_UNLINK;
1252             spin_unlock(&dentry->d_lock);
1253 
1254             if (err == -EJUKEBOX) {
1255                 try_async = false;
1256                 ceph_mdsc_put_request(req);
1257                 goto retry;
1258             }
1259         }
1260     } else {
1261         set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
1262         err = ceph_mdsc_do_request(mdsc, dir, req);
1263         if (!err && !req->r_reply_info.head->is_dentry)
1264             d_delete(dentry);
1265     }
1266 
1267     ceph_mdsc_put_request(req);
1268 out:
1269     return err;
1270 }
1271 
1272 static int ceph_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
1273                struct dentry *old_dentry, struct inode *new_dir,
1274                struct dentry *new_dentry, unsigned int flags)
1275 {
1276     struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old_dir->i_sb);
1277     struct ceph_mds_request *req;
1278     int op = CEPH_MDS_OP_RENAME;
1279     int err;
1280 
1281     if (flags)
1282         return -EINVAL;
1283 
1284     if (ceph_snap(old_dir) != ceph_snap(new_dir))
1285         return -EXDEV;
1286     if (ceph_snap(old_dir) != CEPH_NOSNAP) {
1287         if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
1288             op = CEPH_MDS_OP_RENAMESNAP;
1289         else
1290             return -EROFS;
1291     }
1292     /* don't allow cross-quota renames */
1293     if ((old_dir != new_dir) &&
1294         (!ceph_quota_is_same_realm(old_dir, new_dir)))
1295         return -EXDEV;
1296 
1297     err = ceph_wait_on_conflict_unlink(new_dentry);
1298     if (err)
1299         return err;
1300 
1301     dout("rename dir %p dentry %p to dir %p dentry %p\n",
1302          old_dir, old_dentry, new_dir, new_dentry);
1303     req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1304     if (IS_ERR(req))
1305         return PTR_ERR(req);
1306     ihold(old_dir);
1307     req->r_dentry = dget(new_dentry);
1308     req->r_num_caps = 2;
1309     req->r_old_dentry = dget(old_dentry);
1310     req->r_old_dentry_dir = old_dir;
1311     req->r_parent = new_dir;
1312     ihold(new_dir);
1313     set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
1314     req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
1315     req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
1316     req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
1317     req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1318     /* release LINK_RDCACHE on source inode (mds will lock it) */
1319     req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
1320     if (d_really_is_positive(new_dentry)) {
1321         req->r_inode_drop =
1322             ceph_drop_caps_for_unlink(d_inode(new_dentry));
1323     }
1324     err = ceph_mdsc_do_request(mdsc, old_dir, req);
1325     if (!err && !req->r_reply_info.head->is_dentry) {
1326         /*
1327          * Normally d_move() is done by fill_trace (called by
1328          * do_request, above).  If there is no trace, we need
1329          * to do it here.
1330          */
1331         d_move(old_dentry, new_dentry);
1332     }
1333     ceph_mdsc_put_request(req);
1334     return err;
1335 }
1336 
1337 /*
1338  * Move dentry to tail of mdsc->dentry_leases list when lease is updated.
1339  * Leases at front of the list will expire first. (Assume all leases have
1340  * similar duration)
1341  *
1342  * Called under dentry->d_lock.
1343  */
1344 void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
1345 {
1346     struct dentry *dn = di->dentry;
1347     struct ceph_mds_client *mdsc;
1348 
1349     dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn);
1350 
1351     di->flags |= CEPH_DENTRY_LEASE_LIST;
1352     if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
1353         di->flags |= CEPH_DENTRY_REFERENCED;
1354         return;
1355     }
1356 
1357     mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1358     spin_lock(&mdsc->dentry_list_lock);
1359     list_move_tail(&di->lease_list, &mdsc->dentry_leases);
1360     spin_unlock(&mdsc->dentry_list_lock);
1361 }
1362 
1363 static void __dentry_dir_lease_touch(struct ceph_mds_client* mdsc,
1364                      struct ceph_dentry_info *di)
1365 {
1366     di->flags &= ~(CEPH_DENTRY_LEASE_LIST | CEPH_DENTRY_REFERENCED);
1367     di->lease_gen = 0;
1368     di->time = jiffies;
1369     list_move_tail(&di->lease_list, &mdsc->dentry_dir_leases);
1370 }
1371 
1372 /*
1373  * When dir lease is used, add dentry to tail of mdsc->dentry_dir_leases
1374  * list if it's not in the list, otherwise set 'referenced' flag.
1375  *
1376  * Called under dentry->d_lock.
1377  */
1378 void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
1379 {
1380     struct dentry *dn = di->dentry;
1381     struct ceph_mds_client *mdsc;
1382 
1383     dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
1384          di, dn, dn, di->offset);
1385 
1386     if (!list_empty(&di->lease_list)) {
1387         if (di->flags & CEPH_DENTRY_LEASE_LIST) {
1388             /* don't remove dentry from dentry lease list
1389              * if its lease is valid */
1390             if (__dentry_lease_is_valid(di))
1391                 return;
1392         } else {
1393             di->flags |= CEPH_DENTRY_REFERENCED;
1394             return;
1395         }
1396     }
1397 
1398     if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
1399         di->flags |= CEPH_DENTRY_REFERENCED;
1400         di->flags &= ~CEPH_DENTRY_LEASE_LIST;
1401         return;
1402     }
1403 
1404     mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1405     spin_lock(&mdsc->dentry_list_lock);
1406     __dentry_dir_lease_touch(mdsc, di);
1407     spin_unlock(&mdsc->dentry_list_lock);
1408 }
1409 
1410 static void __dentry_lease_unlist(struct ceph_dentry_info *di)
1411 {
1412     struct ceph_mds_client *mdsc;
1413     if (di->flags & CEPH_DENTRY_SHRINK_LIST)
1414         return;
1415     if (list_empty(&di->lease_list))
1416         return;
1417 
1418     mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
1419     spin_lock(&mdsc->dentry_list_lock);
1420     list_del_init(&di->lease_list);
1421     spin_unlock(&mdsc->dentry_list_lock);
1422 }
1423 
1424 enum {
1425     KEEP    = 0,
1426     DELETE  = 1,
1427     TOUCH   = 2,
1428     STOP    = 4,
1429 };
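/*
 * The check callbacks below return a mask of the actions above;
 * __dentry_leases_walk() applies TOUCH and DELETE and stops on STOP.
 */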
1430 
1431 struct ceph_lease_walk_control {
1432     bool dir_lease;
1433     bool expire_dir_lease;
1434     unsigned long nr_to_scan;
1435     unsigned long dir_lease_ttl;
1436 };
1437 
1438 static unsigned long
1439 __dentry_leases_walk(struct ceph_mds_client *mdsc,
1440              struct ceph_lease_walk_control *lwc,
1441              int (*check)(struct dentry*, void*))
1442 {
1443     struct ceph_dentry_info *di, *tmp;
1444     struct dentry *dentry, *last = NULL;
1445     struct list_head* list;
1446     LIST_HEAD(dispose);
1447     unsigned long freed = 0;
1448     int ret = 0;
1449 
1450     list = lwc->dir_lease ? &mdsc->dentry_dir_leases : &mdsc->dentry_leases;
1451     spin_lock(&mdsc->dentry_list_lock);
1452     list_for_each_entry_safe(di, tmp, list, lease_list) {
1453         if (!lwc->nr_to_scan)
1454             break;
1455         --lwc->nr_to_scan;
1456 
1457         dentry = di->dentry;
1458         if (last == dentry)
1459             break;
1460 
1461         if (!spin_trylock(&dentry->d_lock))
1462             continue;
1463 
1464         if (__lockref_is_dead(&dentry->d_lockref)) {
1465             list_del_init(&di->lease_list);
1466             goto next;
1467         }
1468 
1469         ret = check(dentry, lwc);
1470         if (ret & TOUCH) {
1471             /* move it into tail of dir lease list */
1472             __dentry_dir_lease_touch(mdsc, di);
1473             if (!last)
1474                 last = dentry;
1475         }
1476         if (ret & DELETE) {
1477             /* stale lease */
1478             di->flags &= ~CEPH_DENTRY_REFERENCED;
1479             if (dentry->d_lockref.count > 0) {
1480                 /* update_dentry_lease() will re-add
1481                  * it to lease list, or
1482                  * ceph_d_delete() will return 1 when
1483                  * last reference is dropped */
1484                 list_del_init(&di->lease_list);
1485             } else {
1486                 di->flags |= CEPH_DENTRY_SHRINK_LIST;
1487                 list_move_tail(&di->lease_list, &dispose);
1488                 dget_dlock(dentry);
1489             }
1490         }
1491 next:
1492         spin_unlock(&dentry->d_lock);
1493         if (ret & STOP)
1494             break;
1495     }
1496     spin_unlock(&mdsc->dentry_list_lock);
1497 
1498     while (!list_empty(&dispose)) {
1499         di = list_first_entry(&dispose, struct ceph_dentry_info,
1500                       lease_list);
1501         dentry = di->dentry;
1502         spin_lock(&dentry->d_lock);
1503 
1504         list_del_init(&di->lease_list);
1505         di->flags &= ~CEPH_DENTRY_SHRINK_LIST;
1506         if (di->flags & CEPH_DENTRY_REFERENCED) {
1507             spin_lock(&mdsc->dentry_list_lock);
1508             if (di->flags & CEPH_DENTRY_LEASE_LIST) {
1509                 list_add_tail(&di->lease_list,
1510                           &mdsc->dentry_leases);
1511             } else {
1512                 __dentry_dir_lease_touch(mdsc, di);
1513             }
1514             spin_unlock(&mdsc->dentry_list_lock);
1515         } else {
1516             freed++;
1517         }
1518 
1519         spin_unlock(&dentry->d_lock);
1520         /* ceph_d_delete() does the trick */
1521         dput(dentry);
1522     }
1523     return freed;
1524 }
1525 
1526 static int __dentry_lease_check(struct dentry *dentry, void *arg)
1527 {
1528     struct ceph_dentry_info *di = ceph_dentry(dentry);
1529     int ret;
1530 
1531     if (__dentry_lease_is_valid(di))
1532         return STOP;
1533     ret = __dir_lease_try_check(dentry);
1534     if (ret == -EBUSY)
1535         return KEEP;
1536     if (ret > 0)
1537         return TOUCH;
1538     return DELETE;
1539 }
1540 
1541 static int __dir_lease_check(struct dentry *dentry, void *arg)
1542 {
1543     struct ceph_lease_walk_control *lwc = arg;
1544     struct ceph_dentry_info *di = ceph_dentry(dentry);
1545 
1546     int ret = __dir_lease_try_check(dentry);
1547     if (ret == -EBUSY)
1548         return KEEP;
1549     if (ret > 0) {
1550         if (time_before(jiffies, di->time + lwc->dir_lease_ttl))
1551             return STOP;
1552         /* Move dentry to tail of dir lease list if we don't want
1553          * to delete it. So dentries in the list are checked in a
1554          * round-robin manner */
1555         if (!lwc->expire_dir_lease)
1556             return TOUCH;
1557         if (dentry->d_lockref.count > 0 ||
1558             (di->flags & CEPH_DENTRY_REFERENCED))
1559             return TOUCH;
1560         /* invalidate dir lease */
1561         di->lease_shared_gen = 0;
1562     }
1563     return DELETE;
1564 }
1565 
1566 int ceph_trim_dentries(struct ceph_mds_client *mdsc)
1567 {
1568     struct ceph_lease_walk_control lwc;
1569     unsigned long count;
1570     unsigned long freed;
1571 
1572     spin_lock(&mdsc->caps_list_lock);
1573     if (mdsc->caps_use_max > 0 &&
1574         mdsc->caps_use_count > mdsc->caps_use_max)
1575         count = mdsc->caps_use_count - mdsc->caps_use_max;
1576     else
1577         count = 0;
1578     spin_unlock(&mdsc->caps_list_lock);
1579 
1580     lwc.dir_lease = false;
1581     lwc.nr_to_scan  = CEPH_CAPS_PER_RELEASE * 2;
1582     freed = __dentry_leases_walk(mdsc, &lwc, __dentry_lease_check);
1583     if (!lwc.nr_to_scan) /* more invalid leases */
1584         return -EAGAIN;
1585 
1586     if (lwc.nr_to_scan < CEPH_CAPS_PER_RELEASE)
1587         lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE;
1588 
1589     lwc.dir_lease = true;
1590     lwc.expire_dir_lease = freed < count;
1591     lwc.dir_lease_ttl = mdsc->fsc->mount_options->caps_wanted_delay_max * HZ;
1592     freed += __dentry_leases_walk(mdsc, &lwc, __dir_lease_check);
1593     if (!lwc.nr_to_scan) /* more to check */
1594         return -EAGAIN;
1595 
1596     return freed > 0 ? 1 : 0;
1597 }
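/*
 * Illustrative sketch (not part of this file): ceph_trim_dentries()
 * returns -EAGAIN while there may still be stale leases to scan, 1 if
 * something was freed, and 0 otherwise.  A periodic caller could use
 * that to decide how soon to run again, e.g. (names are placeholders):
 *
 *	int ret = ceph_trim_dentries(mdsc);
 *	if (ret == -EAGAIN)
 *		next_delay = SHORT_INTERVAL;	// more leases to check
 *	else
 *		next_delay = NORMAL_INTERVAL;	// nothing urgent
 */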
1598 
1599 /*
1600  * Ensure a dentry lease will no longer revalidate.
1601  */
1602 void ceph_invalidate_dentry_lease(struct dentry *dentry)
1603 {
1604     struct ceph_dentry_info *di = ceph_dentry(dentry);
1605     spin_lock(&dentry->d_lock);
1606     di->time = jiffies;
1607     di->lease_shared_gen = 0;
1608     di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1609     __dentry_lease_unlist(di);
1610     spin_unlock(&dentry->d_lock);
1611 }
1612 
1613 /*
1614  * Check if dentry lease is valid.  If not, delete the lease.  Try to
1615  * renew if the lease is more than half up.
1616  */
1617 static bool __dentry_lease_is_valid(struct ceph_dentry_info *di)
1618 {
1619     struct ceph_mds_session *session;
1620 
1621     if (!di->lease_gen)
1622         return false;
1623 
1624     session = di->lease_session;
1625     if (session) {
1626         u32 gen;
1627         unsigned long ttl;
1628 
1629         gen = atomic_read(&session->s_cap_gen);
1630         ttl = session->s_cap_ttl;
1631 
1632         if (di->lease_gen == gen &&
1633             time_before(jiffies, ttl) &&
1634             time_before(jiffies, di->time))
1635             return true;
1636     }
1637     di->lease_gen = 0;
1638     return false;
1639 }
1640 
1641 static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
1642 {
1643     struct ceph_dentry_info *di;
1644     struct ceph_mds_session *session = NULL;
1645     u32 seq = 0;
1646     int valid = 0;
1647 
1648     spin_lock(&dentry->d_lock);
1649     di = ceph_dentry(dentry);
1650     if (di && __dentry_lease_is_valid(di)) {
1651         valid = 1;
1652 
1653         if (di->lease_renew_after &&
1654             time_after(jiffies, di->lease_renew_after)) {
1655             /*
1656              * We should renew. If we're in RCU walk mode
1657              * though, we can't do that so just return
1658              * -ECHILD.
1659              */
1660             if (flags & LOOKUP_RCU) {
1661                 valid = -ECHILD;
1662             } else {
1663                 session = ceph_get_mds_session(di->lease_session);
1664                 seq = di->lease_seq;
1665                 di->lease_renew_after = 0;
1666                 di->lease_renew_from = jiffies;
1667             }
1668         }
1669     }
1670     spin_unlock(&dentry->d_lock);
1671 
1672     if (session) {
1673         ceph_mdsc_lease_send_msg(session, dentry,
1674                      CEPH_MDS_LEASE_RENEW, seq);
1675         ceph_put_mds_session(session);
1676     }
1677     dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
1678     return valid;
1679 }
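/*
 * Note (describing behaviour set up elsewhere in the client, stated
 * here only for context): lease_renew_after is normally set to roughly
 * the halfway point of the lease TTL when the lease is installed,
 * which is what "more than half up" above refers to.  Once we decide
 * to renew, lease_renew_after is cleared under d_lock so only one
 * CEPH_MDS_LEASE_RENEW message is sent per renewal period.
 */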
1680 
1681 /*
1682  * Called under dentry->d_lock.
1683  */
1684 static int __dir_lease_try_check(const struct dentry *dentry)
1685 {
1686     struct ceph_dentry_info *di = ceph_dentry(dentry);
1687     struct inode *dir;
1688     struct ceph_inode_info *ci;
1689     int valid = 0;
1690 
1691     if (!di->lease_shared_gen)
1692         return 0;
1693     if (IS_ROOT(dentry))
1694         return 0;
1695 
1696     dir = d_inode(dentry->d_parent);
1697     ci = ceph_inode(dir);
1698 
1699     if (spin_trylock(&ci->i_ceph_lock)) {
1700         if (atomic_read(&ci->i_shared_gen) == di->lease_shared_gen &&
1701             __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 0))
1702             valid = 1;
1703         spin_unlock(&ci->i_ceph_lock);
1704     } else {
1705         valid = -EBUSY;
1706     }
1707 
1708     if (!valid)
1709         di->lease_shared_gen = 0;
1710     return valid;
1711 }
1712 
1713 /*
1714  * Check if directory-wide content lease/cap is valid.
1715  */
1716 static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
1717                   struct ceph_mds_client *mdsc)
1718 {
1719     struct ceph_inode_info *ci = ceph_inode(dir);
1720     int valid;
1721     int shared_gen;
1722 
1723     spin_lock(&ci->i_ceph_lock);
1724     valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
1725     if (valid) {
1726         __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
1727         shared_gen = atomic_read(&ci->i_shared_gen);
1728     }
1729     spin_unlock(&ci->i_ceph_lock);
1730     if (valid) {
1731         struct ceph_dentry_info *di;
1732         spin_lock(&dentry->d_lock);
1733         di = ceph_dentry(dentry);
1734         if (dir == d_inode(dentry->d_parent) &&
1735             di && di->lease_shared_gen == shared_gen)
1736             __ceph_dentry_dir_lease_touch(di);
1737         else
1738             valid = 0;
1739         spin_unlock(&dentry->d_lock);
1740     }
1741     dout("dir_lease_is_valid dir %p v%u dentry %p = %d\n",
1742          dir, (unsigned)atomic_read(&ci->i_shared_gen), dentry, valid);
1743     return valid;
1744 }
1745 
1746 /*
1747  * Check if cached dentry can be trusted.
1748  */
1749 static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1750 {
1751     int valid = 0;
1752     struct dentry *parent;
1753     struct inode *dir, *inode;
1754     struct ceph_mds_client *mdsc;
1755 
1756     if (flags & LOOKUP_RCU) {
1757         parent = READ_ONCE(dentry->d_parent);
1758         dir = d_inode_rcu(parent);
1759         if (!dir)
1760             return -ECHILD;
1761         inode = d_inode_rcu(dentry);
1762     } else {
1763         parent = dget_parent(dentry);
1764         dir = d_inode(parent);
1765         inode = d_inode(dentry);
1766     }
1767 
1768     dout("d_revalidate %p '%pd' inode %p offset 0x%llx\n", dentry,
1769          dentry, inode, ceph_dentry(dentry)->offset);
1770 
1771     mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;
1772 
1773     /* always trust cached snapped dentries and the snapdir dentry */
1774     if (ceph_snap(dir) != CEPH_NOSNAP) {
1775         dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
1776              dentry, inode);
1777         valid = 1;
1778     } else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1779         valid = 1;
1780     } else {
1781         valid = dentry_lease_is_valid(dentry, flags);
1782         if (valid == -ECHILD)
1783             return valid;
1784         if (valid || dir_lease_is_valid(dir, dentry, mdsc)) {
1785             if (inode)
1786                 valid = ceph_is_any_caps(inode);
1787             else
1788                 valid = 1;
1789         }
1790     }
1791 
1792     if (!valid) {
1793         struct ceph_mds_request *req;
1794         int op, err;
1795         u32 mask;
1796 
1797         if (flags & LOOKUP_RCU)
1798             return -ECHILD;
1799 
1800         percpu_counter_inc(&mdsc->metric.d_lease_mis);
1801 
1802         op = ceph_snap(dir) == CEPH_SNAPDIR ?
1803             CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
1804         req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
1805         if (!IS_ERR(req)) {
1806             req->r_dentry = dget(dentry);
1807             req->r_num_caps = 2;
1808             req->r_parent = dir;
1809             ihold(dir);
1810 
1811             mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
1812             if (ceph_security_xattr_wanted(dir))
1813                 mask |= CEPH_CAP_XATTR_SHARED;
1814             req->r_args.getattr.mask = cpu_to_le32(mask);
1815 
1816             err = ceph_mdsc_do_request(mdsc, NULL, req);
1817             switch (err) {
1818             case 0:
1819                 if (d_really_is_positive(dentry) &&
1820                     d_inode(dentry) == req->r_target_inode)
1821                     valid = 1;
1822                 break;
1823             case -ENOENT:
1824                 if (d_really_is_negative(dentry))
1825                     valid = 1;
1826                 fallthrough;
1827             default:
1828                 break;
1829             }
1830             ceph_mdsc_put_request(req);
1831             dout("d_revalidate %p lookup result=%d\n",
1832                  dentry, err);
1833         }
1834     } else {
1835         percpu_counter_inc(&mdsc->metric.d_lease_hit);
1836     }
1837 
1838     dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
1839     if (!valid)
1840         ceph_dir_clear_complete(dir);
1841 
1842     if (!(flags & LOOKUP_RCU))
1843         dput(parent);
1844     return valid;
1845 }
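/*
 * When neither the dentry lease nor the parent's FILE_SHARED cap can
 * vouch for the dentry, the code above falls back to a synchronous
 * LOOKUP (or LOOKUPSNAP inside .snap) to the MDS and treats the dentry
 * as valid only if the result matches what we have cached: the same
 * target inode for a positive dentry, or -ENOENT for a negative one.
 * In RCU-walk mode the request cannot be issued, so -ECHILD is
 * returned and the VFS retries in ref-walk mode.
 */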
1846 
1847 /*
1848  * Delete an unused dentry that doesn't have a valid lease.
1849  *
1850  * Called under dentry->d_lock.
1851  */
1852 static int ceph_d_delete(const struct dentry *dentry)
1853 {
1854     struct ceph_dentry_info *di;
1855 
1856     /* won't release caps */
1857     if (d_really_is_negative(dentry))
1858         return 0;
1859     if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
1860         return 0;
1861     /* valid lease? */
1862     di = ceph_dentry(dentry);
1863     if (di) {
1864         if (__dentry_lease_is_valid(di))
1865             return 0;
1866         if (__dir_lease_try_check(dentry))
1867             return 0;
1868     }
1869     return 1;
1870 }
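/*
 * ceph_d_delete() also completes the deferred disposal started in
 * __dentry_leases_walk(): a dentry whose lease went stale while it was
 * still referenced is merely removed from the lease list there; once
 * the last reference is dropped, this callback returns 1 and the VFS
 * discards the dentry instead of keeping it cached.
 */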
1871 
1872 /*
1873  * Release our ceph_dentry_info.
1874  */
1875 static void ceph_d_release(struct dentry *dentry)
1876 {
1877     struct ceph_dentry_info *di = ceph_dentry(dentry);
1878     struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
1879 
1880     dout("d_release %p\n", dentry);
1881 
1882     atomic64_dec(&fsc->mdsc->metric.total_dentries);
1883 
1884     spin_lock(&dentry->d_lock);
1885     __dentry_lease_unlist(di);
1886     dentry->d_fsdata = NULL;
1887     spin_unlock(&dentry->d_lock);
1888 
1889     ceph_put_mds_session(di->lease_session);
1890     kmem_cache_free(ceph_dentry_cachep, di);
1891 }
1892 
1893 /*
1894  * When the VFS prunes a dentry from the cache, we need to clear the
1895  * complete flag on the parent directory.
1896  *
1897  * Called under dentry->d_lock.
1898  */
1899 static void ceph_d_prune(struct dentry *dentry)
1900 {
1901     struct ceph_inode_info *dir_ci;
1902     struct ceph_dentry_info *di;
1903 
1904     dout("ceph_d_prune %pd %p\n", dentry, dentry);
1905 
1906     /* do we have a valid parent? */
1907     if (IS_ROOT(dentry))
1908         return;
1909 
1910     /* we hold d_lock, so d_parent is stable */
1911     dir_ci = ceph_inode(d_inode(dentry->d_parent));
1912     if (dir_ci->i_vino.snap == CEPH_SNAPDIR)
1913         return;
1914 
1915     /* whoever calls d_delete() should also disable dcache readdir */
1916     if (d_really_is_negative(dentry))
1917         return;
1918 
1919     /* d_fsdata does not get cleared until d_release */
1920     if (!d_unhashed(dentry)) {
1921         __ceph_dir_clear_complete(dir_ci);
1922         return;
1923     }
1924 
1925     /* Disable dcache readdir in case someone called d_drop() or
1926      * d_invalidate(), but the MDS didn't properly revoke
1927      * CEPH_CAP_FILE_SHARED (dcache readdir is still enabled). */
1928     di = ceph_dentry(dentry);
1929     if (di->offset > 0 &&
1930         di->lease_shared_gen == atomic_read(&dir_ci->i_shared_gen))
1931         __ceph_dir_clear_ordered(dir_ci);
1932 }
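/*
 * Summary of the checks above: pruning a still-hashed dentry clears
 * the parent's "complete" flag, since a dcache-based readdir could
 * otherwise miss the pruned name.  For a dentry that was already
 * unhashed via d_drop()/d_invalidate(), only the "ordered" flag is
 * cleared, and only if its readdir offset and lease_shared_gen still
 * match the parent directory.
 */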
1933 
1934 /*
1935  * read() on a dir.  This weird interface hack only works if mounted
1936  * with '-o dirstat'.
1937  */
1938 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1939                  loff_t *ppos)
1940 {
1941     struct ceph_dir_file_info *dfi = file->private_data;
1942     struct inode *inode = file_inode(file);
1943     struct ceph_inode_info *ci = ceph_inode(inode);
1944     int left;
1945     const int bufsize = 1024;
1946 
1947     if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1948         return -EISDIR;
1949 
1950     if (!dfi->dir_info) {
1951         dfi->dir_info = kmalloc(bufsize, GFP_KERNEL);
1952         if (!dfi->dir_info)
1953             return -ENOMEM;
1954         dfi->dir_info_len =
1955             snprintf(dfi->dir_info, bufsize,
1956                 "entries:   %20lld\n"
1957                 " files:    %20lld\n"
1958                 " subdirs:  %20lld\n"
1959                 "rentries:  %20lld\n"
1960                 " rfiles:   %20lld\n"
1961                 " rsubdirs: %20lld\n"
1962                 "rbytes:    %20lld\n"
1963                 "rctime:    %10lld.%09ld\n",
1964                 ci->i_files + ci->i_subdirs,
1965                 ci->i_files,
1966                 ci->i_subdirs,
1967                 ci->i_rfiles + ci->i_rsubdirs,
1968                 ci->i_rfiles,
1969                 ci->i_rsubdirs,
1970                 ci->i_rbytes,
1971                 ci->i_rctime.tv_sec,
1972                 ci->i_rctime.tv_nsec);
1973     }
1974 
1975     if (*ppos >= dfi->dir_info_len)
1976         return 0;
1977     size = min_t(unsigned, size, dfi->dir_info_len-*ppos);
1978     left = copy_to_user(buf, dfi->dir_info + *ppos, size);
1979     if (left == size)
1980         return -EFAULT;
1981     *ppos += (size - left);
1982     return size - left;
1983 }
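/*
 * Example usage (illustrative; the mount syntax is a placeholder,
 * e.g. "mount -t ceph <mon>:/ /mnt/ceph -o dirstat"): with the dirstat
 * option, an ordinary read of a directory such as "cat /mnt/ceph/dir"
 * returns the formatted counters built above.  Without -o dirstat the
 * read fails with -EISDIR, as it would on any other filesystem.
 */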
1984 
1985 
1986 
1987 /*
1988  * Return name hash for a given dentry.  This is dependent on
1989  * the parent directory's hash function.
1990  */
1991 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1992 {
1993     struct ceph_inode_info *dci = ceph_inode(dir);
1994     unsigned hash;
1995 
1996     switch (dci->i_dir_layout.dl_dir_hash) {
1997     case 0: /* for backward compat */
1998     case CEPH_STR_HASH_LINUX:
1999         return dn->d_name.hash;
2000 
2001     default:
2002         spin_lock(&dn->d_lock);
2003         hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
2004                      dn->d_name.name, dn->d_name.len);
2005         spin_unlock(&dn->d_lock);
2006         return hash;
2007     }
2008 }
2009 
2010 const struct file_operations ceph_dir_fops = {
2011     .read = ceph_read_dir,
2012     .iterate = ceph_readdir,
2013     .llseek = ceph_dir_llseek,
2014     .open = ceph_open,
2015     .release = ceph_release,
2016     .unlocked_ioctl = ceph_ioctl,
2017     .compat_ioctl = compat_ptr_ioctl,
2018     .fsync = ceph_fsync,
2019     .lock = ceph_lock,
2020     .flock = ceph_flock,
2021 };
2022 
2023 const struct file_operations ceph_snapdir_fops = {
2024     .iterate = ceph_readdir,
2025     .llseek = ceph_dir_llseek,
2026     .open = ceph_open,
2027     .release = ceph_release,
2028 };
2029 
2030 const struct inode_operations ceph_dir_iops = {
2031     .lookup = ceph_lookup,
2032     .permission = ceph_permission,
2033     .getattr = ceph_getattr,
2034     .setattr = ceph_setattr,
2035     .listxattr = ceph_listxattr,
2036     .get_acl = ceph_get_acl,
2037     .set_acl = ceph_set_acl,
2038     .mknod = ceph_mknod,
2039     .symlink = ceph_symlink,
2040     .mkdir = ceph_mkdir,
2041     .link = ceph_link,
2042     .unlink = ceph_unlink,
2043     .rmdir = ceph_unlink,
2044     .rename = ceph_rename,
2045     .create = ceph_create,
2046     .atomic_open = ceph_atomic_open,
2047 };
2048 
2049 const struct inode_operations ceph_snapdir_iops = {
2050     .lookup = ceph_lookup,
2051     .permission = ceph_permission,
2052     .getattr = ceph_getattr,
2053     .mkdir = ceph_mkdir,
2054     .rmdir = ceph_unlink,
2055     .rename = ceph_rename,
2056 };
2057 
2058 const struct dentry_operations ceph_dentry_ops = {
2059     .d_revalidate = ceph_d_revalidate,
2060     .d_delete = ceph_d_delete,
2061     .d_release = ceph_d_release,
2062     .d_prune = ceph_d_prune,
2063     .d_init = ceph_d_init,
2064 };