// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY                NFSDDBG_PNFS

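/*
 * A single layout segment handed out to a client.  Each layout is linked
 * on the ls_layouts list of the layout stateid (lo_state) that owns it.
 */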
struct nfs4_layout {
    struct list_head        lo_perstate;
    struct nfs4_layout_stateid  *lo_state;
    struct nfsd4_layout_seg     lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;

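/*
 * Per-layout-type operations.  Slots are filled only for the layout
 * drivers that nfsd was built with; unset entries stay NULL.
 */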
const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
    [LAYOUT_FLEX_FILES] = &ff_layout_ops,
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
    [LAYOUT_BLOCK_VOLUME]   = &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
    [LAYOUT_SCSI]       = &scsi_layout_ops,
#endif
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS 8
#define DEVID_HASH_SIZE (1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK (DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
    return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}

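/*
 * Create a device ID mapping for the fsid of the given file handle, or
 * reuse an existing mapping for the same fsid.  On success the mapping
 * is published in fhp->fh_export->ex_devid_map; on allocation failure
 * it is simply left NULL for the caller to detect.
 */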
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
    const struct knfsd_fh *fh = &fhp->fh_handle;
    size_t fsid_len = key_len(fh->fh_fsid_type);
    struct nfsd4_deviceid_map *map, *old;
    int i;

    map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
    if (!map)
        return;

    map->fsid_type = fh->fh_fsid_type;
    memcpy(&map->fsid, fh->fh_fsid, fsid_len);

    spin_lock(&nfsd_devid_lock);
    if (fhp->fh_export->ex_devid_map)
        goto out_unlock;

    for (i = 0; i < DEVID_HASH_SIZE; i++) {
        list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
            if (old->fsid_type != fh->fh_fsid_type)
                continue;
            if (memcmp(old->fsid, fh->fh_fsid,
                    key_len(old->fsid_type)))
                continue;

            fhp->fh_export->ex_devid_map = old;
            goto out_unlock;
        }
    }

    map->idx = nfsd_devid_seq++;
    list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
    fhp->fh_export->ex_devid_map = map;
    map = NULL;

out_unlock:
    spin_unlock(&nfsd_devid_lock);
    kfree(map);
}

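/*
 * Look up a device ID mapping by index.  Mappings are only freed at
 * module unload (nfsd4_exit_pnfs), so an RCU-protected walk is enough
 * and no reference is taken on the result.
 */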
struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
    struct nfsd4_deviceid_map *map, *ret = NULL;

    rcu_read_lock();
    list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
        if (map->idx == idx)
            ret = map;
    rcu_read_unlock();

    return ret;
}

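/*
 * Fill in the device ID for a layout: the export's fsid index plus the
 * generation passed in by the layout driver.
 */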
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
        u32 device_generation)
{
    if (!fhp->fh_export->ex_devid_map) {
        nfsd4_alloc_devid_map(fhp);
        if (!fhp->fh_export->ex_devid_map)
            return -ENOMEM;
    }

    id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
    id->generation = device_generation;
    id->pad = 0;
    return 0;
}

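/*
 * Advertise the layout types an export supports.  Each type is offered
 * only if the export is flagged for pNFS and the underlying filesystem
 * (and, for SCSI, its block device) provides the required operations.
 */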
void nfsd4_setup_layout_type(struct svc_export *exp)
{
#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
    struct super_block *sb = exp->ex_path.mnt->mnt_sb;
#endif

    if (!(exp->ex_flags & NFSEXP_PNFS))
        return;

#ifdef CONFIG_NFSD_FLEXFILELAYOUT
    exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
    if (sb->s_export_op->get_uuid &&
        sb->s_export_op->map_blocks &&
        sb->s_export_op->commit_blocks)
        exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
    if (sb->s_export_op->map_blocks &&
        sb->s_export_op->commit_blocks &&
        sb->s_bdev &&
        sb->s_bdev->bd_disk->fops->pr_ops &&
        sb->s_bdev->bd_disk->fops->get_unique_id)
        exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}

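/*
 * sc_free callback for layout stateids: unhash the stateid from its
 * client and file, drop the lease that backs layout recalls, and
 * release the nfsd_file reference before freeing the stateid itself.
 */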
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
    struct nfs4_layout_stateid *ls = layoutstateid(stid);
    struct nfs4_client *clp = ls->ls_stid.sc_client;
    struct nfs4_file *fp = ls->ls_stid.sc_file;

    trace_nfsd_layoutstate_free(&ls->ls_stid.sc_stateid);

    spin_lock(&clp->cl_lock);
    list_del_init(&ls->ls_perclnt);
    spin_unlock(&clp->cl_lock);

    spin_lock(&fp->fi_lock);
    list_del_init(&ls->ls_perfile);
    spin_unlock(&fp->fi_lock);

    if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
        vfs_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls);
    nfsd_file_put(ls->ls_file);

    if (ls->ls_recalled)
        atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

    kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

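/*
 * Install an FL_LAYOUT lease on the file so that conflicting access
 * triggers lm_break and with it a layout recall.  Layout types that
 * disable recalls skip the lease entirely.
 */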
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
    struct file_lock *fl;
    int status;

    if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
        return 0;

    fl = locks_alloc_lock();
    if (!fl)
        return -ENOMEM;
    locks_init_lock(fl);
    fl->fl_lmops = &nfsd4_layouts_lm_ops;
    fl->fl_flags = FL_LAYOUT;
    fl->fl_type = F_RDLCK;
    fl->fl_end = OFFSET_MAX;
    fl->fl_owner = ls;
    fl->fl_pid = current->tgid;
    fl->fl_file = ls->ls_file->nf_file;

    status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
    if (status) {
        locks_free_lock(fl);
        return status;
    }
    BUG_ON(fl != NULL);
    return 0;
}

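/*
 * Allocate a layout stateid derived from an open, lock, or delegation
 * stateid, set up the recall callback and lease, and hash the new
 * stateid on the client and file lists.
 */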
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
        struct nfs4_stid *parent, u32 layout_type)
{
    struct nfs4_client *clp = cstate->clp;
    struct nfs4_file *fp = parent->sc_file;
    struct nfs4_layout_stateid *ls;
    struct nfs4_stid *stp;

    stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
                    nfsd4_free_layout_stateid);
    if (!stp)
        return NULL;

    get_nfs4_file(fp);
    stp->sc_file = fp;

    ls = layoutstateid(stp);
    INIT_LIST_HEAD(&ls->ls_perclnt);
    INIT_LIST_HEAD(&ls->ls_perfile);
    spin_lock_init(&ls->ls_lock);
    INIT_LIST_HEAD(&ls->ls_layouts);
    mutex_init(&ls->ls_mutex);
    ls->ls_layout_type = layout_type;
    nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
            NFSPROC4_CLNT_CB_LAYOUT);

    if (parent->sc_type == NFS4_DELEG_STID)
        ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
    else
        ls->ls_file = find_any_file(fp);
    BUG_ON(!ls->ls_file);

    if (nfsd4_layout_setlease(ls)) {
        nfsd_file_put(ls->ls_file);
        put_nfs4_file(fp);
        kmem_cache_free(nfs4_layout_stateid_cache, ls);
        return NULL;
    }

    spin_lock(&clp->cl_lock);
    stp->sc_type = NFS4_LAYOUT_STID;
    list_add(&ls->ls_perclnt, &clp->cl_lo_states);
    spin_unlock(&clp->cl_lock);

    spin_lock(&fp->fi_lock);
    list_add(&ls->ls_perfile, &fp->fi_lo_states);
    spin_unlock(&fp->fi_lock);

    trace_nfsd_layoutstate_alloc(&ls->ls_stid.sc_stateid);
    return ls;
}

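/*
 * Look up and validate the stateid sent with a layout operation.  For
 * LAYOUTGET (create == true) an open, lock, or delegation stateid is
 * also accepted and promoted to a fresh layout stateid.  On success the
 * layout stateid is returned in *lsp with its ls_mutex held.
 */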
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
        struct nfsd4_compound_state *cstate, stateid_t *stateid,
        bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
    struct nfs4_layout_stateid *ls;
    struct nfs4_stid *stid;
    unsigned char typemask = NFS4_LAYOUT_STID;
    __be32 status;

    if (create)
        typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);

    status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
            net_generic(SVC_NET(rqstp), nfsd_net_id));
    if (status)
        goto out;

    if (!fh_match(&cstate->current_fh.fh_handle,
              &stid->sc_file->fi_fhandle)) {
        status = nfserr_bad_stateid;
        goto out_put_stid;
    }

    if (stid->sc_type != NFS4_LAYOUT_STID) {
        ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
        nfs4_put_stid(stid);

        status = nfserr_jukebox;
        if (!ls)
            goto out;
        mutex_lock(&ls->ls_mutex);
    } else {
        ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

        status = nfserr_bad_stateid;
        mutex_lock(&ls->ls_mutex);
        if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
            goto out_unlock_stid;
        if (layout_type != ls->ls_layout_type)
            goto out_unlock_stid;
    }

    *lsp = ls;
    return 0;

out_unlock_stid:
    mutex_unlock(&ls->ls_mutex);
out_put_stid:
    nfs4_put_stid(stid);
out:
    return status;
}

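/*
 * Kick off a CB_LAYOUTRECALL for this stateid unless one is already in
 * flight; the extra stateid reference taken here is dropped by the
 * callback's release method.
 */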
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
    spin_lock(&ls->ls_lock);
    if (ls->ls_recalled)
        goto out_unlock;

    ls->ls_recalled = true;
    atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
    if (list_empty(&ls->ls_layouts))
        goto out_unlock;

    trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);

    refcount_inc(&ls->ls_stid.sc_count);
    nfsd4_run_cb(&ls->ls_recall);

out_unlock:
    spin_unlock(&ls->ls_lock);
}

static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
    u64 end = seg->offset + seg->length;
    return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
    if (end == NFS4_MAX_UINT64)
        lo->length = NFS4_MAX_UINT64;
    else
        lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
    if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
        return false;
    if (layout_end(&lo->lo_seg) <= s->offset)
        return false;
    if (layout_end(s) <= lo->lo_seg.offset)
        return false;
    return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
    if (lo->iomode != new->iomode)
        return false;
    if (layout_end(new) < lo->offset)
        return false;
    if (layout_end(lo) < new->offset)
        return false;

    lo->offset = min(lo->offset, new->offset);
    layout_update_len(lo, max(layout_end(lo), layout_end(new)));
    return true;
}

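/*
 * Recall layouts held by other stateids on the same file, and tell the
 * caller to return NFS4ERR_RECALLCONFLICT until those recalls complete.
 */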
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
    struct nfs4_file *fp = ls->ls_stid.sc_file;
    struct nfs4_layout_stateid *l, *n;
    __be32 nfserr = nfs_ok;

    assert_spin_locked(&fp->fi_lock);

    list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
        if (l != ls) {
            nfsd4_recall_file_layout(l);
            nfserr = nfserr_recallconflict;
        }
    }

    return nfserr;
}

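/*
 * Record a granted layout segment on its stateid.  Try to merge it into
 * an existing segment first; if that fails, drop the locks, allocate a
 * new nfs4_layout, and recheck for conflicts and merges before linking
 * it in.  Either way the stateid generation is bumped and copied back
 * into the LAYOUTGET reply.
 */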
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
    struct nfsd4_layout_seg *seg = &lgp->lg_seg;
    struct nfs4_file *fp = ls->ls_stid.sc_file;
    struct nfs4_layout *lp, *new = NULL;
    __be32 nfserr;

    spin_lock(&fp->fi_lock);
    nfserr = nfsd4_recall_conflict(ls);
    if (nfserr)
        goto out;
    spin_lock(&ls->ls_lock);
    list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
        if (layouts_try_merge(&lp->lo_seg, seg))
            goto done;
    }
    spin_unlock(&ls->ls_lock);
    spin_unlock(&fp->fi_lock);

    new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
    if (!new)
        return nfserr_jukebox;
    memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
    new->lo_state = ls;

    spin_lock(&fp->fi_lock);
    nfserr = nfsd4_recall_conflict(ls);
    if (nfserr)
        goto out;
    spin_lock(&ls->ls_lock);
    list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
        if (layouts_try_merge(&lp->lo_seg, seg))
            goto done;
    }

    refcount_inc(&ls->ls_stid.sc_count);
    list_add_tail(&new->lo_perstate, &ls->ls_layouts);
    new = NULL;
done:
    nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
    spin_unlock(&ls->ls_lock);
out:
    spin_unlock(&fp->fi_lock);
    if (new)
        kmem_cache_free(nfs4_layout_cache, new);
    return nfserr;
}

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
    while (!list_empty(reaplist)) {
        struct nfs4_layout *lp = list_first_entry(reaplist,
                struct nfs4_layout, lo_perstate);

        list_del(&lp->lo_perstate);
        nfs4_put_stid(&lp->lo_state->ls_stid);
        kmem_cache_free(nfs4_layout_cache, lp);
    }
}

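/*
 * Apply a LAYOUTRETURN range to one layout segment: free it when fully
 * covered, trim it when the range overlaps an end, and keep it whole
 * when the return would split it (splits are not supported).
 */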
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
        struct list_head *reaplist)
{
    struct nfsd4_layout_seg *lo = &lp->lo_seg;
    u64 end = layout_end(lo);

    if (seg->offset <= lo->offset) {
        if (layout_end(seg) >= end) {
            list_move_tail(&lp->lo_perstate, reaplist);
            return;
        }
        lo->offset = layout_end(seg);
    } else {
        /* retain the whole layout segment on a split. */
        if (layout_end(seg) < end) {
            dprintk("%s: split not supported\n", __func__);
            return;
        }
        end = seg->offset;
    }

    layout_update_len(lo, end);
}

__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
        struct nfsd4_compound_state *cstate,
        struct nfsd4_layoutreturn *lrp)
{
    struct nfs4_layout_stateid *ls;
    struct nfs4_layout *lp, *n;
    LIST_HEAD(reaplist);
    __be32 nfserr;
    int found = 0;

    nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
                        false, lrp->lr_layout_type,
                        &ls);
    if (nfserr) {
        trace_nfsd_layout_return_lookup_fail(&lrp->lr_sid);
        return nfserr;
    }

    spin_lock(&ls->ls_lock);
    list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
        if (layouts_overlapping(lp, &lrp->lr_seg)) {
            nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
            found++;
        }
    }
    if (!list_empty(&ls->ls_layouts)) {
        if (found)
            nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
        lrp->lrs_present = 1;
    } else {
        trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
        nfs4_unhash_stid(&ls->ls_stid);
        lrp->lrs_present = 0;
    }
    spin_unlock(&ls->ls_lock);

    mutex_unlock(&ls->ls_mutex);
    nfs4_put_stid(&ls->ls_stid);
    nfsd4_free_layouts(&reaplist);
    return nfs_ok;
}

__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
        struct nfsd4_compound_state *cstate,
        struct nfsd4_layoutreturn *lrp)
{
    struct nfs4_layout_stateid *ls, *n;
    struct nfs4_client *clp = cstate->clp;
    struct nfs4_layout *lp, *t;
    LIST_HEAD(reaplist);

    lrp->lrs_present = 0;

    spin_lock(&clp->cl_lock);
    list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
        if (ls->ls_layout_type != lrp->lr_layout_type)
            continue;

        if (lrp->lr_return_type == RETURN_FSID &&
            !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
                   &cstate->current_fh.fh_handle))
            continue;

        spin_lock(&ls->ls_lock);
        list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
            if (lrp->lr_seg.iomode == IOMODE_ANY ||
                lrp->lr_seg.iomode == lp->lo_seg.iomode)
                list_move_tail(&lp->lo_perstate, &reaplist);
        }
        spin_unlock(&ls->ls_lock);
    }
    spin_unlock(&clp->cl_lock);

    nfsd4_free_layouts(&reaplist);
    return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
        struct list_head *reaplist)
{
    spin_lock(&ls->ls_lock);
    list_splice_init(&ls->ls_layouts, reaplist);
    spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
    struct nfs4_layout_stateid *ls, *n;
    LIST_HEAD(reaplist);

    spin_lock(&clp->cl_lock);
    list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
        nfsd4_return_all_layouts(ls, &reaplist);
    spin_unlock(&clp->cl_lock);

    nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
    struct nfs4_layout_stateid *ls, *n;
    LIST_HEAD(reaplist);

    spin_lock(&fp->fi_lock);
    list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
        if (ls->ls_stid.sc_client == clp)
            nfsd4_return_all_layouts(ls, &reaplist);
    }
    spin_unlock(&fp->fi_lock);

    nfsd4_free_layouts(&reaplist);
}

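/*
 * Last-resort fencing: the client did not return its layouts in time,
 * so run /sbin/nfsd-recall-failed with the client address and the
 * super-block ID as arguments.
 */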
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
    struct nfs4_client *clp = ls->ls_stid.sc_client;
    char addr_str[INET6_ADDRSTRLEN];
    static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed";
    static char *envp[] = {
        "HOME=/",
        "TERM=linux",
        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
        NULL
    };
    char *argv[8];
    int error;

    rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

    printk(KERN_WARNING
        "nfsd: client %s failed to respond to layout recall. "
        "Fencing...\n", addr_str);

    argv[0] = (char *)nfsd_recall_failed;
    argv[1] = addr_str;
    argv[2] = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_id;
    argv[3] = NULL;

    error = call_usermodehelper(nfsd_recall_failed, argv, envp,
                    UMH_WAIT_PROC);
    if (error) {
        printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
            addr_str, error);
    }
}

static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
    struct nfs4_layout_stateid *ls =
        container_of(cb, struct nfs4_layout_stateid, ls_recall);

    mutex_lock(&ls->ls_mutex);
    nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
    mutex_unlock(&ls->ls_mutex);
}

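/*
 * Handle completion of a CB_LAYOUTRECALL.  While the client still holds
 * layouts it is polled until up to two lease periods have passed, after
 * which it is fenced; NFS4ERR_NOMATCHING_LAYOUT means the recall is
 * done.
 */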
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
    struct nfs4_layout_stateid *ls =
        container_of(cb, struct nfs4_layout_stateid, ls_recall);
    struct nfsd_net *nn;
    ktime_t now, cutoff;
    const struct nfsd4_layout_ops *ops;

    switch (task->tk_status) {
    case 0:
    case -NFS4ERR_DELAY:
        /*
         * Anything left? If not, then call it done. Note that we don't
         * take the spinlock since this is an optimization and nothing
         * should get added until the cb counter goes to zero.
         */
        if (list_empty(&ls->ls_layouts))
            return 1;

        /* Poll the client until it's done with the layout */
        now = ktime_get();
        nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

        /* Client gets 2 lease periods to return it */
        cutoff = ktime_add_ns(task->tk_start,
                     (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);

        if (ktime_before(now, cutoff)) {
            rpc_delay(task, HZ / 100); /* 10 milliseconds */
            return 0;
        }
        fallthrough;
    default:
        /*
         * Unknown error or non-responding client, we'll need to fence.
         */
        trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid);

        ops = nfsd4_layout_ops[ls->ls_layout_type];
        if (ops->fence_client)
            ops->fence_client(ls);
        else
            nfsd4_cb_layout_fail(ls);
        return 1;
    case -NFS4ERR_NOMATCHING_LAYOUT:
        trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid);
        task->tk_status = 0;
        return 1;
    }
}

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
    struct nfs4_layout_stateid *ls =
        container_of(cb, struct nfs4_layout_stateid, ls_recall);
    LIST_HEAD(reaplist);

    trace_nfsd_layout_recall_release(&ls->ls_stid.sc_stateid);

    nfsd4_return_all_layouts(ls, &reaplist);
    nfsd4_free_layouts(&reaplist);
    nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
    .prepare    = nfsd4_cb_layout_prepare,
    .done       = nfsd4_cb_layout_done,
    .release    = nfsd4_cb_layout_release,
};

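/*
 * Lease-break callback for the FL_LAYOUT lease: start a layout recall
 * instead of letting the lease machinery time the lease out, since the
 * recall path does its own fencing when the client does not respond.
 */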
static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
    /*
     * We don't want the locks code to timeout the lease for us;
     * we'll remove it ourselves if a layout isn't returned
     * in time:
     */
    fl->fl_break_time = 0;
    nfsd4_recall_file_layout(fl->fl_owner);
    return false;
}

static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
        struct list_head *dispose)
{
    BUG_ON(!(arg & F_UNLCK));
    return lease_modify(onlist, arg, dispose);
}

static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
    .lm_break   = nfsd4_layout_lm_break,
    .lm_change  = nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
    int i;

    for (i = 0; i < DEVID_HASH_SIZE; i++)
        INIT_LIST_HEAD(&nfsd_devid_hash[i]);

    nfs4_layout_cache = kmem_cache_create("nfs4_layout",
            sizeof(struct nfs4_layout), 0, 0, NULL);
    if (!nfs4_layout_cache)
        return -ENOMEM;

    nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
            sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
    if (!nfs4_layout_stateid_cache) {
        kmem_cache_destroy(nfs4_layout_cache);
        return -ENOMEM;
    }
    return 0;
}

void
nfsd4_exit_pnfs(void)
{
    int i;

    kmem_cache_destroy(nfs4_layout_cache);
    kmem_cache_destroy(nfs4_layout_stateid_cache);

    for (i = 0; i < DEVID_HASH_SIZE; i++) {
        struct nfsd4_deviceid_map *map, *n;

        list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
            kfree(map);
    }
}