// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY     NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT  (5*HZ)
#define NLMCLNT_POLL_TIMEOUT    (30*HZ)
#define NLMCLNT_MAX_RETRIES 3

static int  nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int  nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int  nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int  nlm_stat_to_errno(__be32 stat);
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int  nlmclnt_cancel(struct nlm_host *, int, struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
    u32 cookie = atomic_inc_return(&nlm_cookie);

    memcpy(c->data, &cookie, 4);
    c->len = 4;
}

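/*
 * Reference counting for per-host lock owners. The final put removes the
 * owner from the host's h_lockowners list, drops the host reference and
 * frees the structure.
 */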
static struct nlm_lockowner *
nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
{
    refcount_inc(&lockowner->count);
    return lockowner;
}

static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
{
    if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
        return;
    list_del(&lockowner->list);
    spin_unlock(&lockowner->host->h_lock);
    nlmclnt_release_host(lockowner->host);
    kfree(lockowner);
}

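/*
 * Allocate a pseudo-pid that is unique among this host's lock owners.
 * Called with host->h_lock held.
 */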
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
    struct nlm_lockowner *lockowner;
    list_for_each_entry(lockowner, &host->h_lockowners, list) {
        if (lockowner->pid == pid)
            return -EBUSY;
    }
    return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
    uint32_t res;
    do {
        res = host->h_pidcount++;
    } while (nlm_pidbusy(host, res) < 0);
    return res;
}

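/*
 * Look up (or lazily allocate) the nlm_lockowner for a given fl_owner_t.
 * The allocation is done with h_lock dropped and the list is re-checked
 * afterwards, so a racing allocation is simply freed again.
 */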
static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
    struct nlm_lockowner *lockowner;
    list_for_each_entry(lockowner, &host->h_lockowners, list) {
        if (lockowner->owner != owner)
            continue;
        return nlmclnt_get_lockowner(lockowner);
    }
    return NULL;
}

static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
    struct nlm_lockowner *res, *new = NULL;

    spin_lock(&host->h_lock);
    res = __nlmclnt_find_lockowner(host, owner);
    if (res == NULL) {
        spin_unlock(&host->h_lock);
        new = kmalloc(sizeof(*new), GFP_KERNEL);
        spin_lock(&host->h_lock);
        res = __nlmclnt_find_lockowner(host, owner);
        if (res == NULL && new != NULL) {
            res = new;
            refcount_set(&new->count, 1);
            new->owner = owner;
            new->pid = __nlm_alloc_pid(host);
            new->host = nlm_get_host(host);
            list_add(&new->list, &host->h_lockowners);
            new = NULL;
        }
    }
    spin_unlock(&host->h_lock);
    kfree(new);
    return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
    struct nlm_args *argp = &req->a_args;
    struct nlm_lock *lock = &argp->lock;
    char *nodename = req->a_host->h_rpcclnt->cl_nodename;

    nlmclnt_next_cookie(&argp->cookie);
    memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh));
    lock->caller  = nodename;
    lock->oh.data = req->a_owner;
    lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
                (unsigned int)fl->fl_u.nfs_fl.owner->pid,
                nodename);
    lock->svid = fl->fl_u.nfs_fl.owner->pid;
    lock->fl.fl_start = fl->fl_start;
    lock->fl.fl_end = fl->fl_end;
    lock->fl.fl_type = fl->fl_type;
}

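/*
 * Sanity check when releasing a request: the argument file_lock must no
 * longer carry per-lock operations (fl_ops), otherwise state has leaked.
 */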
static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
    WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
    struct nlm_rqst     *call;
    int         status;
    const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

    call = nlm_alloc_call(host);
    if (call == NULL)
        return -ENOMEM;

    if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
        nlmclnt_ops->nlmclnt_alloc_call(data);

    nlmclnt_locks_init_private(fl, host);
    if (!fl->fl_u.nfs_fl.owner) {
        /* lockowner allocation has failed */
        nlmclnt_release_call(call);
        return -ENOMEM;
    }
    /* Set up the argument struct */
    nlmclnt_setlockargs(call, fl);
    call->a_callback_data = data;

    if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
        if (fl->fl_type != F_UNLCK) {
            call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
            status = nlmclnt_lock(call, fl);
        } else
            status = nlmclnt_unlock(call, fl);
    } else if (IS_GETLK(cmd))
        status = nlmclnt_test(call, fl);
    else
        status = -EINVAL;
    fl->fl_ops->fl_release_private(fl);
    fl->fl_ops = NULL;

    dprintk("lockd: clnt proc returns %d\n", status);
    return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
    struct nlm_rqst *call;

    for(;;) {
        call = kzalloc(sizeof(*call), GFP_KERNEL);
        if (call != NULL) {
            refcount_set(&call->a_count, 1);
            locks_init_lock(&call->a_args.lock.fl);
            locks_init_lock(&call->a_res.lock.fl);
            call->a_host = nlm_get_host(host);
            return call;
        }
        if (signalled())
            break;
        printk("nlm_alloc_call: failed, waiting for memory\n");
        schedule_timeout_interruptible(5*HZ);
    }
    return NULL;
}

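/*
 * Drop a reference to an NLM RPC call. On the final put, give the
 * filesystem's nlmclnt_release_call hook a chance to clean up its callback
 * data, then release the host and free the request.
 */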
void nlmclnt_release_call(struct nlm_rqst *call)
{
    const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

    if (!refcount_dec_and_test(&call->a_count))
        return;
    if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
        nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
    nlmclnt_release_host(call->a_host);
    nlmclnt_release_lockargs(call);
    kfree(call);
}

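/* rpc_release callback: drop the request's reference when the RPC task dies */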
static void nlmclnt_rpc_release(void *data)
{
    nlmclnt_release_call(data);
}

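/*
 * Wait (interruptibly, and freezer-friendly) for up to NLMCLNT_GRACE_WAIT
 * before retrying a request that was rejected because the server is still
 * in its grace period. Returns -EINTR if a signal is pending.
 */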
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
    DEFINE_WAIT(wait);
    int status = -EINTR;

    prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
    if (!signalled()) {
        schedule_timeout(NLMCLNT_GRACE_WAIT);
        try_to_freeze();
        if (!signalled())
            status = 0;
    }
    finish_wait(queue, &wait);
    return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
    struct nlm_host *host = req->a_host;
    struct rpc_clnt *clnt;
    struct nlm_args *argp = &req->a_args;
    struct nlm_res  *resp = &req->a_res;
    struct rpc_message msg = {
        .rpc_argp   = argp,
        .rpc_resp   = resp,
        .rpc_cred   = cred,
    };
    int     status;

    dprintk("lockd: call procedure %d on %s\n",
            (int)proc, host->h_name);

    do {
        if (host->h_reclaiming && !argp->reclaim)
            goto in_grace_period;

        /* If we have no RPC client yet, create one. */
        if ((clnt = nlm_bind_host(host)) == NULL)
            return -ENOLCK;
        msg.rpc_proc = &clnt->cl_procinfo[proc];

        /* Perform the RPC call. If an error occurs, try again */
        if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
            dprintk("lockd: rpc_call returned error %d\n", -status);
            switch (status) {
            case -EPROTONOSUPPORT:
                status = -EINVAL;
                break;
            case -ECONNREFUSED:
            case -ETIMEDOUT:
            case -ENOTCONN:
                nlm_rebind_host(host);
                status = -EAGAIN;
                break;
            case -ERESTARTSYS:
                return signalled() ? -EINTR : status;
            default:
                break;
            }
            break;
        } else
        if (resp->status == nlm_lck_denied_grace_period) {
            dprintk("lockd: server in grace period\n");
            if (argp->reclaim) {
                printk(KERN_WARNING
                     "lockd: spurious grace period reject?!\n");
                return -ENOLCK;
            }
        } else {
            if (!argp->reclaim) {
                /* We appear to be out of the grace period */
                wake_up_all(&host->h_gracewait);
            }
            dprintk("lockd: server returns status %d\n",
                ntohl(resp->status));
            return 0;   /* Okay, call complete */
        }

in_grace_period:
        /*
         * The server has rebooted and appears to be in the grace
         * period during which locks are only allowed to be
         * reclaimed.
         * We can only back off and try again later.
         */
        status = nlm_wait_on_grace(&host->h_gracewait);
    } while (status == 0);

    return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
    struct nlm_host *host = req->a_host;
    struct rpc_clnt *clnt;
    struct rpc_task_setup task_setup_data = {
        .rpc_message = msg,
        .callback_ops = tk_ops,
        .callback_data = req,
        .flags = RPC_TASK_ASYNC,
    };

    dprintk("lockd: call procedure %d on %s (async)\n",
            (int)proc, host->h_name);

    /* If we have no RPC client yet, create one. */
    clnt = nlm_bind_host(host);
    if (clnt == NULL)
        goto out_err;
    msg->rpc_proc = &clnt->cl_procinfo[proc];
    task_setup_data.rpc_client = clnt;

    /* bootstrap and kick off the async RPC call */
    return rpc_run_task(&task_setup_data);
out_err:
    tk_ops->rpc_release(req);
    return ERR_PTR(-ENOLCK);
}

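/*
 * Fire off an async NLM call and immediately drop our reference to the
 * RPC task; the rpc_call_ops take care of completion and release.
 */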
static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
    struct rpc_task *task;

    task = __nlm_async_call(req, proc, msg, tk_ops);
    if (IS_ERR(task))
        return PTR_ERR(task);
    rpc_put_task(task);
    return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
    struct rpc_message msg = {
        .rpc_argp   = &req->a_args,
        .rpc_resp   = &req->a_res,
    };
    return nlm_do_async_call(req, proc, &msg, tk_ops);
}

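/*
 * As above, but used to transmit an NLM *_RES reply: the result structure
 * is passed as the RPC argument.
 */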
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
    struct rpc_message msg = {
        .rpc_argp   = &req->a_res,
    };
    return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
    struct rpc_message msg = {
        .rpc_argp   = &req->a_args,
        .rpc_resp   = &req->a_res,
        .rpc_cred   = cred,
    };
    struct rpc_task *task;
    int err;

    task = __nlm_async_call(req, proc, &msg, tk_ops);
    if (IS_ERR(task))
        return PTR_ERR(task);
    err = rpc_wait_for_completion_task(task);
    rpc_put_task(task);
    return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
    int status;

    status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
    if (status < 0)
        goto out;

    switch (req->a_res.status) {
        case nlm_granted:
            fl->fl_type = F_UNLCK;
            break;
        case nlm_lck_denied:
            /*
             * Report the conflicting lock back to the application.
             */
            fl->fl_start = req->a_res.lock.fl.fl_start;
            fl->fl_end = req->a_res.lock.fl.fl_end;
            fl->fl_type = req->a_res.lock.fl.fl_type;
            fl->fl_pid = -req->a_res.lock.fl.fl_pid;
            break;
        default:
            status = nlm_stat_to_errno(req->a_res.status);
    }
out:
    nlmclnt_release_call(req);
    return status;
}

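/*
 * fl_copy_lock callback: share the lock owner with the copy (taking a
 * reference) and track the copy on the host's list of granted locks.
 */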
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
    spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
    new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
    new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner);
    list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
    spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

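/*
 * fl_release_private callback: unlink the lock from the host's granted
 * list and drop the lock owner reference.
 */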
static void nlmclnt_locks_release_private(struct file_lock *fl)
{
    spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
    list_del(&fl->fl_u.nfs_fl.list);
    spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
    nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
    .fl_copy_lock = nlmclnt_locks_copy_lock,
    .fl_release_private = nlmclnt_locks_release_private,
};

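/*
 * Attach NLM private state to a file_lock: resolve (or create) the lock
 * owner for this fl_owner_t and install the NLM lock operations.
 * fl_u.nfs_fl.owner is left NULL if the lookup or allocation fails.
 */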
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
    fl->fl_u.nfs_fl.state = 0;
    fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner);
    INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
    fl->fl_ops = &nlmclnt_lock_ops;
}

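/* Apply or release the lock locally via the VFS (locks_lock_file_wait) */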
static int do_vfs_lock(struct file_lock *fl)
{
    return locks_lock_file_wait(fl->fl_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *          Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A: Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
    const struct cred *cred = nfs_file_cred(fl->fl_file);
    struct nlm_host *host = req->a_host;
    struct nlm_res  *resp = &req->a_res;
    struct nlm_wait *block = NULL;
    unsigned char fl_flags = fl->fl_flags;
    unsigned char fl_type;
    int status = -ENOLCK;

    if (nsm_monitor(host) < 0)
        goto out;
    req->a_args.state = nsm_local_state;

    fl->fl_flags |= FL_ACCESS;
    status = do_vfs_lock(fl);
    fl->fl_flags = fl_flags;
    if (status < 0)
        goto out;

    block = nlmclnt_prepare_block(host, fl);
again:
    /*
     * Initialise resp->status to a valid non-zero value,
     * since 0 == nlm_lck_granted
     */
    resp->status = nlm_lck_blocked;
    for(;;) {
        /* Reboot protection */
        fl->fl_u.nfs_fl.state = host->h_state;
        status = nlmclnt_call(cred, req, NLMPROC_LOCK);
        if (status < 0)
            break;
        /* Did a reclaimer thread notify us of a server reboot? */
        if (resp->status == nlm_lck_denied_grace_period)
            continue;
        if (resp->status != nlm_lck_blocked)
            break;
        /* Wait on an NLM blocking lock */
        status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
        if (status < 0)
            break;
        if (resp->status != nlm_lck_blocked)
            break;
    }

    /* if we were interrupted while blocking, then cancel the lock request
     * and exit
     */
    if (resp->status == nlm_lck_blocked) {
        if (!req->a_args.block)
            goto out_unlock;
        if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
            goto out_unblock;
    }

    if (resp->status == nlm_granted) {
        down_read(&host->h_rwsem);
        /* Check whether or not the server has rebooted */
        if (fl->fl_u.nfs_fl.state != host->h_state) {
            up_read(&host->h_rwsem);
            goto again;
        }
        /* Ensure the resulting lock will get added to granted list */
        fl->fl_flags |= FL_SLEEP;
        if (do_vfs_lock(fl) < 0)
            printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
        up_read(&host->h_rwsem);
        fl->fl_flags = fl_flags;
        status = 0;
    }
    if (status < 0)
        goto out_unlock;
    /*
     * EAGAIN doesn't make sense for sleeping locks, and in some
     * cases NLM_LCK_DENIED is returned for a permanent error.  So
     * turn it into an ENOLCK.
     */
    if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
        status = -ENOLCK;
    else
        status = nlm_stat_to_errno(resp->status);
out_unblock:
    nlmclnt_finish_block(block);
out:
    nlmclnt_release_call(req);
    return status;
out_unlock:
    /* Fatal error: ensure that we remove the lock altogether */
    dprintk("lockd: lock attempt ended in fatal error.\n"
        "       Attempting to unlock.\n");
    nlmclnt_finish_block(block);
    fl_type = fl->fl_type;
    fl->fl_type = F_UNLCK;
    down_read(&host->h_rwsem);
    do_vfs_lock(fl);
    up_read(&host->h_rwsem);
    fl->fl_type = fl_type;
    fl->fl_flags = fl_flags;
    nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
    return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
        struct nlm_rqst *req)
{
    int     status;

    memset(req, 0, sizeof(*req));
    locks_init_lock(&req->a_args.lock.fl);
    locks_init_lock(&req->a_res.lock.fl);
    req->a_host  = host;

    /* Set up the argument struct */
    nlmclnt_setlockargs(req, fl);
    req->a_args.reclaim = 1;

    status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
    if (status >= 0 && req->a_res.status == nlm_granted)
        return 0;

    printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
                "(errno %d, status %d)\n", fl->fl_pid,
                status, ntohl(req->a_res.status));

    /*
     * FIXME: This is a serious failure. We can
     *
     *  a.  Ignore the problem
     *  b.  Send the owning process some signal (Linux doesn't have
     *  SIGLOST, though...)
     *  c.  Retry the operation
     *
     * Until someone comes up with a simple implementation
     * for b or c, I'll choose option a.
     */

    return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
    struct nlm_host *host = req->a_host;
    struct nlm_res  *resp = &req->a_res;
    int status;
    unsigned char fl_flags = fl->fl_flags;

    /*
     * Note: the server is supposed to either grant us the unlock
     * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
     * case, we want to unlock.
     */
    fl->fl_flags |= FL_EXISTS;
    down_read(&host->h_rwsem);
    status = do_vfs_lock(fl);
    up_read(&host->h_rwsem);
    fl->fl_flags = fl_flags;
    if (status == -ENOENT) {
        status = 0;
        goto out;
    }

    refcount_inc(&req->a_count);
    status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
            NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
    if (status < 0)
        goto out;

    if (resp->status == nlm_granted)
        goto out;

    if (resp->status != nlm_lck_denied_nolocks)
        printk("lockd: unexpected unlock status: %d\n",
            ntohl(resp->status));
    /* What to do now? I'm out of my depth... */
    status = -ENOLCK;
out:
    nlmclnt_release_call(req);
    return status;
}

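/*
 * rpc_call_prepare callback for UNLOCK: give the filesystem a chance to
 * defer starting the RPC via its nlmclnt_unlock_prepare hook.
 */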
static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
    struct nlm_rqst *req = data;
    const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
    bool defer_call = false;

    if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
        defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

    if (!defer_call)
        rpc_call_start(task);
}

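/*
 * rpc_call_done callback for UNLOCK: retry on transient RPC errors (after
 * rebinding) or while the server is in its grace period; give up if the
 * task was signalled or hit a hard error.
 */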
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
    struct nlm_rqst *req = data;
    u32 status = ntohl(req->a_res.status);

    if (RPC_SIGNALLED(task))
        goto die;

    if (task->tk_status < 0) {
        dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
        switch (task->tk_status) {
        case -EACCES:
        case -EIO:
            goto die;
        default:
            goto retry_rebind;
        }
    }
    if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
        rpc_delay(task, NLMCLNT_GRACE_WAIT);
        goto retry_unlock;
    }
    if (status != NLM_LCK_GRANTED)
        printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
    return;
 retry_rebind:
    nlm_rebind_host(req->a_host);
 retry_unlock:
    rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
    .rpc_call_prepare = nlmclnt_unlock_prepare,
    .rpc_call_done = nlmclnt_unlock_callback,
    .rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
    struct nlm_rqst *req;
    int status;

    dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
        "       Attempting to cancel lock.\n");

    req = nlm_alloc_call(host);
    if (!req)
        return -ENOMEM;
    req->a_flags = RPC_TASK_ASYNC;

    nlmclnt_setlockargs(req, fl);
    req->a_args.block = block;

    refcount_inc(&req->a_count);
    status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
            NLMPROC_CANCEL, &nlmclnt_cancel_ops);
    if (status == 0 && req->a_res.status == nlm_lck_denied)
        status = -ENOLCK;
    nlmclnt_release_call(req);
    return status;
}

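/*
 * rpc_call_done callback for CANCEL: GRANTED, DENIED and
 * DENIED_GRACE_PERIOD all count as success; RPC errors and DENIED_NOLOCKS
 * are retried (after rebinding) up to NLMCLNT_MAX_RETRIES times.
 */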
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
    struct nlm_rqst *req = data;
    u32 status = ntohl(req->a_res.status);

    if (RPC_SIGNALLED(task))
        goto die;

    if (task->tk_status < 0) {
        dprintk("lockd: CANCEL call error %d, retrying.\n",
                    task->tk_status);
        goto retry_cancel;
    }

    switch (status) {
    case NLM_LCK_GRANTED:
    case NLM_LCK_DENIED_GRACE_PERIOD:
    case NLM_LCK_DENIED:
        /* Everything's good */
        break;
    case NLM_LCK_DENIED_NOLOCKS:
        dprintk("lockd: CANCEL failed (server has no locks)\n");
        goto retry_cancel;
    default:
        printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
            status);
    }

die:
    return;

retry_cancel:
    /* Don't ever retry more than 3 times */
    if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
        goto die;
    nlm_rebind_host(req->a_host);
    rpc_restart_call(task);
    rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
    .rpc_call_done = nlmclnt_cancel_callback,
    .rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
    switch(ntohl(status)) {
    case NLM_LCK_GRANTED:
        return 0;
    case NLM_LCK_DENIED:
        return -EAGAIN;
    case NLM_LCK_DENIED_NOLOCKS:
    case NLM_LCK_DENIED_GRACE_PERIOD:
        return -ENOLCK;
    case NLM_LCK_BLOCKED:
        printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
        return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
    case NLM_DEADLCK:
        return -EDEADLK;
    case NLM_ROFS:
        return -EROFS;
    case NLM_STALE_FH:
        return -ESTALE;
    case NLM_FBIG:
        return -EOVERFLOW;
    case NLM_FAILED:
        return -ENOLCK;
#endif
    }
    printk(KERN_NOTICE "lockd: unexpected server status %d\n",
         ntohl(status));
    return -ENOLCK;
}