#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/nfs_ssc.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zeroed */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);

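/*
 * Protects the global file_hashtbl and the hashing/unhashing of
 * delegations (see hash_delegation_locked() and
 * unhash_delegation_locked() below).
 */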
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

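/*
 * Woken from nfs4_put_stid() and put_ol_stateid_locked() when a stateid
 * reference is dropped but the stateid is not yet free, so that waiters
 * (e.g. a pending CLOSE) can re-check the reference count.
 */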
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

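/*
 * Woken when an expired client's last RPC user goes away and again when
 * the client's resources are finally torn down (see __destroy_client()).
 */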
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static struct workqueue_struct *laundry_wq;

int nfsd4_create_laundry_wq(void)
{
	int rc = 0;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL)
		rc = -ENOMEM;
	return rc;
}

void nfsd4_destroy_laundry_wq(void)
{
	destroy_workqueue(laundry_wq);
}

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	clp->cl_state = NFSD4_ACTIVE;
	return nfs_ok;
}

static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
	clp->cl_state = NFSD4_ACTIVE;
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			WARN_ON(list_empty(&cur->nbl_lru));
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			kref_init(&nbl->nbl_kref);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_nbl(struct kref *kref)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
	kfree(nbl);
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kref_put(&nbl->nbl_kref, free_nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		WARN_ON(list_empty(&nbl->nbl_lru));
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare = nfsd4_cb_notify_lock_prepare,
	.done = nfsd4_cb_notify_lock_done,
	.release = nfsd4_cb_notify_lock_release,
};

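/*
 * OPEN share access and deny modes are tracked per stateid in the
 * st_access_bmap and st_deny_bmap bitmaps; the helpers below convert
 * between those bitmaps and NFS4_SHARE_* mode values.  For example,
 * set_access(NFS4_SHARE_ACCESS_READ, stp) sets bit 1 and
 * set_access(NFS4_SHARE_ACCESS_BOTH, stp) sets bit 3.
 */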
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return nfsd_file_get(f->fi_fds[oflag]);
	return NULL;
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
{
	struct nfsd_file *ret = NULL;

	spin_lock(&f->fi_lock);
	if (f->fi_deleg_file)
		ret = nfsd_file_get(f->fi_deleg_file);
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

#define OWNER_HASH_BITS 8
#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)

static unsigned int file_hashval(struct svc_fh *fh)
{
	struct inode *inode = d_inode(fh->fh_dentry);

	return (unsigned int)hash_long(inode->i_ino, FILE_HASH_BITS);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

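/*
 * Check whether a proposed access/deny mode conflicts only with stateids
 * held by clients that can be expired.  If so, kick the laundromat to
 * expire them and report the conflict as resolvable; otherwise report a
 * real conflict.  Caller holds fp->fi_lock.
 */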
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
	struct nfs4_ol_stateid *st;
	bool resolvable = true;
	unsigned char bmap;
	struct nfsd_net *nn;
	struct nfs4_client *clp;

	lockdep_assert_held(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		if (st->st_openstp)
			continue;
		if (st == stp && new_stp)
			continue;

		bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
		if (!(access & bmap_to_share_mode(bmap)))
			continue;
		clp = st->st_stid.sc_client;
		if (try_to_expire_client(clp))
			continue;
		resolvable = false;
		break;
	}
	if (resolvable) {
		clp = stp->st_stid.sc_client;
		nn = net_generic(clp->net, nfsd_net_id);
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
	}
	return resolvable;
}

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	if (deny) {
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_close(f1);
		if (f2)
			nfsd_file_close(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

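/*
 * Per-(client, file) open/delegation state.  When the last reference for
 * a client on a file goes away, any layouts the client holds on that
 * file are returned (see put_clnt_odstate()).
 */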
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);

	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;

	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

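/*
 * Generate a unique stateid for a server-side COPY or COPY_NOTIFY,
 * allocated from the per-net s2s_cp_stateids IDR.
 */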
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char sc_type)
{
	int new_id;

	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->sc_type = sc_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->stid.si_opaque.so_id = new_id;
	stid->stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.sc_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

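/*
 * Delegations are not handed out for a file while its filehandle is
 * present in this pair of 256-bit bloom filters.  block_delegations()
 * hashes the filehandle into the "new" filter; delegation_blocked()
 * tests both.  The filters are swapped, and the new one cleared, about
 * every 30 seconds, under blocked_delegations_lock.
 */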
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int entries, old_entries;
	time64_t swap_time;
	int new;
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_raw, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&fp->fi_fhandle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	dp->dl_recalled = false;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

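/*
 * nfs4_delegation_exists - check whether @clp already holds a delegation
 * on @fp.  Caller must hold both state_lock and fp->fi_lock.
 */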
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

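/*
 * hash_delegation_locked - add @dp to the per-file and per-client lists.
 * Returns 0 on success, or -EAGAIN if this client already holds a
 * delegation on the file.  Caller holds state_lock and fp->fi_lock.
 */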
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;

	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}

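/*
 * Put the persistent reference on an already unhashed open/lock stateid
 * while holding cl_lock; if it was the last reference, queue the stateid
 * on @reaplist for later freeing by free_ol_stateid_reaplist().
 */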
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				  struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				       struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

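/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */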
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

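/*
 * Only the portion of a cached reply after the initial SEQUENCE op needs
 * to be stored, so a slot can be smaller than ca_maxresponsesize_cached
 * by the minimal RPC reply header (24 bytes), compound header (12 bytes)
 * and SEQUENCE reply (44 bytes):
 */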
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;
	unsigned int scale_factor;

	spin_lock(&nfsd_drc_lock);
	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	else
		total_avail = 0;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);

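	/*
	 * Never take more than total_avail/scale_factor for one session,
	 * where scale_factor is at least 8 and grows with the number of
	 * nfsd threads; but always leave room for at least one slot.
	 */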
	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail/scale_factor);
	num = min_t(int, num, avail / slotsize);
	num = max_t(int, num, 1);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;

	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	trace_nfsd_cb_lost(clp);

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		nfsd4_conn_lost(&conn->cn_xpt_user);

	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);

		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);

	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
			  __be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
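	/*
	 * Only the low 32 bits of the boot time are compared: a clientid
	 * issued by an earlier instance of the server is stale.
	 */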
2044 if (clid->cl_boot == (u32)nn->boot_time)
2045 return 0;
2046 trace_nfsd_clid_stale(clid);
2047 return 1;
2048 }
2049
2050
2051
2052
2053
2054
2055 static struct nfs4_client *alloc_client(struct xdr_netobj name,
2056 struct nfsd_net *nn)
2057 {
2058 struct nfs4_client *clp;
2059 int i;
2060
2061 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
2062 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
2063 return NULL;
2064 }
2065 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2066 if (clp == NULL)
2067 return NULL;
2068 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2069 if (clp->cl_name.data == NULL)
2070 goto err_no_name;
2071 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2072 sizeof(struct list_head),
2073 GFP_KERNEL);
2074 if (!clp->cl_ownerstr_hashtbl)
2075 goto err_no_hashtbl;
2076 for (i = 0; i < OWNER_HASH_SIZE; i++)
2077 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2078 INIT_LIST_HEAD(&clp->cl_sessions);
2079 idr_init(&clp->cl_stateids);
2080 atomic_set(&clp->cl_rpc_users, 0);
2081 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2082 clp->cl_state = NFSD4_ACTIVE;
2083 atomic_inc(&nn->nfs4_client_count);
2084 atomic_set(&clp->cl_delegs_in_recall, 0);
2085 INIT_LIST_HEAD(&clp->cl_idhash);
2086 INIT_LIST_HEAD(&clp->cl_openowners);
2087 INIT_LIST_HEAD(&clp->cl_delegations);
2088 INIT_LIST_HEAD(&clp->cl_lru);
2089 INIT_LIST_HEAD(&clp->cl_revoked);
2090 #ifdef CONFIG_NFSD_PNFS
2091 INIT_LIST_HEAD(&clp->cl_lo_states);
2092 #endif
2093 INIT_LIST_HEAD(&clp->async_copies);
2094 spin_lock_init(&clp->async_lock);
2095 spin_lock_init(&clp->cl_lock);
2096 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2097 return clp;
2098 err_no_hashtbl:
2099 kfree(clp->cl_name.data);
2100 err_no_name:
2101 kmem_cache_free(client_slab, clp);
2102 return NULL;
2103 }
2104
2105 static void __free_client(struct kref *k)
2106 {
2107 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2108 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2109
2110 free_svc_cred(&clp->cl_cred);
2111 kfree(clp->cl_ownerstr_hashtbl);
2112 kfree(clp->cl_name.data);
2113 kfree(clp->cl_nii_domain.data);
2114 kfree(clp->cl_nii_name.data);
2115 idr_destroy(&clp->cl_stateids);
2116 kmem_cache_free(client_slab, clp);
2117 }
2118
2119 static void drop_client(struct nfs4_client *clp)
2120 {
2121 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2122 }
2123
2124 static void
2125 free_client(struct nfs4_client *clp)
2126 {
2127 while (!list_empty(&clp->cl_sessions)) {
2128 struct nfsd4_session *ses;
2129 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2130 se_perclnt);
2131 list_del(&ses->se_perclnt);
2132 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2133 free_session(ses);
2134 }
2135 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2136 if (clp->cl_nfsd_dentry) {
2137 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2138 clp->cl_nfsd_dentry = NULL;
2139 wake_up_all(&expiry_wq);
2140 }
2141 drop_client(clp);
2142 }
2143
2144
2145 static void
2146 unhash_client_locked(struct nfs4_client *clp)
2147 {
2148 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2149 struct nfsd4_session *ses;
2150
2151 lockdep_assert_held(&nn->client_lock);
2152
2153
2154 clp->cl_time = 0;
2155
2156 if (!list_empty(&clp->cl_idhash)) {
2157 list_del_init(&clp->cl_idhash);
2158 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2159 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2160 else
2161 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2162 }
2163 list_del_init(&clp->cl_lru);
2164 spin_lock(&clp->cl_lock);
2165 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2166 list_del_init(&ses->se_hash);
2167 spin_unlock(&clp->cl_lock);
2168 }
2169
2170 static void
2171 unhash_client(struct nfs4_client *clp)
2172 {
2173 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2174
2175 spin_lock(&nn->client_lock);
2176 unhash_client_locked(clp);
2177 spin_unlock(&nn->client_lock);
2178 }
2179
2180 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2181 {
2182 if (atomic_read(&clp->cl_rpc_users))
2183 return nfserr_jukebox;
2184 unhash_client_locked(clp);
2185 return nfs_ok;
2186 }
2187
2188 static void
2189 __destroy_client(struct nfs4_client *clp)
2190 {
2191 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2192 int i;
2193 struct nfs4_openowner *oo;
2194 struct nfs4_delegation *dp;
2195 struct list_head reaplist;
2196
2197 INIT_LIST_HEAD(&reaplist);
2198 spin_lock(&state_lock);
2199 while (!list_empty(&clp->cl_delegations)) {
2200 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2201 WARN_ON(!unhash_delegation_locked(dp));
2202 list_add(&dp->dl_recall_lru, &reaplist);
2203 }
2204 spin_unlock(&state_lock);
2205 while (!list_empty(&reaplist)) {
2206 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2207 list_del_init(&dp->dl_recall_lru);
2208 destroy_unhashed_deleg(dp);
2209 }
2210 while (!list_empty(&clp->cl_revoked)) {
2211 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2212 list_del_init(&dp->dl_recall_lru);
2213 nfs4_put_stid(&dp->dl_stid);
2214 }
2215 while (!list_empty(&clp->cl_openowners)) {
2216 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2217 nfs4_get_stateowner(&oo->oo_owner);
2218 release_openowner(oo);
2219 }
2220 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2221 struct nfs4_stateowner *so, *tmp;
2222
2223 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2224 so_strhash) {
2225
2226 WARN_ON_ONCE(so->so_is_open_owner);
2227 remove_blocked_locks(lockowner(so));
2228 }
2229 }
2230 nfsd4_return_all_client_layouts(clp);
2231 nfsd4_shutdown_copy(clp);
2232 nfsd4_shutdown_callback(clp);
2233 if (clp->cl_cb_conn.cb_xprt)
2234 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2235 atomic_add_unless(&nn->nfs4_client_count, -1, 0);
2236 free_client(clp);
2237 wake_up_all(&expiry_wq);
2238 }
2239
2240 static void
2241 destroy_client(struct nfs4_client *clp)
2242 {
2243 unhash_client(clp);
2244 __destroy_client(clp);
2245 }
2246
2247 static void inc_reclaim_complete(struct nfs4_client *clp)
2248 {
2249 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2250
2251 if (!nn->track_reclaim_completes)
2252 return;
2253 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2254 return;
2255 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2256 nn->reclaim_str_hashtbl_size) {
2257 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2258 clp->net->ns.inum);
2259 nfsd4_end_grace(nn);
2260 }
2261 }
2262
2263 static void expire_client(struct nfs4_client *clp)
2264 {
2265 unhash_client(clp);
2266 nfsd4_client_record_remove(clp);
2267 __destroy_client(clp);
2268 }
2269
2270 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2271 {
2272 memcpy(target->cl_verifier.data, source->data,
2273 sizeof(target->cl_verifier.data));
2274 }
2275
2276 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2277 {
2278 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2279 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2280 }
2281
2282 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2283 {
2284 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2285 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2286 GFP_KERNEL);
2287 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2288 if ((source->cr_principal && !target->cr_principal) ||
2289 (source->cr_raw_principal && !target->cr_raw_principal) ||
2290 (source->cr_targ_princ && !target->cr_targ_princ))
2291 return -ENOMEM;
2292
2293 target->cr_flavor = source->cr_flavor;
2294 target->cr_uid = source->cr_uid;
2295 target->cr_gid = source->cr_gid;
2296 target->cr_group_info = source->cr_group_info;
2297 get_group_info(target->cr_group_info);
2298 target->cr_gss_mech = source->cr_gss_mech;
2299 if (source->cr_gss_mech)
2300 gss_mech_get(source->cr_gss_mech);
2301 return 0;
2302 }
2303
2304 static int
2305 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2306 {
2307 if (o1->len < o2->len)
2308 return -1;
2309 if (o1->len > o2->len)
2310 return 1;
2311 return memcmp(o1->data, o2->data, o1->len);
2312 }
2313
2314 static int
2315 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2316 {
2317 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2318 }
2319
2320 static int
2321 same_clid(clientid_t *cl1, clientid_t *cl2)
2322 {
2323 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2324 }
2325
2326 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2327 {
2328 int i;
2329
2330 if (g1->ngroups != g2->ngroups)
2331 return false;
2332 for (i=0; i<g1->ngroups; i++)
2333 if (!gid_eq(g1->gid[i], g2->gid[i]))
2334 return false;
2335 return true;
2336 }
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
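/*
 * RFC 3530 requires CLID_INUSE when the "principal" used to establish a
 * client changes.  We approximate "same principal" by comparing uid, gids
 * and the GSS principal string, and we also refuse to mix GSS and non-GSS
 * use of the same client name, since cr_principal may be NULL even for a
 * GSS request.
 */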
2347 static bool is_gss_cred(struct svc_cred *cr)
2348 {
2349
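/* Every GSS pseudoflavor is numerically greater than RPC_AUTH_MAXFLAVOR. */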
2350 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2351 }
2352
2353
2354 static bool
2355 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2356 {
2357 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2358 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2359 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2360 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2361 return false;
2362
2363 if (cr1->cr_principal == cr2->cr_principal)
2364 return true;
2365 if (!cr1->cr_principal || !cr2->cr_principal)
2366 return false;
2367 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2368 }
2369
2370 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2371 {
2372 struct svc_cred *cr = &rqstp->rq_cred;
2373 u32 service;
2374
2375 if (!cr->cr_gss_mech)
2376 return false;
2377 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2378 return service == RPC_GSS_SVC_INTEGRITY ||
2379 service == RPC_GSS_SVC_PRIVACY;
2380 }
2381
2382 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2383 {
2384 struct svc_cred *cr = &rqstp->rq_cred;
2385
2386 if (!cl->cl_mach_cred)
2387 return true;
2388 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2389 return false;
2390 if (!svc_rqst_integrity_protected(rqstp))
2391 return false;
2392 if (cl->cl_cred.cr_raw_principal)
2393 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2394 cr->cr_raw_principal);
2395 if (!cr->cr_principal)
2396 return false;
2397 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2398 }
2399
2400 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2401 {
2402 __be32 verf[2];
2403
2404
2405
2406
2407
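/*
 * The confirm verifier is opaque to the client, so no byte-swapping is
 * needed; __force just keeps sparse quiet about the cast.
 */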
2408 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2409 verf[1] = (__force __be32)nn->clverifier_counter++;
2410 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2411 }
2412
2413 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2414 {
2415 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2416 clp->cl_clientid.cl_id = nn->clientid_counter++;
2417 gen_confirm(clp, nn);
2418 }
2419
2420 static struct nfs4_stid *
2421 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2422 {
2423 struct nfs4_stid *ret;
2424
2425 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2426 if (!ret || !ret->sc_type)
2427 return NULL;
2428 return ret;
2429 }
2430
2431 static struct nfs4_stid *
2432 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2433 {
2434 struct nfs4_stid *s;
2435
2436 spin_lock(&cl->cl_lock);
2437 s = find_stateid_locked(cl, t);
2438 if (s != NULL) {
2439 if (typemask & s->sc_type)
2440 refcount_inc(&s->sc_count);
2441 else
2442 s = NULL;
2443 }
2444 spin_unlock(&cl->cl_lock);
2445 return s;
2446 }
2447
2448 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2449 {
2450 struct nfsdfs_client *nc;
2451 nc = get_nfsdfs_client(inode);
2452 if (!nc)
2453 return NULL;
2454 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2455 }
2456
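/*
 * Emit @len bytes of @data as a double-quoted string, hex-escaping quotes,
 * backslashes and unprintable bytes, for the per-client nfsdfs files below.
 */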
2457 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2458 {
2459 seq_printf(m, "\"");
2460 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2461 seq_printf(m, "\"");
2462 }
2463
2464 static const char *cb_state2str(int state)
2465 {
2466 switch (state) {
2467 case NFSD4_CB_UP:
2468 return "UP";
2469 case NFSD4_CB_UNKNOWN:
2470 return "UNKNOWN";
2471 case NFSD4_CB_DOWN:
2472 return "DOWN";
2473 case NFSD4_CB_FAULT:
2474 return "FAULT";
2475 }
2476 return "UNDEFINED";
2477 }
2478
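/*
 * Show method for the per-client nfsdfs "info" file (typically
 * /proc/fs/nfsd/clients/<id>/info): one attribute per line.
 */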
2479 static int client_info_show(struct seq_file *m, void *v)
2480 {
2481 struct inode *inode = m->private;
2482 struct nfs4_client *clp;
2483 u64 clid;
2484
2485 clp = get_nfsdfs_clp(inode);
2486 if (!clp)
2487 return -ENXIO;
2488 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2489 seq_printf(m, "clientid: 0x%llx\n", clid);
2490 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2491
2492 if (clp->cl_state == NFSD4_COURTESY)
2493 seq_puts(m, "status: courtesy\n");
2494 else if (clp->cl_state == NFSD4_EXPIRABLE)
2495 seq_puts(m, "status: expirable\n");
2496 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2497 seq_puts(m, "status: confirmed\n");
2498 else
2499 seq_puts(m, "status: unconfirmed\n");
2500 seq_printf(m, "seconds from last renew: %lld\n",
2501 ktime_get_boottime_seconds() - clp->cl_time);
2502 seq_printf(m, "name: ");
2503 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2504 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2505 if (clp->cl_nii_domain.data) {
2506 seq_printf(m, "Implementation domain: ");
2507 seq_quote_mem(m, clp->cl_nii_domain.data,
2508 clp->cl_nii_domain.len);
2509 seq_printf(m, "\nImplementation name: ");
2510 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2511 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2512 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2513 }
2514 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2515 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2516 drop_client(clp);
2517
2518 return 0;
2519 }
2520
2521 static int client_info_open(struct inode *inode, struct file *file)
2522 {
2523 return single_open(file, client_info_show, inode);
2524 }
2525
2526 static const struct file_operations client_info_fops = {
2527 .open = client_info_open,
2528 .read = seq_read,
2529 .llseek = seq_lseek,
2530 .release = single_release,
2531 };
2532
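/*
 * seq_file iterator for the per-client "states" file: walk the client's
 * stateid IDR under cl_lock, with *pos tracking the next stateid id.
 */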
2533 static void *states_start(struct seq_file *s, loff_t *pos)
2534 __acquires(&clp->cl_lock)
2535 {
2536 struct nfs4_client *clp = s->private;
2537 unsigned long id = *pos;
2538 void *ret;
2539
2540 spin_lock(&clp->cl_lock);
2541 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2542 *pos = id;
2543 return ret;
2544 }
2545
2546 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2547 {
2548 struct nfs4_client *clp = s->private;
2549 unsigned long id = *pos;
2550 void *ret;
2551
2553 id++;
2554 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2555 *pos = id;
2556 return ret;
2557 }
2558
2559 static void states_stop(struct seq_file *s, void *v)
2560 __releases(&clp->cl_lock)
2561 {
2562 struct nfs4_client *clp = s->private;
2563
2564 spin_unlock(&clp->cl_lock);
2565 }
2566
2567 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2568 {
2569 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2570 }
2571
2572 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2573 {
2574 struct inode *inode = file_inode(f->nf_file);
2575
2576 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2577 MAJOR(inode->i_sb->s_dev),
2578 MINOR(inode->i_sb->s_dev),
2579 inode->i_ino);
2580 }
2581
2582 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2583 {
2584 seq_printf(s, "owner: ");
2585 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2586 }
2587
2588 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2589 {
2590 seq_printf(s, "0x%.8x", stid->si_generation);
2591 seq_printf(s, "%12phN", &stid->si_opaque);
2592 }
2593
2594 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2595 {
2596 struct nfs4_ol_stateid *ols;
2597 struct nfs4_file *nf;
2598 struct nfsd_file *file;
2599 struct nfs4_stateowner *oo;
2600 unsigned int access, deny;
2601
2602 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2603 return 0;
2604 ols = openlockstateid(st);
2605 oo = ols->st_stateowner;
2606 nf = st->sc_file;
2607 file = find_any_file(nf);
2608 if (!file)
2609 return 0;
2610
2611 seq_printf(s, "- ");
2612 nfs4_show_stateid(s, &st->sc_stateid);
2613 seq_printf(s, ": { type: open, ");
2614
2615 access = bmap_to_share_mode(ols->st_access_bmap);
2616 deny = bmap_to_share_mode(ols->st_deny_bmap);
2617
2618 seq_printf(s, "access: %s%s, ",
2619 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2620 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2621 seq_printf(s, "deny: %s%s, ",
2622 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2623 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2624
2625 nfs4_show_superblock(s, file);
2626 seq_printf(s, ", ");
2627 nfs4_show_fname(s, file);
2628 seq_printf(s, ", ");
2629 nfs4_show_owner(s, oo);
2630 seq_printf(s, " }\n");
2631 nfsd_file_put(file);
2632
2633 return 0;
2634 }
2635
2636 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2637 {
2638 struct nfs4_ol_stateid *ols;
2639 struct nfs4_file *nf;
2640 struct nfsd_file *file;
2641 struct nfs4_stateowner *oo;
2642
2643 ols = openlockstateid(st);
2644 oo = ols->st_stateowner;
2645 nf = st->sc_file;
2646 file = find_any_file(nf);
2647 if (!file)
2648 return 0;
2649
2650 seq_printf(s, "- ");
2651 nfs4_show_stateid(s, &st->sc_stateid);
2652 seq_printf(s, ": { type: lock, ");
2653
2654
2655
2656
2657
2658
2659
2660
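/*
 * Unlike an open stateid, there are no share access/deny modes to show
 * here: a lock stateid represents one owner's locking state on the file
 * and may cover any number of byte ranges.
 */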
2661 nfs4_show_superblock(s, file);
2662
2663 seq_printf(s, ", ");
2664 nfs4_show_fname(s, file);
2665 seq_printf(s, ", ");
2666 nfs4_show_owner(s, oo);
2667 seq_printf(s, " }\n");
2668 nfsd_file_put(file);
2669
2670 return 0;
2671 }
2672
2673 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2674 {
2675 struct nfs4_delegation *ds;
2676 struct nfs4_file *nf;
2677 struct nfsd_file *file;
2678
2679 ds = delegstateid(st);
2680 nf = st->sc_file;
2681 file = find_deleg_file(nf);
2682 if (!file)
2683 return 0;
2684
2685 seq_printf(s, "- ");
2686 nfs4_show_stateid(s, &st->sc_stateid);
2687 seq_printf(s, ": { type: deleg, ");
2688
2689
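/* Only read delegations are currently granted, so this is effectively always "r". */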
2690 seq_printf(s, "access: %s, ",
2691 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2692
2693
2694
2695 nfs4_show_superblock(s, file);
2696 seq_printf(s, ", ");
2697 nfs4_show_fname(s, file);
2698 seq_printf(s, " }\n");
2699 nfsd_file_put(file);
2700
2701 return 0;
2702 }
2703
2704 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2705 {
2706 struct nfs4_layout_stateid *ls;
2707 struct nfsd_file *file;
2708
2709 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2710 file = ls->ls_file;
2711
2712 seq_printf(s, "- ");
2713 nfs4_show_stateid(s, &st->sc_stateid);
2714 seq_printf(s, ": { type: layout, ");
2715
2716
2717
2718 nfs4_show_superblock(s, file);
2719 seq_printf(s, ", ");
2720 nfs4_show_fname(s, file);
2721 seq_printf(s, " }\n");
2722
2723 return 0;
2724 }
2725
2726 static int states_show(struct seq_file *s, void *v)
2727 {
2728 struct nfs4_stid *st = v;
2729
2730 switch (st->sc_type) {
2731 case NFS4_OPEN_STID:
2732 return nfs4_show_open(s, st);
2733 case NFS4_LOCK_STID:
2734 return nfs4_show_lock(s, st);
2735 case NFS4_DELEG_STID:
2736 return nfs4_show_deleg(s, st);
2737 case NFS4_LAYOUT_STID:
2738 return nfs4_show_layout(s, st);
2739 default:
2740 return 0;
2741 }
2742
2743 }
2744
2745 static struct seq_operations states_seq_ops = {
2746 .start = states_start,
2747 .next = states_next,
2748 .stop = states_stop,
2749 .show = states_show
2750 };
2751
2752 static int client_states_open(struct inode *inode, struct file *file)
2753 {
2754 struct seq_file *s;
2755 struct nfs4_client *clp;
2756 int ret;
2757
2758 clp = get_nfsdfs_clp(inode);
2759 if (!clp)
2760 return -ENXIO;
2761
2762 ret = seq_open(file, &states_seq_ops);
2763 if (ret)
2764 return ret;
2765 s = file->private_data;
2766 s->private = clp;
2767 return 0;
2768 }
2769
2770 static int client_opens_release(struct inode *inode, struct file *file)
2771 {
2772 struct seq_file *m = file->private_data;
2773 struct nfs4_client *clp = m->private;
2774
2775
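/* Drop the reference taken in client_states_open(). */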
2776 drop_client(clp);
2777 return 0;
2778 }
2779
2780 static const struct file_operations client_states_fops = {
2781 .open = client_states_open,
2782 .read = seq_read,
2783 .llseek = seq_lseek,
2784 .release = client_opens_release,
2785 };
2786
2787
2788
2789
2790
2791
2792
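/*
 * Normally clients that still hold state are not destroyed, but writing
 * "expire" to the per-client ctl file lets the administrator force it.
 * Wait for in-flight RPCs referencing the client to drain, so the caller
 * is guaranteed the client's locks are gone by the time the write returns.
 */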
2793 static void force_expire_client(struct nfs4_client *clp)
2794 {
2795 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2796 bool already_expired;
2797
2798 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2799
2800 spin_lock(&nn->client_lock);
2801 clp->cl_time = 0;
2802 spin_unlock(&nn->client_lock);
2803
2804 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2805 spin_lock(&nn->client_lock);
2806 already_expired = list_empty(&clp->cl_lru);
2807 if (!already_expired)
2808 unhash_client_locked(clp);
2809 spin_unlock(&nn->client_lock);
2810
2811 if (!already_expired)
2812 expire_client(clp);
2813 else
2814 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2815 }
2816
2817 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2818 size_t size, loff_t *pos)
2819 {
2820 char *data;
2821 struct nfs4_client *clp;
2822
2823 data = simple_transaction_get(file, buf, size);
2824 if (IS_ERR(data))
2825 return PTR_ERR(data);
2826 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2827 return -EINVAL;
2828 clp = get_nfsdfs_clp(file_inode(file));
2829 if (!clp)
2830 return -ENXIO;
2831 force_expire_client(clp);
2832 drop_client(clp);
2833 return 7;
2834 }
2835
2836 static const struct file_operations client_ctl_fops = {
2837 .write = client_ctl_write,
2838 .release = simple_transaction_release,
2839 };
2840
2841 static const struct tree_descr client_files[] = {
2842 [0] = {"info", &client_info_fops, S_IRUSR},
2843 [1] = {"states", &client_states_fops, S_IRUSR},
2844 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
2845 [3] = {""},
2846 };
2847
2848 static struct nfs4_client *create_client(struct xdr_netobj name,
2849 struct svc_rqst *rqstp, nfs4_verifier *verf)
2850 {
2851 struct nfs4_client *clp;
2852 struct sockaddr *sa = svc_addr(rqstp);
2853 int ret;
2854 struct net *net = SVC_NET(rqstp);
2855 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2856 struct dentry *dentries[ARRAY_SIZE(client_files)];
2857
2858 clp = alloc_client(name, nn);
2859 if (clp == NULL)
2860 return NULL;
2861
2862 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2863 if (ret) {
2864 free_client(clp);
2865 return NULL;
2866 }
2867 gen_clid(clp, nn);
2868 kref_init(&clp->cl_nfsdfs.cl_ref);
2869 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2870 clp->cl_time = ktime_get_boottime_seconds();
2871 clear_bit(0, &clp->cl_cb_slot_busy);
2872 copy_verf(clp, verf);
2873 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2874 clp->cl_cb_session = NULL;
2875 clp->net = net;
2876 clp->cl_nfsd_dentry = nfsd_client_mkdir(
2877 nn, &clp->cl_nfsdfs,
2878 clp->cl_clientid.cl_id - nn->clientid_base,
2879 client_files, dentries);
2880 clp->cl_nfsd_info_dentry = dentries[0];
2881 if (!clp->cl_nfsd_dentry) {
2882 free_client(clp);
2883 return NULL;
2884 }
2885 return clp;
2886 }
2887
2888 static void
2889 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2890 {
2891 struct rb_node **new = &(root->rb_node), *parent = NULL;
2892 struct nfs4_client *clp;
2893
2894 while (*new) {
2895 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2896 parent = *new;
2897
2898 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2899 new = &((*new)->rb_left);
2900 else
2901 new = &((*new)->rb_right);
2902 }
2903
2904 rb_link_node(&new_clp->cl_namenode, parent, new);
2905 rb_insert_color(&new_clp->cl_namenode, root);
2906 }
2907
2908 static struct nfs4_client *
2909 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2910 {
2911 int cmp;
2912 struct rb_node *node = root->rb_node;
2913 struct nfs4_client *clp;
2914
2915 while (node) {
2916 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2917 cmp = compare_blob(&clp->cl_name, name);
2918 if (cmp > 0)
2919 node = node->rb_left;
2920 else if (cmp < 0)
2921 node = node->rb_right;
2922 else
2923 return clp;
2924 }
2925 return NULL;
2926 }
2927
2928 static void
2929 add_to_unconfirmed(struct nfs4_client *clp)
2930 {
2931 unsigned int idhashval;
2932 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2933
2934 lockdep_assert_held(&nn->client_lock);
2935
2936 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2937 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2938 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2939 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2940 renew_client_locked(clp);
2941 }
2942
2943 static void
2944 move_to_confirmed(struct nfs4_client *clp)
2945 {
2946 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2947 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2948
2949 lockdep_assert_held(&nn->client_lock);
2950
2951 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2952 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2953 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2954 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2955 trace_nfsd_clid_confirmed(&clp->cl_clientid);
2956 renew_client_locked(clp);
2957 }
2958
2959 static struct nfs4_client *
2960 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2961 {
2962 struct nfs4_client *clp;
2963 unsigned int idhashval = clientid_hashval(clid->cl_id);
2964
2965 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2966 if (same_clid(&clp->cl_clientid, clid)) {
2967 if ((bool)clp->cl_minorversion != sessions)
2968 return NULL;
2969 renew_client_locked(clp);
2970 return clp;
2971 }
2972 }
2973 return NULL;
2974 }
2975
2976 static struct nfs4_client *
2977 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2978 {
2979 struct list_head *tbl = nn->conf_id_hashtbl;
2980
2981 lockdep_assert_held(&nn->client_lock);
2982 return find_client_in_id_table(tbl, clid, sessions);
2983 }
2984
2985 static struct nfs4_client *
2986 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2987 {
2988 struct list_head *tbl = nn->unconf_id_hashtbl;
2989
2990 lockdep_assert_held(&nn->client_lock);
2991 return find_client_in_id_table(tbl, clid, sessions);
2992 }
2993
2994 static bool clp_used_exchangeid(struct nfs4_client *clp)
2995 {
2996 return clp->cl_exchange_flags != 0;
2997 }
2998
2999 static struct nfs4_client *
3000 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3001 {
3002 lockdep_assert_held(&nn->client_lock);
3003 return find_clp_in_name_tree(name, &nn->conf_name_tree);
3004 }
3005
3006 static struct nfs4_client *
3007 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3008 {
3009 lockdep_assert_held(&nn->client_lock);
3010 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
3011 }
3012
3013 static void
3014 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3015 {
3016 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3017 struct sockaddr *sa = svc_addr(rqstp);
3018 u32 scopeid = rpc_get_scope_id(sa);
3019 unsigned short expected_family;
3020
3021
3022 if (se->se_callback_netid_len == 3 &&
3023 !memcmp(se->se_callback_netid_val, "tcp", 3))
3024 expected_family = AF_INET;
3025 else if (se->se_callback_netid_len == 4 &&
3026 !memcmp(se->se_callback_netid_val, "tcp6", 4))
3027 expected_family = AF_INET6;
3028 else
3029 goto out_err;
3030
3031 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3032 se->se_callback_addr_len,
3033 (struct sockaddr *)&conn->cb_addr,
3034 sizeof(conn->cb_addr));
3035
3036 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
3037 goto out_err;
3038
3039 if (conn->cb_addr.ss_family == AF_INET6)
3040 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
3041
3042 conn->cb_prog = se->se_callback_prog;
3043 conn->cb_ident = se->se_callback_ident;
3044 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
3045 trace_nfsd_cb_args(clp, conn);
3046 return;
3047 out_err:
3048 conn->cb_addr.ss_family = AF_UNSPEC;
3049 conn->cb_addrlen = 0;
3050 trace_nfsd_cb_nodelegs(clp);
3051 return;
3052 }
3053
3054
3055
3056
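/*
 * Cache this compound's reply in the session slot so that a retransmission
 * with the same slot and seqid can be replayed (the NFSv4.1 sessions DRC).
 */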
3057 static void
3058 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
3059 {
3060 struct xdr_buf *buf = resp->xdr->buf;
3061 struct nfsd4_slot *slot = resp->cstate.slot;
3062 unsigned int base;
3063
3064 dprintk("--> %s slot %p\n", __func__, slot);
3065
3066 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
3067 slot->sl_opcnt = resp->opcnt;
3068 slot->sl_status = resp->cstate.status;
3069 free_svc_cred(&slot->sl_cred);
3070 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
3071
3072 if (!nfsd4_cache_this(resp)) {
3073 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
3074 return;
3075 }
3076 slot->sl_flags |= NFSD4_SLOT_CACHED;
3077
3078 base = resp->cstate.data_offset;
3079 slot->sl_datalen = buf->len - base;
3080 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
3081 WARN(1, "%s: sessions DRC could not cache compound\n",
3082 __func__);
3083 return;
3084 }
3085
3086
3087
3088
3089
3090
3091
3092
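/*
 * Re-encode the SEQUENCE operation of a replayed compound from the slot
 * values.  If the rest of the reply was not cached, the following operation
 * is answered with NFS4ERR_RETRY_UNCACHED_REP instead (or the solo-SEQUENCE
 * retry is rejected, see below).
 */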
3093 static __be32
3094 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
3095 struct nfsd4_compoundres *resp)
3096 {
3097 struct nfsd4_op *op;
3098 struct nfsd4_slot *slot = resp->cstate.slot;
3099
3100
3101 op = &args->ops[resp->opcnt - 1];
3102 nfsd4_encode_operation(resp, op);
3103
3104 if (slot->sl_flags & NFSD4_SLOT_CACHED)
3105 return op->status;
3106 if (args->opcnt == 1) {
3107
3108
3109
3110
3111
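/*
 * A solo SEQUENCE is always cached, so if this reply was not cached the
 * original compound cannot have been a solo SEQUENCE; this retry therefore
 * does not match the original request.
 */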
3112 op->status = nfserr_seq_false_retry;
3113 } else {
3114 op = &args->ops[resp->opcnt++];
3115 op->status = nfserr_retry_uncached_rep;
3116 nfsd4_encode_operation(resp, op);
3117 }
3118 return op->status;
3119 }
3120
3121
3122
3123
3124
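/*
 * Replay a cached reply: the SEQUENCE op itself is re-encoded from slot and
 * session state, the remaining ops come from the slot's cached XDR data.
 */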
3125 static __be32
3126 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3127 struct nfsd4_sequence *seq)
3128 {
3129 struct nfsd4_slot *slot = resp->cstate.slot;
3130 struct xdr_stream *xdr = resp->xdr;
3131 __be32 *p;
3132 __be32 status;
3133
3134 dprintk("--> %s slot %p\n", __func__, slot);
3135
3136 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3137 if (status)
3138 return status;
3139
3140 p = xdr_reserve_space(xdr, slot->sl_datalen);
3141 if (!p) {
3142 WARN_ON_ONCE(1);
3143 return nfserr_serverfault;
3144 }
3145 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3146 xdr_commit_encode(xdr);
3147
3148 resp->opcnt = slot->sl_opcnt;
3149 return slot->sl_status;
3150 }
3151
3152
3153
3154
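/* Set the server's EXCHANGE_ID flags and copy them into the reply. */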
3155 static void
3156 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3157 {
3158 #ifdef CONFIG_NFSD_PNFS
3159 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3160 #else
3161 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3162 #endif
3163
3164
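/* Referrals are supported; migration is not. */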
3165 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3166
3167
3168 clid->flags = new->cl_exchange_flags;
3169 }
3170
3171 static bool client_has_openowners(struct nfs4_client *clp)
3172 {
3173 struct nfs4_openowner *oo;
3174
3175 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3176 if (!list_empty(&oo->oo_owner.so_stateids))
3177 return true;
3178 }
3179 return false;
3180 }
3181
3182 static bool client_has_state(struct nfs4_client *clp)
3183 {
3184 return client_has_openowners(clp)
3185 #ifdef CONFIG_NFSD_PNFS
3186 || !list_empty(&clp->cl_lo_states)
3187 #endif
3188 || !list_empty(&clp->cl_delegations)
3189 || !list_empty(&clp->cl_sessions)
3190 || !list_empty(&clp->async_copies);
3191 }
3192
3193 static __be32 copy_impl_id(struct nfs4_client *clp,
3194 struct nfsd4_exchange_id *exid)
3195 {
3196 if (!exid->nii_domain.data)
3197 return 0;
3198 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3199 if (!clp->cl_nii_domain.data)
3200 return nfserr_jukebox;
3201 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3202 if (!clp->cl_nii_name.data)
3203 return nfserr_jukebox;
3204 clp->cl_nii_time = exid->nii_time;
3205 return 0;
3206 }
3207
3208 __be32
3209 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3210 union nfsd4_op_u *u)
3211 {
3212 struct nfsd4_exchange_id *exid = &u->exchange_id;
3213 struct nfs4_client *conf, *new;
3214 struct nfs4_client *unconf = NULL;
3215 __be32 status;
3216 char addr_str[INET6_ADDRSTRLEN];
3217 nfs4_verifier verf = exid->verifier;
3218 struct sockaddr *sa = svc_addr(rqstp);
3219 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3220 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3221
3222 rpc_ntop(sa, addr_str, sizeof(addr_str));
3223 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3224 "ip_addr=%s flags %x, spa_how %u\n",
3225 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3226 addr_str, exid->flags, exid->spa_how);
3227
3228 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3229 return nfserr_inval;
3230
3231 new = create_client(exid->clname, rqstp, &verf);
3232 if (new == NULL)
3233 return nfserr_jukebox;
3234 status = copy_impl_id(new, exid);
3235 if (status)
3236 goto out_nolock;
3237
3238 switch (exid->spa_how) {
3239 case SP4_MACH_CRED:
3240 exid->spo_must_enforce[0] = 0;
3241 exid->spo_must_enforce[1] = (
3242 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3243 1 << (OP_EXCHANGE_ID - 32) |
3244 1 << (OP_CREATE_SESSION - 32) |
3245 1 << (OP_DESTROY_SESSION - 32) |
3246 1 << (OP_DESTROY_CLIENTID - 32));
3247
3248 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3249 1 << (OP_OPEN_DOWNGRADE) |
3250 1 << (OP_LOCKU) |
3251 1 << (OP_DELEGRETURN));
3252
3253 exid->spo_must_allow[1] &= (
3254 1 << (OP_TEST_STATEID - 32) |
3255 1 << (OP_FREE_STATEID - 32));
3256 if (!svc_rqst_integrity_protected(rqstp)) {
3257 status = nfserr_inval;
3258 goto out_nolock;
3259 }
3260
3261
3262
3263
3264
3265 if (!new->cl_cred.cr_principal &&
3266 !new->cl_cred.cr_raw_principal) {
3267 status = nfserr_serverfault;
3268 goto out_nolock;
3269 }
3270 new->cl_mach_cred = true;
3271 break;
3272 case SP4_NONE:
3273 break;
3274 default:
3275 WARN_ON_ONCE(1);
3276 fallthrough;
3277 case SP4_SSV:
3278 status = nfserr_encr_alg_unsupp;
3279 goto out_nolock;
3280 }
3281
3282
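/* The cases below follow RFC 5661, section 18.35.4 (EXCHANGE_ID): */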
3283 spin_lock(&nn->client_lock);
3284 conf = find_confirmed_client_by_name(&exid->clname, nn);
3285 if (conf) {
3286 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3287 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3288
3289 if (update) {
3290 if (!clp_used_exchangeid(conf)) {
3291 status = nfserr_inval;
3292 goto out;
3293 }
3294 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3295 status = nfserr_wrong_cred;
3296 goto out;
3297 }
3298 if (!creds_match) {
3299 status = nfserr_perm;
3300 goto out;
3301 }
3302 if (!verfs_match) {
3303 status = nfserr_not_same;
3304 goto out;
3305 }
3306
3307 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3308 trace_nfsd_clid_confirmed_r(conf);
3309 goto out_copy;
3310 }
3311 if (!creds_match) {
3312 if (client_has_state(conf)) {
3313 status = nfserr_clid_inuse;
3314 trace_nfsd_clid_cred_mismatch(conf, rqstp);
3315 goto out;
3316 }
3317 goto out_new;
3318 }
3319 if (verfs_match) {
3320 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3321 trace_nfsd_clid_confirmed_r(conf);
3322 goto out_copy;
3323 }
3324
3325 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3326 conf = NULL;
3327 goto out_new;
3328 }
3329
3330 if (update) {
3331 status = nfserr_noent;
3332 goto out;
3333 }
3334
3335 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3336 if (unconf)
3337 unhash_client_locked(unconf);
3338
3339
3340 trace_nfsd_clid_fresh(new);
3341
3342 out_new:
3343 if (conf) {
3344 status = mark_client_expired_locked(conf);
3345 if (status)
3346 goto out;
3347 trace_nfsd_clid_replaced(&conf->cl_clientid);
3348 }
3349 new->cl_minorversion = cstate->minorversion;
3350 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3351 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3352
3353 add_to_unconfirmed(new);
3354 swap(new, conf);
3355 out_copy:
3356 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3357 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3358
3359 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3360 nfsd4_set_ex_flags(conf, exid);
3361
3362 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3363 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3364 status = nfs_ok;
3365
3366 out:
3367 spin_unlock(&nn->client_lock);
3368 out_nolock:
3369 if (new)
3370 expire_client(new);
3371 if (unconf) {
3372 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3373 expire_client(unconf);
3374 }
3375 return status;
3376 }
3377
3378 static __be32
3379 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3380 {
3381 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3382 slot_seqid);
3383
3384
3385 if (slot_inuse) {
3386 if (seqid == slot_seqid)
3387 return nfserr_jukebox;
3388 else
3389 return nfserr_seq_misordered;
3390 }
3391
3392 if (likely(seqid == slot_seqid + 1))
3393 return nfs_ok;
3394 if (seqid == slot_seqid)
3395 return nfserr_replay_cache;
3396 return nfserr_seq_misordered;
3397 }
3398
3399
3400
3401
3402
3403
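/*
 * Cache the CREATE_SESSION reply in the client's single create-session slot
 * (cl_cs_slot) so that a retransmission with the same seqid can be replayed.
 */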
3404 static void
3405 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3406 struct nfsd4_clid_slot *slot, __be32 nfserr)
3407 {
3408 slot->sl_status = nfserr;
3409 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3410 }
3411
3412 static __be32
3413 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3414 struct nfsd4_clid_slot *slot)
3415 {
3416 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3417 return slot->sl_status;
3418 }
3419
3420 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3421 2 * 2 + /* credential, verifier: AUTH_NULL, each length 0 */ \
3422 1 + /* zero-length tag: just the length word */ \
3423 3 + /* minorversion, opcount, opcode */ \
3424 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3425 /* seqid, slotid, highest_slotid, cachethis */ \
3426 4 ) * sizeof(__be32))
3427
3428 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3429 2 + /* verifier: AUTH_NULL, length 0 */ \
3430 1 + /* status */ \
3431 1 + /* zero-length tag: just the length word */ \
3432 3 + /* opcount, opcode, opstatus */ \
3433 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3434 /* seqid, slotid, highest_slotid, target_highest_slotid, status_flags */ \
3435 5 ) * sizeof(__be32))
3436
3437 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3438 {
3439 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3440
3441 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3442 return nfserr_toosmall;
3443 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3444 return nfserr_toosmall;
3445 ca->headerpadsz = 0;
3446 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3447 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3448 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3449 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3450 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3451 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
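/*
 * When DRC memory is tight, prefer reducing the number of slots over
 * shrinking them: a slot smaller than the client requested can break the
 * client, while fewer slots only costs performance.  nfsd4_get_drc_mem()
 * always grants at least one slot.
 */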
3462 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3463
3464 return nfs_ok;
3465 }
3466
3467
3468
3469
3470
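/*
 * These bound the callback channel's RPC header sizes assuming AUTH_NULL or
 * AUTH_SYS, the only flavors the backchannel supports (see
 * nfsd4_check_cb_sec() below).
 */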
3471 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3472 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3473
3474 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3475 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3476
3477 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3478 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3479 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3480 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3481 sizeof(__be32))
3482
3483 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3484 {
3485 ca->headerpadsz = 0;
3486
3487 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3488 return nfserr_toosmall;
3489 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3490 return nfserr_toosmall;
3491 ca->maxresp_cached = 0;
3492 if (ca->maxops < 2)
3493 return nfserr_toosmall;
3494
3495 return nfs_ok;
3496 }
3497
3498 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3499 {
3500 switch (cbs->flavor) {
3501 case RPC_AUTH_NULL:
3502 case RPC_AUTH_UNIX:
3503 return nfs_ok;
3504 default:
3505
3506
3507
3508
3509
3510
3511
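/*
 * GSS callback security is not supported.  The spec does not really allow
 * this error here, but failing hard beats returning an error the client
 * might think it can handle.
 */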
3512 return nfserr_encr_alg_unsupp;
3513 }
3514 }
3515
3516 __be32
3517 nfsd4_create_session(struct svc_rqst *rqstp,
3518 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3519 {
3520 struct nfsd4_create_session *cr_ses = &u->create_session;
3521 struct sockaddr *sa = svc_addr(rqstp);
3522 struct nfs4_client *conf, *unconf;
3523 struct nfs4_client *old = NULL;
3524 struct nfsd4_session *new;
3525 struct nfsd4_conn *conn;
3526 struct nfsd4_clid_slot *cs_slot = NULL;
3527 __be32 status = 0;
3528 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3529
3530 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3531 return nfserr_inval;
3532 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3533 if (status)
3534 return status;
3535 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3536 if (status)
3537 return status;
3538 status = check_backchannel_attrs(&cr_ses->back_channel);
3539 if (status)
3540 goto out_release_drc_mem;
3541 status = nfserr_jukebox;
3542 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3543 if (!new)
3544 goto out_release_drc_mem;
3545 conn = alloc_conn_from_crses(rqstp, cr_ses);
3546 if (!conn)
3547 goto out_free_session;
3548
3549 spin_lock(&nn->client_lock);
3550 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3551 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3552 WARN_ON_ONCE(conf && unconf);
3553
3554 if (conf) {
3555 status = nfserr_wrong_cred;
3556 if (!nfsd4_mach_creds_match(conf, rqstp))
3557 goto out_free_conn;
3558 cs_slot = &conf->cl_cs_slot;
3559 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3560 if (status) {
3561 if (status == nfserr_replay_cache)
3562 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3563 goto out_free_conn;
3564 }
3565 } else if (unconf) {
3566 status = nfserr_clid_inuse;
3567 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3568 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3569 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3570 goto out_free_conn;
3571 }
3572 status = nfserr_wrong_cred;
3573 if (!nfsd4_mach_creds_match(unconf, rqstp))
3574 goto out_free_conn;
3575 cs_slot = &unconf->cl_cs_slot;
3576 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3577 if (status) {
3578
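/* An unconfirmed replay is returned as misordered rather than replayed. */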
3579 status = nfserr_seq_misordered;
3580 goto out_free_conn;
3581 }
3582 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3583 if (old) {
3584 status = mark_client_expired_locked(old);
3585 if (status) {
3586 old = NULL;
3587 goto out_free_conn;
3588 }
3589 trace_nfsd_clid_replaced(&old->cl_clientid);
3590 }
3591 move_to_confirmed(unconf);
3592 conf = unconf;
3593 } else {
3594 status = nfserr_stale_clientid;
3595 goto out_free_conn;
3596 }
3597 status = nfs_ok;
3598
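/* Persistent sessions are not supported. */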
3599 cr_ses->flags &= ~SESSION4_PERSIST;
3600
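/* Upshifting from TCP to RDMA is not supported. */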
3601 cr_ses->flags &= ~SESSION4_RDMA;
3602
3603 init_session(rqstp, new, conf, cr_ses);
3604 nfsd4_get_session_locked(new);
3605
3606 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3607 NFS4_MAX_SESSIONID_LEN);
3608 cs_slot->sl_seqid++;
3609 cr_ses->seqid = cs_slot->sl_seqid;
3610
3611
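/* Cache solo and embedded CREATE_SESSION replies while still holding the client_lock. */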
3612 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3613 spin_unlock(&nn->client_lock);
3614 if (conf == unconf)
3615 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3616
3617 nfsd4_init_conn(rqstp, conn, new);
3618 nfsd4_put_session(new);
3619 if (old)
3620 expire_client(old);
3621 return status;
3622 out_free_conn:
3623 spin_unlock(&nn->client_lock);
3624 free_conn(conn);
3625 if (old)
3626 expire_client(old);
3627 out_free_session:
3628 __free_session(new);
3629 out_release_drc_mem:
3630 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3631 return status;
3632 }
3633
3634 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3635 {
3636 switch (*dir) {
3637 case NFS4_CDFC4_FORE:
3638 case NFS4_CDFC4_BACK:
3639 return nfs_ok;
3640 case NFS4_CDFC4_FORE_OR_BOTH:
3641 case NFS4_CDFC4_BACK_OR_BOTH:
3642 *dir = NFS4_CDFC4_BOTH;
3643 return nfs_ok;
3644 }
3645 return nfserr_inval;
3646 }
3647
3648 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3649 struct nfsd4_compound_state *cstate,
3650 union nfsd4_op_u *u)
3651 {
3652 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3653 struct nfsd4_session *session = cstate->session;
3654 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3655 __be32 status;
3656
3657 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3658 if (status)
3659 return status;
3660 spin_lock(&nn->client_lock);
3661 session->se_cb_prog = bc->bc_cb_program;
3662 session->se_cb_sec = bc->bc_cb_sec;
3663 spin_unlock(&nn->client_lock);
3664
3665 nfsd4_probe_callback(session->se_client);
3666
3667 return nfs_ok;
3668 }
3669
3670 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3671 {
3672 struct nfsd4_conn *c;
3673
3674 list_for_each_entry(c, &s->se_conns, cn_persession) {
3675 if (c->cn_xprt == xpt) {
3676 return c;
3677 }
3678 }
3679 return NULL;
3680 }
3681
3682 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3683 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3684 {
3685 struct nfs4_client *clp = session->se_client;
3686 struct svc_xprt *xpt = rqst->rq_xprt;
3687 struct nfsd4_conn *c;
3688 __be32 status;
3689
3690
3691 spin_lock(&clp->cl_lock);
3692 c = __nfsd4_find_conn(xpt, session);
3693 if (!c)
3694 status = nfserr_noent;
3695 else if (req == c->cn_flags)
3696 status = nfs_ok;
3697 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3698 c->cn_flags != NFS4_CDFC4_BACK)
3699 status = nfs_ok;
3700 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3701 c->cn_flags != NFS4_CDFC4_FORE)
3702 status = nfs_ok;
3703 else
3704 status = nfserr_inval;
3705 spin_unlock(&clp->cl_lock);
3706 if (status == nfs_ok && conn)
3707 *conn = c;
3708 return status;
3709 }
3710
3711 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3712 struct nfsd4_compound_state *cstate,
3713 union nfsd4_op_u *u)
3714 {
3715 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3716 __be32 status;
3717 struct nfsd4_conn *conn;
3718 struct nfsd4_session *session;
3719 struct net *net = SVC_NET(rqstp);
3720 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3721
3722 if (!nfsd4_last_compound_op(rqstp))
3723 return nfserr_not_only_op;
3724 spin_lock(&nn->client_lock);
3725 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3726 spin_unlock(&nn->client_lock);
3727 if (!session)
3728 goto out_no_session;
3729 status = nfserr_wrong_cred;
3730 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3731 goto out;
3732 status = nfsd4_match_existing_connection(rqstp, session,
3733 bcts->dir, &conn);
3734 if (status == nfs_ok) {
3735 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
3736 bcts->dir == NFS4_CDFC4_BACK)
3737 conn->cn_flags |= NFS4_CDFC4_BACK;
3738 nfsd4_probe_callback(session->se_client);
3739 goto out;
3740 }
3741 if (status == nfserr_inval)
3742 goto out;
3743 status = nfsd4_map_bcts_dir(&bcts->dir);
3744 if (status)
3745 goto out;
3746 conn = alloc_conn(rqstp, bcts->dir);
3747 status = nfserr_jukebox;
3748 if (!conn)
3749 goto out;
3750 nfsd4_init_conn(rqstp, conn, session);
3751 status = nfs_ok;
3752 out:
3753 nfsd4_put_session(session);
3754 out_no_session:
3755 return status;
3756 }
3757
3758 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3759 {
3760 if (!cstate->session)
3761 return false;
3762 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3763 }
3764
3765 __be32
3766 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3767 union nfsd4_op_u *u)
3768 {
3769 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3770 struct nfsd4_session *ses;
3771 __be32 status;
3772 int ref_held_by_me = 0;
3773 struct net *net = SVC_NET(r);
3774 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3775
3776 status = nfserr_not_only_op;
3777 if (nfsd4_compound_in_session(cstate, sessionid)) {
3778 if (!nfsd4_last_compound_op(r))
3779 goto out;
3780 ref_held_by_me++;
3781 }
3782 dump_sessionid(__func__, sessionid);
3783 spin_lock(&nn->client_lock);
3784 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3785 if (!ses)
3786 goto out_client_lock;
3787 status = nfserr_wrong_cred;
3788 if (!nfsd4_mach_creds_match(ses->se_client, r))
3789 goto out_put_session;
3790 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3791 if (status)
3792 goto out_put_session;
3793 unhash_session(ses);
3794 spin_unlock(&nn->client_lock);
3795
3796 nfsd4_probe_callback_sync(ses->se_client);
3797
3798 spin_lock(&nn->client_lock);
3799 status = nfs_ok;
3800 out_put_session:
3801 nfsd4_put_session_locked(ses);
3802 out_client_lock:
3803 spin_unlock(&nn->client_lock);
3804 out:
3805 return status;
3806 }
3807
3808 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3809 {
3810 struct nfs4_client *clp = ses->se_client;
3811 struct nfsd4_conn *c;
3812 __be32 status = nfs_ok;
3813 int ret;
3814
3815 spin_lock(&clp->cl_lock);
3816 c = __nfsd4_find_conn(new->cn_xprt, ses);
3817 if (c)
3818 goto out_free;
3819 status = nfserr_conn_not_bound_to_session;
3820 if (clp->cl_mach_cred)
3821 goto out_free;
3822 __nfsd4_hash_conn(new, ses);
3823 spin_unlock(&clp->cl_lock);
3824 ret = nfsd4_register_conn(new);
3825 if (ret)
3826
3827 nfsd4_conn_lost(&new->cn_xpt_user);
3828 return nfs_ok;
3829 out_free:
3830 spin_unlock(&clp->cl_lock);
3831 free_conn(new);
3832 return status;
3833 }
3834
3835 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3836 {
3837 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3838
3839 return args->opcnt > session->se_fchannel.maxops;
3840 }
3841
3842 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3843 struct nfsd4_session *session)
3844 {
3845 struct xdr_buf *xb = &rqstp->rq_arg;
3846
3847 return xb->len > session->se_fchannel.maxreq_sz;
3848 }
3849
3850 static bool replay_matches_cache(struct svc_rqst *rqstp,
3851 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3852 {
3853 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3854
3855 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3856 (bool)seq->cachethis)
3857 return false;
3858
3859
3860
3861
3862 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3863 return false;
3864
3865
3866
3867
3868
3869 if (slot->sl_opcnt > argp->opcnt)
3870 return false;
3871
3872 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3873 return false;
3874
3875
3876
3877
3878
3879
3880 return true;
3881 }
3882
3883 __be32
3884 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3885 union nfsd4_op_u *u)
3886 {
3887 struct nfsd4_sequence *seq = &u->sequence;
3888 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3889 struct xdr_stream *xdr = resp->xdr;
3890 struct nfsd4_session *session;
3891 struct nfs4_client *clp;
3892 struct nfsd4_slot *slot;
3893 struct nfsd4_conn *conn;
3894 __be32 status;
3895 int buflen;
3896 struct net *net = SVC_NET(rqstp);
3897 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3898
3899 if (resp->opcnt != 1)
3900 return nfserr_sequence_pos;
3901
3902
3903
3904
3905
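/*
 * Allocate the connection before taking the client_lock; it will be either
 * used or freed by nfsd4_sequence_check_conn() below.
 */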
3906 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3907 if (!conn)
3908 return nfserr_jukebox;
3909
3910 spin_lock(&nn->client_lock);
3911 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3912 if (!session)
3913 goto out_no_session;
3914 clp = session->se_client;
3915
3916 status = nfserr_too_many_ops;
3917 if (nfsd4_session_too_many_ops(rqstp, session))
3918 goto out_put_session;
3919
3920 status = nfserr_req_too_big;
3921 if (nfsd4_request_too_big(rqstp, session))
3922 goto out_put_session;
3923
3924 status = nfserr_badslot;
3925 if (seq->slotid >= session->se_fchannel.maxreqs)
3926 goto out_put_session;
3927
3928 slot = session->se_slots[seq->slotid];
3929 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3930
3931
3932
3933
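/* Slot counts are not renegotiated yet; report the session's maxreqs back to the client as maxslots. */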
3934 seq->maxslots = session->se_fchannel.maxreqs;
3935
3936 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3937 slot->sl_flags & NFSD4_SLOT_INUSE);
3938 if (status == nfserr_replay_cache) {
3939 status = nfserr_seq_misordered;
3940 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3941 goto out_put_session;
3942 status = nfserr_seq_false_retry;
3943 if (!replay_matches_cache(rqstp, seq, slot))
3944 goto out_put_session;
3945 cstate->slot = slot;
3946 cstate->session = session;
3947 cstate->clp = clp;
3948
3949
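/*
 * Return the cached reply; setting cstate->status to nfserr_replay_cache
 * tells nfsd4_proc_compound() not to process the rest of the compound.
 */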
3950 status = nfsd4_replay_cache_entry(resp, seq);
3951 cstate->status = nfserr_replay_cache;
3952 goto out;
3953 }
3954 if (status)
3955 goto out_put_session;
3956
3957 status = nfsd4_sequence_check_conn(conn, session);
3958 conn = NULL;
3959 if (status)
3960 goto out_put_session;
3961
3962 buflen = (seq->cachethis) ?
3963 session->se_fchannel.maxresp_cached :
3964 session->se_fchannel.maxresp_sz;
3965 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3966 nfserr_rep_too_big;
3967 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3968 goto out_put_session;
3969 svc_reserve(rqstp, buflen);
3970
3971 status = nfs_ok;
3972
3973 slot->sl_seqid = seq->seqid;
3974 slot->sl_flags |= NFSD4_SLOT_INUSE;
3975 if (seq->cachethis)
3976 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3977 else
3978 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3979
3980 cstate->slot = slot;
3981 cstate->session = session;
3982 cstate->clp = clp;
3983
3984 out:
3985 switch (clp->cl_cb_state) {
3986 case NFSD4_CB_DOWN:
3987 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3988 break;
3989 case NFSD4_CB_FAULT:
3990 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3991 break;
3992 default:
3993 seq->status_flags = 0;
3994 }
3995 if (!list_empty(&clp->cl_revoked))
3996 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3997 out_no_session:
3998 if (conn)
3999 free_conn(conn);
4000 spin_unlock(&nn->client_lock);
4001 return status;
4002 out_put_session:
4003 nfsd4_put_session_locked(session);
4004 goto out_no_session;
4005 }
4006
4007 void
4008 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
4009 {
4010 struct nfsd4_compound_state *cs = &resp->cstate;
4011
4012 if (nfsd4_has_session(cs)) {
4013 if (cs->status != nfserr_replay_cache) {
4014 nfsd4_store_cache_entry(resp);
4015 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
4016 }
4017
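/* Drop the session reference taken in nfsd4_sequence(). */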
4018 nfsd4_put_session(cs->session);
4019 } else if (cs->clp)
4020 put_client_renew(cs->clp);
4021 }
4022
4023 __be32
4024 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
4025 struct nfsd4_compound_state *cstate,
4026 union nfsd4_op_u *u)
4027 {
4028 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
4029 struct nfs4_client *conf, *unconf;
4030 struct nfs4_client *clp = NULL;
4031 __be32 status = 0;
4032 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4033
4034 spin_lock(&nn->client_lock);
4035 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
4036 conf = find_confirmed_client(&dc->clientid, true, nn);
4037 WARN_ON_ONCE(conf && unconf);
4038
4039 if (conf) {
4040 if (client_has_state(conf)) {
4041 status = nfserr_clientid_busy;
4042 goto out;
4043 }
4044 status = mark_client_expired_locked(conf);
4045 if (status)
4046 goto out;
4047 clp = conf;
4048 } else if (unconf)
4049 clp = unconf;
4050 else {
4051 status = nfserr_stale_clientid;
4052 goto out;
4053 }
4054 if (!nfsd4_mach_creds_match(clp, rqstp)) {
4055 clp = NULL;
4056 status = nfserr_wrong_cred;
4057 goto out;
4058 }
4059 trace_nfsd_clid_destroyed(&clp->cl_clientid);
4060 unhash_client_locked(clp);
4061 out:
4062 spin_unlock(&nn->client_lock);
4063 if (clp)
4064 expire_client(clp);
4065 return status;
4066 }
4067
4068 __be32
4069 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
4070 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4071 {
4072 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
4073 struct nfs4_client *clp = cstate->clp;
4074 __be32 status = 0;
4075
4076 if (rc->rca_one_fs) {
4077 if (!cstate->current_fh.fh_dentry)
4078 return nfserr_nofilehandle;
4079
4080
4081
4082
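/*
 * The rca_one_fs (per-filesystem reclaim) case is not taken advantage of;
 * it is optional, so simply returning success is safe.
 */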
4083 return nfs_ok;
4084 }
4085
4086 status = nfserr_complete_already;
4087 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4088 goto out;
4089
4090 status = nfserr_stale_clientid;
4091 if (is_client_expired(clp))
4092
4093
4094
4095
4096
4097
4098
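/*
 * Strictly, this is not a legal error for RECLAIM_COMPLETE, but we only get
 * here if the client has just destroyed itself, so it no longer cares which
 * error it gets back.
 */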
4099 goto out;
4100
4101 status = nfs_ok;
4102 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4103 nfsd4_client_record_create(clp);
4104 inc_reclaim_complete(clp);
4105 out:
4106 return status;
4107 }
4108
4109 __be32
4110 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4111 union nfsd4_op_u *u)
4112 {
4113 struct nfsd4_setclientid *setclid = &u->setclientid;
4114 struct xdr_netobj clname = setclid->se_name;
4115 nfs4_verifier clverifier = setclid->se_verf;
4116 struct nfs4_client *conf, *new;
4117 struct nfs4_client *unconf = NULL;
4118 __be32 status;
4119 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4120
4121 new = create_client(clname, rqstp, &clverifier);
4122 if (new == NULL)
4123 return nfserr_jukebox;
4124 spin_lock(&nn->client_lock);
4125 conf = find_confirmed_client_by_name(&clname, nn);
4126 if (conf && client_has_state(conf)) {
4127 status = nfserr_clid_inuse;
4128 if (clp_used_exchangeid(conf))
4129 goto out;
4130 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4131 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4132 goto out;
4133 }
4134 }
4135 unconf = find_unconfirmed_client_by_name(&clname, nn);
4136 if (unconf)
4137 unhash_client_locked(unconf);
4138 if (conf) {
4139 if (same_verf(&conf->cl_verifier, &clverifier)) {
4140 copy_clid(new, conf);
4141 gen_confirm(new, nn);
4142 } else
4143 trace_nfsd_clid_verf_mismatch(conf, rqstp,
4144 &clverifier);
4145 } else
4146 trace_nfsd_clid_fresh(new);
4147 new->cl_minorversion = 0;
4148 gen_callback(new, setclid, rqstp);
4149 add_to_unconfirmed(new);
4150 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4151 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4152 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4153 new = NULL;
4154 status = nfs_ok;
4155 out:
4156 spin_unlock(&nn->client_lock);
4157 if (new)
4158 free_client(new);
4159 if (unconf) {
4160 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4161 expire_client(unconf);
4162 }
4163 return status;
4164 }
4165
4166 __be32
4167 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4168 struct nfsd4_compound_state *cstate,
4169 union nfsd4_op_u *u)
4170 {
4171 struct nfsd4_setclientid_confirm *setclientid_confirm =
4172 &u->setclientid_confirm;
4173 struct nfs4_client *conf, *unconf;
4174 struct nfs4_client *old = NULL;
4175 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4176 clientid_t * clid = &setclientid_confirm->sc_clientid;
4177 __be32 status;
4178 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4179
4180 if (STALE_CLIENTID(clid, nn))
4181 return nfserr_stale_clientid;
4182
4183 spin_lock(&nn->client_lock);
4184 conf = find_confirmed_client(clid, false, nn);
4185 unconf = find_unconfirmed_client(clid, false, nn);
4186
4187
4188
4189
4190
4191
4192
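/*
 * We try hard to hand out unique clientids, so confirming an existing
 * clientid with different credentials usually means a buggy client;
 * RFC 7530 recommends returning CLID_INUSE in that case.
 */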
4193 status = nfserr_clid_inuse;
4194 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4195 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4196 goto out;
4197 }
4198 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4199 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4200 goto out;
4201 }
4202 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4203 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4204 status = nfs_ok;
4205 } else
4206 status = nfserr_stale_clientid;
4207 goto out;
4208 }
4209 status = nfs_ok;
4210 if (conf) {
4211 old = unconf;
4212 unhash_client_locked(old);
4213 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4214 } else {
4215 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4216 if (old) {
4217 status = nfserr_clid_inuse;
4218 if (client_has_state(old)
4219 && !same_creds(&unconf->cl_cred,
4220 &old->cl_cred)) {
4221 old = NULL;
4222 goto out;
4223 }
4224 status = mark_client_expired_locked(old);
4225 if (status) {
4226 old = NULL;
4227 goto out;
4228 }
4229 trace_nfsd_clid_replaced(&old->cl_clientid);
4230 }
4231 move_to_confirmed(unconf);
4232 conf = unconf;
4233 }
4234 get_client_locked(conf);
4235 spin_unlock(&nn->client_lock);
4236 if (conf == unconf)
4237 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4238 nfsd4_probe_callback(conf);
4239 spin_lock(&nn->client_lock);
4240 put_client_renew_locked(conf);
4241 out:
4242 spin_unlock(&nn->client_lock);
4243 if (old)
4244 expire_client(old);
4245 return status;
4246 }
4247
4248 static struct nfs4_file *nfsd4_alloc_file(void)
4249 {
4250 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4251 }
4252
4253
4254 static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
4255 struct nfs4_file *fp)
4256 {
4257 lockdep_assert_held(&state_lock);
4258
4259 refcount_set(&fp->fi_ref, 1);
4260 spin_lock_init(&fp->fi_lock);
4261 INIT_LIST_HEAD(&fp->fi_stateids);
4262 INIT_LIST_HEAD(&fp->fi_delegations);
4263 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4264 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4265 fp->fi_deleg_file = NULL;
4266 fp->fi_had_conflict = false;
4267 fp->fi_share_deny = 0;
4268 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4269 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4270 fp->fi_aliased = false;
4271 fp->fi_inode = d_inode(fh->fh_dentry);
4272 #ifdef CONFIG_NFSD_PNFS
4273 INIT_LIST_HEAD(&fp->fi_lo_states);
4274 atomic_set(&fp->fi_lo_recalls, 0);
4275 #endif
4276 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
4277 }
4278
4279 void
4280 nfsd4_free_slabs(void)
4281 {
4282 kmem_cache_destroy(client_slab);
4283 kmem_cache_destroy(openowner_slab);
4284 kmem_cache_destroy(lockowner_slab);
4285 kmem_cache_destroy(file_slab);
4286 kmem_cache_destroy(stateid_slab);
4287 kmem_cache_destroy(deleg_slab);
4288 kmem_cache_destroy(odstate_slab);
4289 }
4290
4291 int
4292 nfsd4_init_slabs(void)
4293 {
4294 client_slab = kmem_cache_create("nfsd4_clients",
4295 sizeof(struct nfs4_client), 0, 0, NULL);
4296 if (client_slab == NULL)
4297 goto out;
4298 openowner_slab = kmem_cache_create("nfsd4_openowners",
4299 sizeof(struct nfs4_openowner), 0, 0, NULL);
4300 if (openowner_slab == NULL)
4301 goto out_free_client_slab;
4302 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4303 sizeof(struct nfs4_lockowner), 0, 0, NULL);
4304 if (lockowner_slab == NULL)
4305 goto out_free_openowner_slab;
4306 file_slab = kmem_cache_create("nfsd4_files",
4307 sizeof(struct nfs4_file), 0, 0, NULL);
4308 if (file_slab == NULL)
4309 goto out_free_lockowner_slab;
4310 stateid_slab = kmem_cache_create("nfsd4_stateids",
4311 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4312 if (stateid_slab == NULL)
4313 goto out_free_file_slab;
4314 deleg_slab = kmem_cache_create("nfsd4_delegations",
4315 sizeof(struct nfs4_delegation), 0, 0, NULL);
4316 if (deleg_slab == NULL)
4317 goto out_free_stateid_slab;
4318 odstate_slab = kmem_cache_create("nfsd4_odstate",
4319 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4320 if (odstate_slab == NULL)
4321 goto out_free_deleg_slab;
4322 return 0;
4323
4324 out_free_deleg_slab:
4325 kmem_cache_destroy(deleg_slab);
4326 out_free_stateid_slab:
4327 kmem_cache_destroy(stateid_slab);
4328 out_free_file_slab:
4329 kmem_cache_destroy(file_slab);
4330 out_free_lockowner_slab:
4331 kmem_cache_destroy(lockowner_slab);
4332 out_free_openowner_slab:
4333 kmem_cache_destroy(openowner_slab);
4334 out_free_client_slab:
4335 kmem_cache_destroy(client_slab);
4336 out:
4337 return -ENOMEM;
4338 }
4339
4340 void nfsd4_init_leases_net(struct nfsd_net *nn)
4341 {
4342 struct sysinfo si;
4343 u64 max_clients;
4344
4345 nn->nfsd4_lease = 90;
4346 nn->nfsd4_grace = 90;
4347 nn->somebody_reclaimed = false;
4348 nn->track_reclaim_completes = false;
4349 nn->clverifier_counter = prandom_u32();
4350 nn->clientid_base = prandom_u32();
4351 nn->clientid_counter = nn->clientid_base + 1;
4352 nn->s2s_cp_cl_id = nn->clientid_counter++;
4353
4354 atomic_set(&nn->nfs4_client_count, 0);
4355 si_meminfo(&si);
4356 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
4357 max_clients *= NFS4_CLIENTS_PER_GB;
4358 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
4359 }
4360
4361 static void init_nfs4_replay(struct nfs4_replay *rp)
4362 {
4363 rp->rp_status = nfserr_serverfault;
4364 rp->rp_buflen = 0;
4365 rp->rp_buf = rp->rp_ibuf;
4366 mutex_init(&rp->rp_mutex);
4367 }
4368
4369 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4370 struct nfs4_stateowner *so)
4371 {
4372 if (!nfsd4_has_session(cstate)) {
4373 mutex_lock(&so->so_replay.rp_mutex);
4374 cstate->replay_owner = nfs4_get_stateowner(so);
4375 }
4376 }
4377
4378 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4379 {
4380 struct nfs4_stateowner *so = cstate->replay_owner;
4381
4382 if (so != NULL) {
4383 cstate->replay_owner = NULL;
4384 mutex_unlock(&so->so_replay.rp_mutex);
4385 nfs4_put_stateowner(so);
4386 }
4387 }
4388
4389 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4390 {
4391 struct nfs4_stateowner *sop;
4392
4393 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4394 if (!sop)
4395 return NULL;
4396
4397 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4398 if (!sop->so_owner.data) {
4399 kmem_cache_free(slab, sop);
4400 return NULL;
4401 }
4402
4403 INIT_LIST_HEAD(&sop->so_stateids);
4404 sop->so_client = clp;
4405 init_nfs4_replay(&sop->so_replay);
4406 atomic_set(&sop->so_count, 1);
4407 return sop;
4408 }
4409
4410 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4411 {
4412 lockdep_assert_held(&clp->cl_lock);
4413
4414 list_add(&oo->oo_owner.so_strhash,
4415 &clp->cl_ownerstr_hashtbl[strhashval]);
4416 list_add(&oo->oo_perclient, &clp->cl_openowners);
4417 }
4418
4419 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4420 {
4421 unhash_openowner_locked(openowner(so));
4422 }
4423
4424 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4425 {
4426 struct nfs4_openowner *oo = openowner(so);
4427
4428 kmem_cache_free(openowner_slab, oo);
4429 }
4430
4431 static const struct nfs4_stateowner_operations openowner_ops = {
4432 .so_unhash = nfs4_unhash_openowner,
4433 .so_free = nfs4_free_openowner,
4434 };
4435
4436 static struct nfs4_ol_stateid *
4437 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4438 {
4439 struct nfs4_ol_stateid *local, *ret = NULL;
4440 struct nfs4_openowner *oo = open->op_openowner;
4441
4442 lockdep_assert_held(&fp->fi_lock);
4443
4444 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4445
4446 if (local->st_stateowner->so_is_open_owner == 0)
4447 continue;
4448 if (local->st_stateowner != &oo->oo_owner)
4449 continue;
4450 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4451 ret = local;
4452 refcount_inc(&ret->st_stid.sc_count);
4453 break;
4454 }
4455 }
4456 return ret;
4457 }
4458
4459 static __be32
4460 nfsd4_verify_open_stid(struct nfs4_stid *s)
4461 {
4462 __be32 ret = nfs_ok;
4463
4464 switch (s->sc_type) {
4465 default:
4466 break;
4467 case 0:
4468 case NFS4_CLOSED_STID:
4469 case NFS4_CLOSED_DELEG_STID:
4470 ret = nfserr_bad_stateid;
4471 break;
4472 case NFS4_REVOKED_DELEG_STID:
4473 ret = nfserr_deleg_revoked;
4474 }
4475 return ret;
4476 }
4477
4478
4479 static __be32
4480 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4481 {
4482 __be32 ret;
4483
4484 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4485 ret = nfsd4_verify_open_stid(&stp->st_stid);
4486 if (ret != nfs_ok)
4487 mutex_unlock(&stp->st_mutex);
4488 return ret;
4489 }
4490
4491 static struct nfs4_ol_stateid *
4492 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4493 {
4494 struct nfs4_ol_stateid *stp;
4495 for (;;) {
4496 spin_lock(&fp->fi_lock);
4497 stp = nfsd4_find_existing_open(fp, open);
4498 spin_unlock(&fp->fi_lock);
4499 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4500 break;
4501 nfs4_put_stid(&stp->st_stid);
4502 }
4503 return stp;
4504 }
4505
4506 static struct nfs4_openowner *
4507 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4508 struct nfsd4_compound_state *cstate)
4509 {
4510 struct nfs4_client *clp = cstate->clp;
4511 struct nfs4_openowner *oo, *ret;
4512
4513 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4514 if (!oo)
4515 return NULL;
4516 oo->oo_owner.so_ops = &openowner_ops;
4517 oo->oo_owner.so_is_open_owner = 1;
4518 oo->oo_owner.so_seqid = open->op_seqid;
4519 oo->oo_flags = 0;
4520 if (nfsd4_has_session(cstate))
4521 oo->oo_flags |= NFS4_OO_CONFIRMED;
4522 oo->oo_time = 0;
4523 oo->oo_last_closed_stid = NULL;
4524 INIT_LIST_HEAD(&oo->oo_close_lru);
4525 spin_lock(&clp->cl_lock);
4526 ret = find_openstateowner_str_locked(strhashval, open, clp);
4527 if (ret == NULL) {
4528 hash_openowner(oo, clp, strhashval);
4529 ret = oo;
4530 } else
4531 nfs4_free_stateowner(&oo->oo_owner);
4532
4533 spin_unlock(&clp->cl_lock);
4534 return ret;
4535 }
4536
4537 static struct nfs4_ol_stateid *
4538 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4539 {
4540
4541 struct nfs4_openowner *oo = open->op_openowner;
4542 struct nfs4_ol_stateid *retstp = NULL;
4543 struct nfs4_ol_stateid *stp;
4544
4545 stp = open->op_stp;
4546
4547 mutex_init(&stp->st_mutex);
4548 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4549
4550 retry:
4551 spin_lock(&oo->oo_owner.so_client->cl_lock);
4552 spin_lock(&fp->fi_lock);
4553
4554 retstp = nfsd4_find_existing_open(fp, open);
4555 if (retstp)
4556 goto out_unlock;
4557
4558 open->op_stp = NULL;
4559 refcount_inc(&stp->st_stid.sc_count);
4560 stp->st_stid.sc_type = NFS4_OPEN_STID;
4561 INIT_LIST_HEAD(&stp->st_locks);
4562 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4563 get_nfs4_file(fp);
4564 stp->st_stid.sc_file = fp;
4565 stp->st_access_bmap = 0;
4566 stp->st_deny_bmap = 0;
4567 stp->st_openstp = NULL;
4568 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4569 list_add(&stp->st_perfile, &fp->fi_stateids);
4570
4571 out_unlock:
4572 spin_unlock(&fp->fi_lock);
4573 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4574 if (retstp) {
4575
4576 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4577 nfs4_put_stid(&retstp->st_stid);
4578 goto retry;
4579 }
4580
4581 mutex_unlock(&stp->st_mutex);
4582 stp = retstp;
4583 }
4584 return stp;
4585 }
4586
4587
4588
4589
4590
4591
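/*
 * NFSv4.0 has no sessions, so a retransmitted CLOSE must remain replayable.
 * Rather than freeing a just-closed open stateid, drop its file access and
 * file reference and park it in oo_last_closed_stid on the per-net
 * close_lru; the laundromat releases it once oo_time has aged past a lease.
 */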
4592 static void
4593 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4594 {
4595 struct nfs4_ol_stateid *last;
4596 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4597 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4598 nfsd_net_id);
4599
4600 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
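/*
 * Wait for the refcount to drop to 2 (the caller's reference plus the one
 * the stateid keeps while parked on the close LRU); any additional
 * reference means another RPC is still using this stateid, so its file
 * access and file reference cannot be released yet.
 */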
4611 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4612
4613 release_all_access(s);
4614 if (s->st_stid.sc_file) {
4615 put_nfs4_file(s->st_stid.sc_file);
4616 s->st_stid.sc_file = NULL;
4617 }
4618
4619 spin_lock(&nn->client_lock);
4620 last = oo->oo_last_closed_stid;
4621 oo->oo_last_closed_stid = s;
4622 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4623 oo->oo_time = ktime_get_boottime_seconds();
4624 spin_unlock(&nn->client_lock);
4625 if (last)
4626 nfs4_put_stid(&last->st_stid);
4627 }
4628
4629
4630 static struct nfs4_file *
4631 find_file_locked(struct svc_fh *fh, unsigned int hashval)
4632 {
4633 struct nfs4_file *fp;
4634
4635 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4636 lockdep_is_held(&state_lock)) {
4637 if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
4638 if (refcount_inc_not_zero(&fp->fi_ref))
4639 return fp;
4640 }
4641 }
4642 return NULL;
4643 }
4644
4645 static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
4646 unsigned int hashval)
4647 {
4648 struct nfs4_file *fp;
4649 struct nfs4_file *ret = NULL;
4650 bool alias_found = false;
4651
4652 spin_lock(&state_lock);
4653 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
4654 lockdep_is_held(&state_lock)) {
4655 if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
4656 if (refcount_inc_not_zero(&fp->fi_ref))
4657 ret = fp;
4658 } else if (d_inode(fh->fh_dentry) == fp->fi_inode)
4659 fp->fi_aliased = alias_found = true;
4660 }
4661 if (likely(ret == NULL)) {
4662 nfsd4_init_file(fh, hashval, new);
4663 new->fi_aliased = alias_found;
4664 ret = new;
4665 }
4666 spin_unlock(&state_lock);
4667 return ret;
4668 }
4669
4670 static struct nfs4_file * find_file(struct svc_fh *fh)
4671 {
4672 struct nfs4_file *fp;
4673 unsigned int hashval = file_hashval(fh);
4674
4675 rcu_read_lock();
4676 fp = find_file_locked(fh, hashval);
4677 rcu_read_unlock();
4678 return fp;
4679 }
4680
4681 static struct nfs4_file *
4682 find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
4683 {
4684 struct nfs4_file *fp;
4685 unsigned int hashval = file_hashval(fh);
4686
4687 rcu_read_lock();
4688 fp = find_file_locked(fh, hashval);
4689 rcu_read_unlock();
4690 if (fp)
4691 return fp;
4692
4693 return insert_file(new, fh, hashval);
4694 }
4695
4696
4697
4698
4699
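/*
 * Used for the special "anonymous" stateids: return nfserr_locked if the
 * file is currently open with a share deny mode that conflicts with the
 * requested access, and nfs_ok otherwise (including when the file is not
 * known to the NFSv4 state code at all).
 */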
4700 static __be32
4701 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4702 {
4703 struct nfs4_file *fp;
4704 __be32 ret = nfs_ok;
4705
4706 fp = find_file(current_fh);
4707 if (!fp)
4708 return ret;
4709
4710 spin_lock(&fp->fi_lock);
4711 if (fp->fi_share_deny & deny_type)
4712 ret = nfserr_locked;
4713 spin_unlock(&fp->fi_lock);
4714 put_nfs4_file(fp);
4715 return ret;
4716 }
4717
4718 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4719 {
4720 struct nfs4_delegation *dp = cb_to_delegation(cb);
4721 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4722 nfsd_net_id);
4723
4724 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4725
4726
4727
4728
4729
4730
4731
4732
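/*
 * Queue the delegation on the per-net recall LRU exactly once (dl_time
 * stays non-zero afterwards) so the laundromat can revoke it if the
 * client never returns it.
 */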
4733 spin_lock(&state_lock);
4734 if (delegation_hashed(dp) && dp->dl_time == 0) {
4735 dp->dl_time = ktime_get_boottime_seconds();
4736 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4737 }
4738 spin_unlock(&state_lock);
4739 }
4740
4741 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4742 struct rpc_task *task)
4743 {
4744 struct nfs4_delegation *dp = cb_to_delegation(cb);
4745
4746 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4747 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4748 return 1;
4749
4750 switch (task->tk_status) {
4751 case 0:
4752 return 1;
4753 case -NFS4ERR_DELAY:
4754 rpc_delay(task, 2 * HZ);
4755 return 0;
4756 case -EBADHANDLE:
4757 case -NFS4ERR_BAD_STATEID:
4758
4759
4760
4761
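/*
 * The client may not have seen the delegation yet: a CB_RECALL can race
 * with the OPEN reply that granted it, so retry a bounded number of
 * times before giving up on the callback.
 */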
4762 if (dp->dl_retries--) {
4763 rpc_delay(task, 2 * HZ);
4764 return 0;
4765 }
4766 fallthrough;
4767 default:
4768 return 1;
4769 }
4770 }
4771
4772 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4773 {
4774 struct nfs4_delegation *dp = cb_to_delegation(cb);
4775
4776 nfs4_put_stid(&dp->dl_stid);
4777 }
4778
4779 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4780 .prepare = nfsd4_cb_recall_prepare,
4781 .done = nfsd4_cb_recall_done,
4782 .release = nfsd4_cb_recall_release,
4783 };
4784
4785 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4786 {
4787
4788
4789
4790
4791
4792
4793
4794 refcount_inc(&dp->dl_stid.sc_count);
4795 nfsd4_run_cb(&dp->dl_recall);
4796 }
4797
4798
4799 static bool
4800 nfsd_break_deleg_cb(struct file_lock *fl)
4801 {
4802 bool ret = false;
4803 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4804 struct nfs4_file *fp = dp->dl_stid.sc_file;
4805 struct nfs4_client *clp = dp->dl_stid.sc_client;
4806 struct nfsd_net *nn;
4807
4808 trace_nfsd_cb_recall(&dp->dl_stid);
4809
4810 dp->dl_recalled = true;
4811 atomic_inc(&clp->cl_delegs_in_recall);
4812 if (try_to_expire_client(clp)) {
4813 nn = net_generic(clp->net, nfsd_net_id);
4814 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
4815 }
4816
4817
4818
4819
4820
4821
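/*
 * Setting fl_break_time to 0 keeps the locks code from timing out the
 * lease on its own; the delegation is instead revoked by the laundromat
 * if the client does not return it in time.
 */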
4822 fl->fl_break_time = 0;
4823
4824 spin_lock(&fp->fi_lock);
4825 fp->fi_had_conflict = true;
4826 nfsd_break_one_deleg(dp);
4827 spin_unlock(&fp->fi_lock);
4828 return ret;
4829 }
4830
4831
4832
4833
4834
4835
4836
4837
4838
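/*
 * A lease break generated by an nfsd thread that is acting on behalf of
 * the very client holding the delegation is treated as if the breaker
 * owned the lease, so that client is not forced to wait for its own
 * delegation to be recalled.
 */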
4839 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4840 {
4841 struct nfs4_delegation *dl = fl->fl_owner;
4842 struct svc_rqst *rqst;
4843 struct nfs4_client *clp;
4844
4845 if (!i_am_nfsd())
4846 return false;
4847 rqst = kthread_data(current);
4848
4849 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4850 return false;
4851 clp = *(rqst->rq_lease_breaker);
4852 return dl->dl_stid.sc_client == clp;
4853 }
4854
4855 static int
4856 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4857 struct list_head *dispose)
4858 {
4859 struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner;
4860 struct nfs4_client *clp = dp->dl_stid.sc_client;
4861
4862 if (arg & F_UNLCK) {
4863 if (dp->dl_recalled)
4864 atomic_dec(&clp->cl_delegs_in_recall);
4865 return lease_modify(onlist, arg, dispose);
4866 } else
4867 return -EAGAIN;
4868 }
4869
4870 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4871 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4872 .lm_break = nfsd_break_deleg_cb,
4873 .lm_change = nfsd_change_deleg_cb,
4874 };
4875
4876 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4877 {
4878 if (nfsd4_has_session(cstate))
4879 return nfs_ok;
4880 if (seqid == so->so_seqid - 1)
4881 return nfserr_replay_me;
4882 if (seqid == so->so_seqid)
4883 return nfs_ok;
4884 return nfserr_bad_seqid;
4885 }
4886
4887 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
4888 struct nfsd_net *nn)
4889 {
4890 struct nfs4_client *found;
4891
4892 spin_lock(&nn->client_lock);
4893 found = find_confirmed_client(clid, sessions, nn);
4894 if (found)
4895 atomic_inc(&found->cl_rpc_users);
4896 spin_unlock(&nn->client_lock);
4897 return found;
4898 }
4899
4900 static __be32 set_client(clientid_t *clid,
4901 struct nfsd4_compound_state *cstate,
4902 struct nfsd_net *nn)
4903 {
4904 if (cstate->clp) {
4905 if (!same_clid(&cstate->clp->cl_clientid, clid))
4906 return nfserr_stale_clientid;
4907 return nfs_ok;
4908 }
4909 if (STALE_CLIENTID(clid, nn))
4910 return nfserr_stale_clientid;
4911
4912
4913
4914
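/*
 * cstate->clp was not set by a SEQUENCE op, so this is a v4.0 request;
 * look the client up without requiring a session.
 */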
4915 cstate->clp = lookup_clientid(clid, false, nn);
4916 if (!cstate->clp)
4917 return nfserr_expired;
4918 return nfs_ok;
4919 }
4920
4921 __be32
4922 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4923 struct nfsd4_open *open, struct nfsd_net *nn)
4924 {
4925 clientid_t *clientid = &open->op_clientid;
4926 struct nfs4_client *clp = NULL;
4927 unsigned int strhashval;
4928 struct nfs4_openowner *oo = NULL;
4929 __be32 status;
4930
4931
4932
4933
4934
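/*
 * Preallocate the nfs4_file while it is still harmless to fail with
 * nfserr_jukebox; it is either consumed by nfsd4_process_open2() or
 * freed again by nfsd4_cleanup_open_state().
 */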
4935 open->op_file = nfsd4_alloc_file();
4936 if (open->op_file == NULL)
4937 return nfserr_jukebox;
4938
4939 status = set_client(clientid, cstate, nn);
4940 if (status)
4941 return status;
4942 clp = cstate->clp;
4943
4944 strhashval = ownerstr_hashval(&open->op_owner);
4945 oo = find_openstateowner_str(strhashval, open, clp);
4946 open->op_openowner = oo;
4947 if (!oo) {
4948 goto new_owner;
4949 }
4950 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4951
4952 release_openowner(oo);
4953 open->op_openowner = NULL;
4954 goto new_owner;
4955 }
4956 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4957 if (status)
4958 return status;
4959 goto alloc_stateid;
4960 new_owner:
4961 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4962 if (oo == NULL)
4963 return nfserr_jukebox;
4964 open->op_openowner = oo;
4965 alloc_stateid:
4966 open->op_stp = nfs4_alloc_open_stateid(clp);
4967 if (!open->op_stp)
4968 return nfserr_jukebox;
4969
4970 if (nfsd4_has_session(cstate) &&
4971 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4972 open->op_odstate = alloc_clnt_odstate(clp);
4973 if (!open->op_odstate)
4974 return nfserr_jukebox;
4975 }
4976
4977 return nfs_ok;
4978 }
4979
4980 static inline __be32
4981 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4982 {
4983 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4984 return nfserr_openmode;
4985 else
4986 return nfs_ok;
4987 }
4988
4989 static int share_access_to_flags(u32 share_access)
4990 {
4991 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4992 }
4993
4994 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4995 {
4996 struct nfs4_stid *ret;
4997
4998 ret = find_stateid_by_type(cl, s,
4999 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
5000 if (!ret)
5001 return NULL;
5002 return delegstateid(ret);
5003 }
5004
5005 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
5006 {
5007 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
5008 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
5009 }
5010
5011 static __be32
5012 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
5013 struct nfs4_delegation **dp)
5014 {
5015 int flags;
5016 __be32 status = nfserr_bad_stateid;
5017 struct nfs4_delegation *deleg;
5018
5019 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
5020 if (deleg == NULL)
5021 goto out;
5022 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
5023 nfs4_put_stid(&deleg->dl_stid);
5024 if (cl->cl_minorversion)
5025 status = nfserr_deleg_revoked;
5026 goto out;
5027 }
5028 flags = share_access_to_flags(open->op_share_access);
5029 status = nfs4_check_delegmode(deleg, flags);
5030 if (status) {
5031 nfs4_put_stid(&deleg->dl_stid);
5032 goto out;
5033 }
5034 *dp = deleg;
5035 out:
5036 if (!nfsd4_is_deleg_cur(open))
5037 return nfs_ok;
5038 if (status)
5039 return status;
5040 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5041 return nfs_ok;
5042 }
5043
5044 static inline int nfs4_access_to_access(u32 nfs4_access)
5045 {
5046 int flags = 0;
5047
5048 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
5049 flags |= NFSD_MAY_READ;
5050 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
5051 flags |= NFSD_MAY_WRITE;
5052 return flags;
5053 }
5054
5055 static inline __be32
5056 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
5057 struct nfsd4_open *open)
5058 {
5059 struct iattr iattr = {
5060 .ia_valid = ATTR_SIZE,
5061 .ia_size = 0,
5062 };
5063 struct nfsd_attrs attrs = {
5064 .na_iattr = &iattr,
5065 };
5066 if (!open->op_truncate)
5067 return 0;
5068 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
5069 return nfserr_inval;
5070 return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0);
5071 }
5072
5073 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
5074 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5075 struct nfsd4_open *open, bool new_stp)
5076 {
5077 struct nfsd_file *nf = NULL;
5078 __be32 status;
5079 int oflag = nfs4_access_to_omode(open->op_share_access);
5080 int access = nfs4_access_to_access(open->op_share_access);
5081 unsigned char old_access_bmap, old_deny_bmap;
5082
5083 spin_lock(&fp->fi_lock);
5084
5085
5086
5087
5088
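/*
 * Check the requested deny mode against existing access before granting
 * anything; when the conflict comes only from a courtesy client,
 * nfs4_resolve_deny_conflicts_locked() schedules that client for expiry
 * and the request is asked to retry with nfserr_jukebox.
 */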
5089 status = nfs4_file_check_deny(fp, open->op_share_deny);
5090 if (status != nfs_ok) {
5091 if (status != nfserr_share_denied) {
5092 spin_unlock(&fp->fi_lock);
5093 goto out;
5094 }
5095 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5096 stp, open->op_share_deny, false))
5097 status = nfserr_jukebox;
5098 spin_unlock(&fp->fi_lock);
5099 goto out;
5100 }
5101
5102
5103 status = nfs4_file_get_access(fp, open->op_share_access);
5104 if (status != nfs_ok) {
5105 if (status != nfserr_share_denied) {
5106 spin_unlock(&fp->fi_lock);
5107 goto out;
5108 }
5109 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5110 stp, open->op_share_access, true))
5111 status = nfserr_jukebox;
5112 spin_unlock(&fp->fi_lock);
5113 goto out;
5114 }
5115
5116
5117 old_access_bmap = stp->st_access_bmap;
5118 set_access(open->op_share_access, stp);
5119
5120
5121 old_deny_bmap = stp->st_deny_bmap;
5122 set_deny(open->op_share_deny, stp);
5123 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5124
5125 if (!fp->fi_fds[oflag]) {
5126 spin_unlock(&fp->fi_lock);
5127
5128 if (!open->op_filp) {
5129 status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
5130 if (status != nfs_ok)
5131 goto out_put_access;
5132 } else {
5133 status = nfsd_file_create(rqstp, cur_fh, access, &nf);
5134 if (status != nfs_ok)
5135 goto out_put_access;
5136 nf->nf_file = open->op_filp;
5137 open->op_filp = NULL;
5138 trace_nfsd_file_create(rqstp, access, nf);
5139 }
5140
5141 spin_lock(&fp->fi_lock);
5142 if (!fp->fi_fds[oflag]) {
5143 fp->fi_fds[oflag] = nf;
5144 nf = NULL;
5145 }
5146 }
5147 spin_unlock(&fp->fi_lock);
5148 if (nf)
5149 nfsd_file_put(nf);
5150
5151 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
5152 access));
5153 if (status)
5154 goto out_put_access;
5155
5156 status = nfsd4_truncate(rqstp, cur_fh, open);
5157 if (status)
5158 goto out_put_access;
5159 out:
5160 return status;
5161 out_put_access:
5162 stp->st_access_bmap = old_access_bmap;
5163 nfs4_file_put_access(fp, open->op_share_access);
5164 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
5165 goto out;
5166 }
5167
5168 static __be32
5169 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
5170 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5171 struct nfsd4_open *open)
5172 {
5173 __be32 status;
5174 unsigned char old_deny_bmap = stp->st_deny_bmap;
5175
5176 if (!test_access(open->op_share_access, stp))
5177 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
5178
5179
5180 spin_lock(&fp->fi_lock);
5181 status = nfs4_file_check_deny(fp, open->op_share_deny);
5182 switch (status) {
5183 case nfs_ok:
5184 set_deny(open->op_share_deny, stp);
5185 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5186 break;
5187 case nfserr_share_denied:
5188 if (nfs4_resolve_deny_conflicts_locked(fp, false,
5189 stp, open->op_share_deny, false))
5190 status = nfserr_jukebox;
5191 break;
5192 }
5193 spin_unlock(&fp->fi_lock);
5194
5195 if (status != nfs_ok)
5196 return status;
5197
5198 status = nfsd4_truncate(rqstp, cur_fh, open);
5199 if (status != nfs_ok)
5200 reset_union_bmap_deny(old_deny_bmap, stp);
5201 return status;
5202 }
5203
5204
5205 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5206 {
5207 if (clp->cl_cb_state == NFSD4_CB_UP)
5208 return true;
5209
5210
5211
5212
5213
5214 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5215 }
5216
5217 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5218 int flag)
5219 {
5220 struct file_lock *fl;
5221
5222 fl = locks_alloc_lock();
5223 if (!fl)
5224 return NULL;
5225 fl->fl_lmops = &nfsd_lease_mng_ops;
5226 fl->fl_flags = FL_DELEG;
5227 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
5228 fl->fl_end = OFFSET_MAX;
5229 fl->fl_owner = (fl_owner_t)dp;
5230 fl->fl_pid = current->tgid;
5231 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5232 return fl;
5233 }
5234
5235 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5236 struct nfs4_file *fp)
5237 {
5238 struct nfs4_ol_stateid *st;
5239 struct file *f = fp->fi_deleg_file->nf_file;
5240 struct inode *ino = locks_inode(f);
5241 int writes;
5242
5243 writes = atomic_read(&ino->i_writecount);
5244 if (!writes)
5245 return 0;
5246
5247
5248
5249
5250
5251
5252
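/*
 * Several filehandles (and hence several nfs4_file structures) can alias
 * the same inode. Tracking down which alias accounts for the remaining
 * writers is not worth the effort, so simply refuse the delegation.
 */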
5253 if (fp->fi_aliased)
5254 return -EAGAIN;
5255
5256
5257
5258
5259
5260 smp_mb__after_atomic();
5261
5262 if (fp->fi_fds[O_WRONLY])
5263 writes--;
5264 if (fp->fi_fds[O_RDWR])
5265 writes--;
5266 if (writes > 0)
5267 return -EAGAIN;
5268
5269
5270
5271
5272
5273
5274
5275 spin_lock(&fp->fi_lock);
5276 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5277 if (st->st_openstp == NULL &&
5278 access_permit_write(st) &&
5279 st->st_stid.sc_client != clp) {
5280 spin_unlock(&fp->fi_lock);
5281 return -EAGAIN;
5282 }
5283 }
5284 spin_unlock(&fp->fi_lock);
5285
5286
5287
5288
5289
5290
5291 return 0;
5292 }
5293
5294
5295
5296
5297
5298
5299 static int
5300 nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
5301 struct svc_fh *parent)
5302 {
5303 struct svc_export *exp;
5304 struct dentry *child;
5305 __be32 err;
5306
5307 err = nfsd_lookup_dentry(open->op_rqstp, parent,
5308 open->op_fname, open->op_fnamelen,
5309 &exp, &child);
5310
5311 if (err)
5312 return -EAGAIN;
5313
5314 dput(child);
5315 if (child != file_dentry(fp->fi_deleg_file->nf_file))
5316 return -EAGAIN;
5317
5318 return 0;
5319 }
5320
5321 static struct nfs4_delegation *
5322 nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5323 struct svc_fh *parent)
5324 {
5325 int status = 0;
5326 struct nfs4_client *clp = stp->st_stid.sc_client;
5327 struct nfs4_file *fp = stp->st_stid.sc_file;
5328 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
5329 struct nfs4_delegation *dp;
5330 struct nfsd_file *nf;
5331 struct file_lock *fl;
5332
5333
5334
5335
5336
5337
5338 if (fp->fi_had_conflict)
5339 return ERR_PTR(-EAGAIN);
5340
5341 nf = find_readable_file(fp);
5342 if (!nf) {
5343
5344
5345
5346
5347
5348 return ERR_PTR(-EAGAIN);
5349 }
5350 spin_lock(&state_lock);
5351 spin_lock(&fp->fi_lock);
5352 if (nfs4_delegation_exists(clp, fp))
5353 status = -EAGAIN;
5354 else if (!fp->fi_deleg_file) {
5355 fp->fi_deleg_file = nf;
5356
5357
5358 fp->fi_delegees = 1;
5359 nf = NULL;
5360 } else
5361 fp->fi_delegees++;
5362 spin_unlock(&fp->fi_lock);
5363 spin_unlock(&state_lock);
5364 if (nf)
5365 nfsd_file_put(nf);
5366 if (status)
5367 return ERR_PTR(status);
5368
5369 status = -ENOMEM;
5370 dp = alloc_init_deleg(clp, fp, odstate);
5371 if (!dp)
5372 goto out_delegees;
5373
5374 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
5375 if (!fl)
5376 goto out_clnt_odstate;
5377
5378 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5379 if (fl)
5380 locks_free_lock(fl);
5381 if (status)
5382 goto out_clnt_odstate;
5383
5384 if (parent) {
5385 status = nfsd4_verify_deleg_dentry(open, fp, parent);
5386 if (status)
5387 goto out_unlock;
5388 }
5389
5390 status = nfsd4_check_conflicting_opens(clp, fp);
5391 if (status)
5392 goto out_unlock;
5393
5394 spin_lock(&state_lock);
5395 spin_lock(&fp->fi_lock);
5396 if (fp->fi_had_conflict)
5397 status = -EAGAIN;
5398 else
5399 status = hash_delegation_locked(dp, fp);
5400 spin_unlock(&fp->fi_lock);
5401 spin_unlock(&state_lock);
5402
5403 if (status)
5404 goto out_unlock;
5405
5406 return dp;
5407 out_unlock:
5408 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5409 out_clnt_odstate:
5410 put_clnt_odstate(dp->dl_clnt_odstate);
5411 nfs4_put_stid(&dp->dl_stid);
5412 out_delegees:
5413 put_deleg_file(fp);
5414 return ERR_PTR(status);
5415 }
5416
5417 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5418 {
5419 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5420 if (status == -EAGAIN)
5421 open->op_why_no_deleg = WND4_CONTENTION;
5422 else {
5423 open->op_why_no_deleg = WND4_RESOURCE;
5424 switch (open->op_deleg_want) {
5425 case NFS4_SHARE_WANT_READ_DELEG:
5426 case NFS4_SHARE_WANT_WRITE_DELEG:
5427 case NFS4_SHARE_WANT_ANY_DELEG:
5428 break;
5429 case NFS4_SHARE_WANT_CANCEL:
5430 open->op_why_no_deleg = WND4_CANCELLED;
5431 break;
5432 case NFS4_SHARE_WANT_NO_DELEG:
5433 WARN_ON_ONCE(1);
5434 }
5435 }
5436 }
5437
5438
5439
5440
5441
5442
5443
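/*
 * Decide whether to offer the client a read delegation on this OPEN.
 * Failure is never fatal: the code simply reports NFS4_OPEN_DELEGATE_NONE
 * (or NONE_EXT with a reason) and the OPEN completes without one.
 */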
5444 static void
5445 nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5446 struct svc_fh *currentfh)
5447 {
5448 struct nfs4_delegation *dp;
5449 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5450 struct nfs4_client *clp = stp->st_stid.sc_client;
5451 struct svc_fh *parent = NULL;
5452 int cb_up;
5453 int status = 0;
5454
5455 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5456 open->op_recall = 0;
5457 switch (open->op_claim_type) {
5458 case NFS4_OPEN_CLAIM_PREVIOUS:
5459 if (!cb_up)
5460 open->op_recall = 1;
5461 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
5462 goto out_no_deleg;
5463 break;
5464 case NFS4_OPEN_CLAIM_NULL:
5465 parent = currentfh;
5466 fallthrough;
5467 case NFS4_OPEN_CLAIM_FH:
5468
5469
5470
5471
5472
5473 if (locks_in_grace(clp->net))
5474 goto out_no_deleg;
5475 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5476 goto out_no_deleg;
5477 break;
5478 default:
5479 goto out_no_deleg;
5480 }
5481 dp = nfs4_set_delegation(open, stp, parent);
5482 if (IS_ERR(dp))
5483 goto out_no_deleg;
5484
5485 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5486
5487 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5488 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5489 nfs4_put_stid(&dp->dl_stid);
5490 return;
5491 out_no_deleg:
5492 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5493 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5494 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5495 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5496 open->op_recall = 1;
5497 }
5498
5499
5500 if (open->op_deleg_want)
5501 nfsd4_open_deleg_none_ext(open, status);
5502 return;
5503 }
5504
5505 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5506 struct nfs4_delegation *dp)
5507 {
5508 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5509 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5510 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5511 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5512 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5513 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5514 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5515 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5516 }
5517
5518
5519
5520
5521 }
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
5535 __be32
5536 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5537 {
5538 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5539 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5540 struct nfs4_file *fp = NULL;
5541 struct nfs4_ol_stateid *stp = NULL;
5542 struct nfs4_delegation *dp = NULL;
5543 __be32 status;
5544 bool new_stp = false;
5545
5546
5547
5548
5549
5550
5551 fp = find_or_add_file(open->op_file, current_fh);
5552 if (fp != open->op_file) {
5553 status = nfs4_check_deleg(cl, open, &dp);
5554 if (status)
5555 goto out;
5556 stp = nfsd4_find_and_lock_existing_open(fp, open);
5557 } else {
5558 open->op_file = NULL;
5559 status = nfserr_bad_stateid;
5560 if (nfsd4_is_deleg_cur(open))
5561 goto out;
5562 }
5563
5564 if (!stp) {
5565 stp = init_open_stateid(fp, open);
5566 if (!open->op_stp)
5567 new_stp = true;
5568 }
5569
5570
5571
5572
5573
5574
5575
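/*
 * Either upgrade the client's existing open stateid or set up file access
 * for the brand-new one created above. stp->st_mutex is held in both
 * branches and is dropped on every exit path.
 */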
5576 if (!new_stp) {
5577
5578 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5579 if (status) {
5580 mutex_unlock(&stp->st_mutex);
5581 goto out;
5582 }
5583 } else {
5584 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
5585 if (status) {
5586 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5587 release_open_stateid(stp);
5588 mutex_unlock(&stp->st_mutex);
5589 goto out;
5590 }
5591
5592 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5593 open->op_odstate);
5594 if (stp->st_clnt_odstate == open->op_odstate)
5595 open->op_odstate = NULL;
5596 }
5597
5598 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5599 mutex_unlock(&stp->st_mutex);
5600
5601 if (nfsd4_has_session(&resp->cstate)) {
5602 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5603 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5604 open->op_why_no_deleg = WND4_NOT_WANTED;
5605 goto nodeleg;
5606 }
5607 }
5608
5609
5610
5611
5612
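/*
 * Attempt to hand out a delegation; errors are deliberately ignored
 * because the OPEN itself has already succeeded at this point.
 */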
5613 nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
5614 nodeleg:
5615 status = nfs_ok;
5616 trace_nfsd_open(&stp->st_stid.sc_stateid);
5617 out:
5618
5619 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5620 open->op_deleg_want)
5621 nfsd4_deleg_xgrade_none_ext(open, dp);
5622
5623 if (fp)
5624 put_nfs4_file(fp);
5625 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5626 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5627
5628
5629
5630 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5631 if (nfsd4_has_session(&resp->cstate))
5632 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5633 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5634 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5635
5636 if (dp)
5637 nfs4_put_stid(&dp->dl_stid);
5638 if (stp)
5639 nfs4_put_stid(&stp->st_stid);
5640
5641 return status;
5642 }
5643
5644 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5645 struct nfsd4_open *open)
5646 {
5647 if (open->op_openowner) {
5648 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5649
5650 nfsd4_cstate_assign_replay(cstate, so);
5651 nfs4_put_stateowner(so);
5652 }
5653 if (open->op_file)
5654 kmem_cache_free(file_slab, open->op_file);
5655 if (open->op_stp)
5656 nfs4_put_stid(&open->op_stp->st_stid);
5657 if (open->op_odstate)
5658 kmem_cache_free(odstate_slab, open->op_odstate);
5659 }
5660
5661 __be32
5662 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5663 union nfsd4_op_u *u)
5664 {
5665 clientid_t *clid = &u->renew;
5666 struct nfs4_client *clp;
5667 __be32 status;
5668 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5669
5670 trace_nfsd_clid_renew(clid);
5671 status = set_client(clid, cstate, nn);
5672 if (status)
5673 return status;
5674 clp = cstate->clp;
5675 if (!list_empty(&clp->cl_delegations)
5676 && clp->cl_cb_state != NFSD4_CB_UP)
5677 return nfserr_cb_path_down;
5678 return nfs_ok;
5679 }
5680
5681 void
5682 nfsd4_end_grace(struct nfsd_net *nn)
5683 {
5684
5685 if (nn->grace_ended)
5686 return;
5687
5688 trace_nfsd_grace_complete(nn);
5689 nn->grace_ended = true;
5690
5691
5692
5693
5694
5695
5696 nfsd4_record_grace_done(nn);
5697
5698
5699
5700
5701
5702
5703
5704
5705
5706 locks_end_grace(&nn->nfsd4_manager);
5707
5708
5709
5710
5711
5712 }
5713
5714
5715
5716
5717
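/*
 * The grace period normally ends after one lease period, but while
 * clients are still actively reclaiming (and we are within two lease
 * periods of boot time) the laundromat keeps it open a little longer.
 */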
5718 static bool clients_still_reclaiming(struct nfsd_net *nn)
5719 {
5720 time64_t double_grace_period_end = nn->boot_time +
5721 2 * nn->nfsd4_lease;
5722
5723 if (nn->track_reclaim_completes &&
5724 atomic_read(&nn->nr_reclaim_complete) ==
5725 nn->reclaim_str_hashtbl_size)
5726 return false;
5727 if (!nn->somebody_reclaimed)
5728 return false;
5729 nn->somebody_reclaimed = false;
5730
5731
5732
5733
5734 if (ktime_get_boottime_seconds() > double_grace_period_end)
5735 return false;
5736 return true;
5737 }
5738
5739 struct laundry_time {
5740 time64_t cutoff;
5741 time64_t new_timeo;
5742 };
5743
5744 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5745 {
5746 time64_t time_remaining;
5747
5748 if (last_refresh < lt->cutoff)
5749 return true;
5750 time_remaining = last_refresh - lt->cutoff;
5751 lt->new_timeo = min(lt->new_timeo, time_remaining);
5752 return false;
5753 }
5754
5755 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5756 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5757 {
5758 spin_lock_init(&nn->nfsd_ssc_lock);
5759 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5760 init_waitqueue_head(&nn->nfsd_ssc_waitq);
5761 }
5762 EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5763
5764
5765
5766
5767
5768 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5769 {
5770 struct nfsd4_ssc_umount_item *ni = NULL;
5771 struct nfsd4_ssc_umount_item *tmp;
5772
5773 spin_lock(&nn->nfsd_ssc_lock);
5774 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5775 list_del(&ni->nsui_list);
5776 spin_unlock(&nn->nfsd_ssc_lock);
5777 mntput(ni->nsui_vfsmount);
5778 kfree(ni);
5779 spin_lock(&nn->nfsd_ssc_lock);
5780 }
5781 spin_unlock(&nn->nfsd_ssc_lock);
5782 }
5783
5784 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5785 {
5786 bool do_wakeup = false;
5787 struct nfsd4_ssc_umount_item *ni = NULL;
5788 struct nfsd4_ssc_umount_item *tmp;
5789
5790 spin_lock(&nn->nfsd_ssc_lock);
5791 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5792 if (time_after(jiffies, ni->nsui_expire)) {
5793 if (refcount_read(&ni->nsui_refcnt) > 1)
5794 continue;
5795
5796
5797 ni->nsui_busy = true;
5798 spin_unlock(&nn->nfsd_ssc_lock);
5799 mntput(ni->nsui_vfsmount);
5800 spin_lock(&nn->nfsd_ssc_lock);
5801
5802
5803 list_del(&ni->nsui_list);
5804 kfree(ni);
5805
5806
5807 do_wakeup = true;
5808 continue;
5809 }
5810 break;
5811 }
5812 if (do_wakeup)
5813 wake_up_all(&nn->nfsd_ssc_waitq);
5814 spin_unlock(&nn->nfsd_ssc_lock);
5815 }
5816 #endif
5817
5818
5819 static bool
5820 nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
5821 {
5822 struct file_lock_context *ctx;
5823 struct nfs4_ol_stateid *stp;
5824 struct nfs4_file *nf;
5825
5826 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
5827 nf = stp->st_stid.sc_file;
5828 ctx = nf->fi_inode->i_flctx;
5829 if (!ctx)
5830 continue;
5831 if (locks_owner_has_blockers(ctx, lo))
5832 return true;
5833 }
5834 return false;
5835 }
5836
5837 static bool
5838 nfs4_anylock_blockers(struct nfs4_client *clp)
5839 {
5840 int i;
5841 struct nfs4_stateowner *so;
5842 struct nfs4_lockowner *lo;
5843
5844 if (atomic_read(&clp->cl_delegs_in_recall))
5845 return true;
5846 spin_lock(&clp->cl_lock);
5847 for (i = 0; i < OWNER_HASH_SIZE; i++) {
5848 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
5849 so_strhash) {
5850 if (so->so_is_open_owner)
5851 continue;
5852 lo = lockowner(so);
5853 if (nfs4_lockowner_has_blockers(lo)) {
5854 spin_unlock(&clp->cl_lock);
5855 return true;
5856 }
5857 }
5858 }
5859 spin_unlock(&clp->cl_lock);
5860 return false;
5861 }
5862
5863 static void
5864 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
5865 struct laundry_time *lt)
5866 {
5867 unsigned int maxreap, reapcnt = 0;
5868 struct list_head *pos, *next;
5869 struct nfs4_client *clp;
5870
5871 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
5872 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
5873 INIT_LIST_HEAD(reaplist);
5874 spin_lock(&nn->client_lock);
5875 list_for_each_safe(pos, next, &nn->client_lru) {
5876 clp = list_entry(pos, struct nfs4_client, cl_lru);
5877 if (clp->cl_state == NFSD4_EXPIRABLE)
5878 goto exp_client;
5879 if (!state_expired(lt, clp->cl_time))
5880 break;
5881 if (!atomic_read(&clp->cl_rpc_users))
5882 clp->cl_state = NFSD4_COURTESY;
5883 if (!client_has_state(clp))
5884 goto exp_client;
5885 if (!nfs4_anylock_blockers(clp))
5886 if (reapcnt >= maxreap)
5887 continue;
5888 exp_client:
5889 if (!mark_client_expired_locked(clp)) {
5890 list_add(&clp->cl_lru, reaplist);
5891 reapcnt++;
5892 }
5893 }
5894 spin_unlock(&nn->client_lock);
5895 }
5896
5897 static time64_t
5898 nfs4_laundromat(struct nfsd_net *nn)
5899 {
5900 struct nfs4_client *clp;
5901 struct nfs4_openowner *oo;
5902 struct nfs4_delegation *dp;
5903 struct nfs4_ol_stateid *stp;
5904 struct nfsd4_blocked_lock *nbl;
5905 struct list_head *pos, *next, reaplist;
5906 struct laundry_time lt = {
5907 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
5908 .new_timeo = nn->nfsd4_lease
5909 };
5910 struct nfs4_cpntf_state *cps;
5911 copy_stateid_t *cps_t;
5912 int i;
5913
5914 if (clients_still_reclaiming(nn)) {
5915 lt.new_timeo = 0;
5916 goto out;
5917 }
5918 nfsd4_end_grace(nn);
5919
5920 spin_lock(&nn->s2s_cp_lock);
5921 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
5922 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
5923 if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
5924 state_expired(&lt, cps->cpntf_time))
5925 _free_cpntf_state_locked(nn, cps);
5926 }
5927 spin_unlock(&nn->s2s_cp_lock);
5928 nfs4_get_client_reaplist(nn, &reaplist, &lt);
5929 list_for_each_safe(pos, next, &reaplist) {
5930 clp = list_entry(pos, struct nfs4_client, cl_lru);
5931 trace_nfsd_clid_purged(&clp->cl_clientid);
5932 list_del_init(&clp->cl_lru);
5933 expire_client(clp);
5934 }
5935 spin_lock(&state_lock);
5936 list_for_each_safe(pos, next, &nn->del_recall_lru) {
5937 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5938 if (!state_expired(&lt, dp->dl_time))
5939 break;
5940 WARN_ON(!unhash_delegation_locked(dp));
5941 list_add(&dp->dl_recall_lru, &reaplist);
5942 }
5943 spin_unlock(&state_lock);
5944 while (!list_empty(&reaplist)) {
5945 dp = list_first_entry(&reaplist, struct nfs4_delegation,
5946 dl_recall_lru);
5947 list_del_init(&dp->dl_recall_lru);
5948 revoke_delegation(dp);
5949 }
5950
5951 spin_lock(&nn->client_lock);
5952 while (!list_empty(&nn->close_lru)) {
5953 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5954 oo_close_lru);
5955 if (!state_expired(&lt, oo->oo_time))
5956 break;
5957 list_del_init(&oo->oo_close_lru);
5958 stp = oo->oo_last_closed_stid;
5959 oo->oo_last_closed_stid = NULL;
5960 spin_unlock(&nn->client_lock);
5961 nfs4_put_stid(&stp->st_stid);
5962 spin_lock(&nn->client_lock);
5963 }
5964 spin_unlock(&nn->client_lock);
5965
5966
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
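/*
 * The reaplist is reused below for blocked lock requests, so it must be
 * fully drained here. Any request that has sat un-retried on
 * blocked_locks_lru for a whole lease period is discarded on the
 * assumption that the client has lost interest in it.
 */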
5977 BUG_ON(!list_empty(&reaplist));
5978 spin_lock(&nn->blocked_locks_lock);
5979 while (!list_empty(&nn->blocked_locks_lru)) {
5980 nbl = list_first_entry(&nn->blocked_locks_lru,
5981 struct nfsd4_blocked_lock, nbl_lru);
5982 if (!state_expired(&lt, nbl->nbl_time))
5983 break;
5984 list_move(&nbl->nbl_lru, &reaplist);
5985 list_del_init(&nbl->nbl_list);
5986 }
5987 spin_unlock(&nn->blocked_locks_lock);
5988
5989 while (!list_empty(&reaplist)) {
5990 nbl = list_first_entry(&reaplist,
5991 struct nfsd4_blocked_lock, nbl_lru);
5992 list_del_init(&nbl->nbl_lru);
5993 free_blocked_lock(nbl);
5994 }
5995 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5996
5997 nfsd4_ssc_expire_umount(nn);
5998 #endif
5999 out:
6000 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6001 }
6002
6003 static void laundromat_main(struct work_struct *);
6004
6005 static void
6006 laundromat_main(struct work_struct *laundry)
6007 {
6008 time64_t t;
6009 struct delayed_work *dwork = to_delayed_work(laundry);
6010 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6011 laundromat_work);
6012
6013 t = nfs4_laundromat(nn);
6014 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6015 }
6016
6017 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6018 {
6019 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6020 return nfserr_bad_stateid;
6021 return nfs_ok;
6022 }
6023
6024 static __be32
6025 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6026 {
6027 __be32 status = nfserr_openmode;
6028
6029
6030 if (stp->st_openstp)
6031 stp = stp->st_openstp;
6032 if ((flags & WR_STATE) && !access_permit_write(stp))
6033 goto out;
6034 if ((flags & RD_STATE) && !access_permit_read(stp))
6035 goto out;
6036 status = nfs_ok;
6037 out:
6038 return status;
6039 }
6040
6041 static inline __be32
6042 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6043 {
6044 if (ONE_STATEID(stateid) && (flags & RD_STATE))
6045 return nfs_ok;
6046 else if (opens_in_grace(net)) {
6047
6048
6049 return nfserr_grace;
6050 } else if (flags & WR_STATE)
6051 return nfs4_share_conflict(current_fh,
6052 NFS4_SHARE_DENY_WRITE);
6053 else
6054 return nfs4_share_conflict(current_fh,
6055 NFS4_SHARE_DENY_READ);
6056 }
6057
6058 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6059 {
6060
6061
6062
6063
6064 if (has_session && in->si_generation == 0)
6065 return nfs_ok;
6066
6067 if (in->si_generation == ref->si_generation)
6068 return nfs_ok;
6069
6070
6071 if (nfsd4_stateid_generation_after(in, ref))
6072 return nfserr_bad_stateid;
6073
6074
6075
6076
6077
6078
6079
6080
6081
6082
6083 return nfserr_old_stateid;
6084 }
6085
6086 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6087 {
6088 __be32 ret;
6089
6090 spin_lock(&s->sc_lock);
6091 ret = nfsd4_verify_open_stid(s);
6092 if (ret == nfs_ok)
6093 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6094 spin_unlock(&s->sc_lock);
6095 return ret;
6096 }
6097
6098 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6099 {
6100 if (ols->st_stateowner->so_is_open_owner &&
6101 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6102 return nfserr_bad_stateid;
6103 return nfs_ok;
6104 }
6105
6106 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6107 {
6108 struct nfs4_stid *s;
6109 __be32 status = nfserr_bad_stateid;
6110
6111 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6112 CLOSE_STATEID(stateid))
6113 return status;
6114 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
6115 return status;
6116 spin_lock(&cl->cl_lock);
6117 s = find_stateid_locked(cl, stateid);
6118 if (!s)
6119 goto out_unlock;
6120 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6121 if (status)
6122 goto out_unlock;
6123 switch (s->sc_type) {
6124 case NFS4_DELEG_STID:
6125 status = nfs_ok;
6126 break;
6127 case NFS4_REVOKED_DELEG_STID:
6128 status = nfserr_deleg_revoked;
6129 break;
6130 case NFS4_OPEN_STID:
6131 case NFS4_LOCK_STID:
6132 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6133 break;
6134 default:
6135 printk("unknown stateid type %x\n", s->sc_type);
6136 fallthrough;
6137 case NFS4_CLOSED_STID:
6138 case NFS4_CLOSED_DELEG_STID:
6139 status = nfserr_bad_stateid;
6140 }
6141 out_unlock:
6142 spin_unlock(&cl->cl_lock);
6143 return status;
6144 }
6145
6146 __be32
6147 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6148 stateid_t *stateid, unsigned char typemask,
6149 struct nfs4_stid **s, struct nfsd_net *nn)
6150 {
6151 __be32 status;
6152 bool return_revoked = false;
6153
6154
6155
6156
6157
6158 if (typemask & NFS4_REVOKED_DELEG_STID)
6159 return_revoked = true;
6160 else if (typemask & NFS4_DELEG_STID)
6161 typemask |= NFS4_REVOKED_DELEG_STID;
6162
6163 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6164 CLOSE_STATEID(stateid))
6165 return nfserr_bad_stateid;
6166 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6167 if (status == nfserr_stale_clientid) {
6168 if (cstate->session)
6169 return nfserr_bad_stateid;
6170 return nfserr_stale_stateid;
6171 }
6172 if (status)
6173 return status;
6174 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
6175 if (!*s)
6176 return nfserr_bad_stateid;
6177 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
6178 nfs4_put_stid(*s);
6179 if (cstate->minorversion)
6180 return nfserr_deleg_revoked;
6181 return nfserr_bad_stateid;
6182 }
6183 return nfs_ok;
6184 }
6185
6186 static struct nfsd_file *
6187 nfs4_find_file(struct nfs4_stid *s, int flags)
6188 {
6189 if (!s)
6190 return NULL;
6191
6192 switch (s->sc_type) {
6193 case NFS4_DELEG_STID:
6194 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
6195 return NULL;
6196 return nfsd_file_get(s->sc_file->fi_deleg_file);
6197 case NFS4_OPEN_STID:
6198 case NFS4_LOCK_STID:
6199 if (flags & RD_STATE)
6200 return find_readable_file(s->sc_file);
6201 else
6202 return find_writeable_file(s->sc_file);
6203 }
6204
6205 return NULL;
6206 }
6207
6208 static __be32
6209 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6210 {
6211 __be32 status;
6212
6213 status = nfsd4_check_openowner_confirmed(ols);
6214 if (status)
6215 return status;
6216 return nfs4_check_openmode(ols, flags);
6217 }
6218
6219 static __be32
6220 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6221 struct nfsd_file **nfp, int flags)
6222 {
6223 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6224 struct nfsd_file *nf;
6225 __be32 status;
6226
6227 nf = nfs4_find_file(s, flags);
6228 if (nf) {
6229 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
6230 acc | NFSD_MAY_OWNER_OVERRIDE);
6231 if (status) {
6232 nfsd_file_put(nf);
6233 goto out;
6234 }
6235 } else {
6236 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
6237 if (status)
6238 return status;
6239 }
6240 *nfp = nf;
6241 out:
6242 return status;
6243 }
6244 static void
6245 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6246 {
6247 WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
6248 if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
6249 return;
6250 list_del(&cps->cp_list);
6251 idr_remove(&nn->s2s_cp_stateids,
6252 cps->cp_stateid.stid.si_opaque.so_id);
6253 kfree(cps);
6254 }
6255
6256
6257
6258
6259
6260 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6261 struct nfs4_client *clp,
6262 struct nfs4_cpntf_state **cps)
6263 {
6264 copy_stateid_t *cps_t;
6265 struct nfs4_cpntf_state *state = NULL;
6266
6267 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
6268 return nfserr_bad_stateid;
6269 spin_lock(&nn->s2s_cp_lock);
6270 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
6271 if (cps_t) {
6272 state = container_of(cps_t, struct nfs4_cpntf_state,
6273 cp_stateid);
6274 if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
6275 state = NULL;
6276 goto unlock;
6277 }
6278 if (!clp)
6279 refcount_inc(&state->cp_stateid.sc_count);
6280 else
6281 _free_cpntf_state_locked(nn, state);
6282 }
6283 unlock:
6284 spin_unlock(&nn->s2s_cp_lock);
6285 if (!state)
6286 return nfserr_bad_stateid;
6287 if (!clp && state)
6288 *cps = state;
6289 return 0;
6290 }
6291
6292 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6293 struct nfs4_stid **stid)
6294 {
6295 __be32 status;
6296 struct nfs4_cpntf_state *cps = NULL;
6297 struct nfs4_client *found;
6298
6299 status = manage_cpntf_state(nn, st, NULL, &cps);
6300 if (status)
6301 return status;
6302
6303 cps->cpntf_time = ktime_get_boottime_seconds();
6304
6305 status = nfserr_expired;
6306 found = lookup_clientid(&cps->cp_p_clid, true, nn);
6307 if (!found)
6308 goto out;
6309
6310 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6311 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
6312 if (*stid)
6313 status = nfs_ok;
6314 else
6315 status = nfserr_bad_stateid;
6316
6317 put_client_renew(found);
6318 out:
6319 nfs4_put_cpntf_state(nn, cps);
6320 return status;
6321 }
6322
6323 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6324 {
6325 spin_lock(&nn->s2s_cp_lock);
6326 _free_cpntf_state_locked(nn, cps);
6327 spin_unlock(&nn->s2s_cp_lock);
6328 }
6329
6330
6331
6332
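/*
 * Validate a stateid presented with an I/O-style operation (e.g. READ or
 * WRITE): handle the special all-zero/all-one stateids, otherwise look up
 * the open, lock, delegation or copy-notify stateid, check its generation
 * and access mode against the request, and optionally hand back an open
 * nfsd_file and/or the nfs4_stid itself.
 */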
6333 __be32
6334 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6335 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6336 stateid_t *stateid, int flags, struct nfsd_file **nfp,
6337 struct nfs4_stid **cstid)
6338 {
6339 struct net *net = SVC_NET(rqstp);
6340 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6341 struct nfs4_stid *s = NULL;
6342 __be32 status;
6343
6344 if (nfp)
6345 *nfp = NULL;
6346
6347 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6348 if (cstid)
6349 status = nfserr_bad_stateid;
6350 else
6351 status = check_special_stateids(net, fhp, stateid,
6352 flags);
6353 goto done;
6354 }
6355
6356 status = nfsd4_lookup_stateid(cstate, stateid,
6357 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6358 &s, nn);
6359 if (status == nfserr_bad_stateid)
6360 status = find_cpntf_state(nn, stateid, &s);
6361 if (status)
6362 return status;
6363 status = nfsd4_stid_check_stateid_generation(stateid, s,
6364 nfsd4_has_session(cstate));
6365 if (status)
6366 goto out;
6367
6368 switch (s->sc_type) {
6369 case NFS4_DELEG_STID:
6370 status = nfs4_check_delegmode(delegstateid(s), flags);
6371 break;
6372 case NFS4_OPEN_STID:
6373 case NFS4_LOCK_STID:
6374 status = nfs4_check_olstateid(openlockstateid(s), flags);
6375 break;
6376 default:
6377 status = nfserr_bad_stateid;
6378 break;
6379 }
6380 if (status)
6381 goto out;
6382 status = nfs4_check_fh(fhp, s);
6383
6384 done:
6385 if (status == nfs_ok && nfp)
6386 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6387 out:
6388 if (s) {
6389 if (!status && cstid)
6390 *cstid = s;
6391 else
6392 nfs4_put_stid(s);
6393 }
6394 return status;
6395 }
6396
6397
6398
6399
6400 __be32
6401 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6402 union nfsd4_op_u *u)
6403 {
6404 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6405 struct nfsd4_test_stateid_id *stateid;
6406 struct nfs4_client *cl = cstate->clp;
6407
6408 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6409 stateid->ts_id_status =
6410 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6411
6412 return nfs_ok;
6413 }
6414
6415 static __be32
6416 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6417 {
6418 struct nfs4_ol_stateid *stp = openlockstateid(s);
6419 __be32 ret;
6420
6421 ret = nfsd4_lock_ol_stateid(stp);
6422 if (ret)
6423 goto out_put_stid;
6424
6425 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6426 if (ret)
6427 goto out;
6428
6429 ret = nfserr_locks_held;
6430 if (check_for_locks(stp->st_stid.sc_file,
6431 lockowner(stp->st_stateowner)))
6432 goto out;
6433
6434 release_lock_stateid(stp);
6435 ret = nfs_ok;
6436
6437 out:
6438 mutex_unlock(&stp->st_mutex);
6439 out_put_stid:
6440 nfs4_put_stid(s);
6441 return ret;
6442 }
6443
6444 __be32
6445 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6446 union nfsd4_op_u *u)
6447 {
6448 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6449 stateid_t *stateid = &free_stateid->fr_stateid;
6450 struct nfs4_stid *s;
6451 struct nfs4_delegation *dp;
6452 struct nfs4_client *cl = cstate->clp;
6453 __be32 ret = nfserr_bad_stateid;
6454
6455 spin_lock(&cl->cl_lock);
6456 s = find_stateid_locked(cl, stateid);
6457 if (!s)
6458 goto out_unlock;
6459 spin_lock(&s->sc_lock);
6460 switch (s->sc_type) {
6461 case NFS4_DELEG_STID:
6462 ret = nfserr_locks_held;
6463 break;
6464 case NFS4_OPEN_STID:
6465 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6466 if (ret)
6467 break;
6468 ret = nfserr_locks_held;
6469 break;
6470 case NFS4_LOCK_STID:
6471 spin_unlock(&s->sc_lock);
6472 refcount_inc(&s->sc_count);
6473 spin_unlock(&cl->cl_lock);
6474 ret = nfsd4_free_lock_stateid(stateid, s);
6475 goto out;
6476 case NFS4_REVOKED_DELEG_STID:
6477 spin_unlock(&s->sc_lock);
6478 dp = delegstateid(s);
6479 list_del_init(&dp->dl_recall_lru);
6480 spin_unlock(&cl->cl_lock);
6481 nfs4_put_stid(s);
6482 ret = nfs_ok;
6483 goto out;
6484
6485 }
6486 spin_unlock(&s->sc_lock);
6487 out_unlock:
6488 spin_unlock(&cl->cl_lock);
6489 out:
6490 return ret;
6491 }
6492
6493 static inline int
6494 setlkflg (int type)
6495 {
6496 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6497 RD_STATE : WR_STATE;
6498 }
6499
6500 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6501 {
6502 struct svc_fh *current_fh = &cstate->current_fh;
6503 struct nfs4_stateowner *sop = stp->st_stateowner;
6504 __be32 status;
6505
6506 status = nfsd4_check_seqid(cstate, sop, seqid);
6507 if (status)
6508 return status;
6509 status = nfsd4_lock_ol_stateid(stp);
6510 if (status != nfs_ok)
6511 return status;
6512 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6513 if (status == nfs_ok)
6514 status = nfs4_check_fh(current_fh, &stp->st_stid);
6515 if (status != nfs_ok)
6516 mutex_unlock(&stp->st_mutex);
6517 return status;
6518 }
6519
6520
6521
6522
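/*
 * Common preprocessing for seqid-mutating operations (OPEN_CONFIRM,
 * OPEN_DOWNGRADE and friends): look up the open/lock stateid, record the
 * replay owner, verify the seqid and stateid generation, and return with
 * st_mutex held on success.
 */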
6523 static __be32
6524 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6525 stateid_t *stateid, char typemask,
6526 struct nfs4_ol_stateid **stpp,
6527 struct nfsd_net *nn)
6528 {
6529 __be32 status;
6530 struct nfs4_stid *s;
6531 struct nfs4_ol_stateid *stp = NULL;
6532
6533 trace_nfsd_preprocess(seqid, stateid);
6534
6535 *stpp = NULL;
6536 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6537 if (status)
6538 return status;
6539 stp = openlockstateid(s);
6540 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6541
6542 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6543 if (!status)
6544 *stpp = stp;
6545 else
6546 nfs4_put_stid(&stp->st_stid);
6547 return status;
6548 }
6549
6550 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6551 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6552 {
6553 __be32 status;
6554 struct nfs4_openowner *oo;
6555 struct nfs4_ol_stateid *stp;
6556
6557 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6558 NFS4_OPEN_STID, &stp, nn);
6559 if (status)
6560 return status;
6561 oo = openowner(stp->st_stateowner);
6562 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6563 mutex_unlock(&stp->st_mutex);
6564 nfs4_put_stid(&stp->st_stid);
6565 return nfserr_bad_stateid;
6566 }
6567 *stpp = stp;
6568 return nfs_ok;
6569 }
6570
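/*
 * OPEN_CONFIRM (NFSv4.0 only): confirm the first OPEN by this openowner.
 * On success the openowner is marked confirmed, the stateid seqid is
 * bumped, and a stable client record is created so the client can
 * reclaim its state after a server reboot.
 */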
6571 __be32
6572 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6573 union nfsd4_op_u *u)
6574 {
6575 struct nfsd4_open_confirm *oc = &u->open_confirm;
6576 __be32 status;
6577 struct nfs4_openowner *oo;
6578 struct nfs4_ol_stateid *stp;
6579 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6580
6581 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6582 cstate->current_fh.fh_dentry);
6583
6584 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6585 if (status)
6586 return status;
6587
6588 status = nfs4_preprocess_seqid_op(cstate,
6589 oc->oc_seqid, &oc->oc_req_stateid,
6590 NFS4_OPEN_STID, &stp, nn);
6591 if (status)
6592 goto out;
6593 oo = openowner(stp->st_stateowner);
6594 status = nfserr_bad_stateid;
6595 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6596 mutex_unlock(&stp->st_mutex);
6597 goto put_stateid;
6598 }
6599 oo->oo_flags |= NFS4_OO_CONFIRMED;
6600 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6601 mutex_unlock(&stp->st_mutex);
6602 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6603 nfsd4_client_record_create(oo->oo_owner.so_client);
6604 status = nfs_ok;
6605 put_stateid:
6606 nfs4_put_stid(&stp->st_stid);
6607 out:
6608 nfsd4_bump_seqid(cstate, status);
6609 return status;
6610 }
6611
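/*
 * Drop a single share-access bit from an open stateid and release the
 * corresponding reference on the nfs4_file.
 */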
6612 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6613 {
6614 if (!test_access(access, stp))
6615 return;
6616 nfs4_file_put_access(stp->st_stid.sc_file, access);
6617 clear_access(access, stp);
6618 }
6619
6620 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6621 {
6622 switch (to_access) {
6623 case NFS4_SHARE_ACCESS_READ:
6624 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6625 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6626 break;
6627 case NFS4_SHARE_ACCESS_WRITE:
6628 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6629 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6630 break;
6631 case NFS4_SHARE_ACCESS_BOTH:
6632 break;
6633 default:
6634 WARN_ON_ONCE(1);
6635 }
6636 }
6637
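/*
 * OPEN_DOWNGRADE: reduce the share access and deny bits of an open
 * stateid. The requested modes must be a subset of what the stateid
 * currently holds; anything else is rejected with nfserr_inval.
 */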
6638 __be32
6639 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6640 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6641 {
6642 struct nfsd4_open_downgrade *od = &u->open_downgrade;
6643 __be32 status;
6644 struct nfs4_ol_stateid *stp;
6645 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6646
6647 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6648 cstate->current_fh.fh_dentry);
6649
	/* We don't yet support WANT bits: */
6651 if (od->od_deleg_want)
6652 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6653 od->od_deleg_want);
6654
6655 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6656 &od->od_stateid, &stp, nn);
6657 if (status)
6658 goto out;
6659 status = nfserr_inval;
6660 if (!test_access(od->od_share_access, stp)) {
6661 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6662 stp->st_access_bmap, od->od_share_access);
6663 goto put_stateid;
6664 }
6665 if (!test_deny(od->od_share_deny, stp)) {
6666 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6667 stp->st_deny_bmap, od->od_share_deny);
6668 goto put_stateid;
6669 }
6670 nfs4_stateid_downgrade(stp, od->od_share_access);
6671 reset_union_bmap_deny(od->od_share_deny, stp);
6672 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6673 status = nfs_ok;
6674 put_stateid:
6675 mutex_unlock(&stp->st_mutex);
6676 nfs4_put_stid(&stp->st_stid);
6677 out:
6678 nfsd4_bump_seqid(cstate, status);
6679 return status;
6680 }
6681
6682 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6683 {
6684 struct nfs4_client *clp = s->st_stid.sc_client;
6685 bool unhashed;
6686 LIST_HEAD(reaplist);
6687
6688 spin_lock(&clp->cl_lock);
6689 unhashed = unhash_open_stateid(s, &reaplist);
6690
6691 if (clp->cl_minorversion) {
6692 if (unhashed)
6693 put_ol_stateid_locked(s, &reaplist);
6694 spin_unlock(&clp->cl_lock);
6695 free_ol_stateid_reaplist(&reaplist);
6696 } else {
6697 spin_unlock(&clp->cl_lock);
6698 free_ol_stateid_reaplist(&reaplist);
6699 if (unhashed)
6700 move_to_close_lru(s, clp->net);
6701 }
6702 }
6703
/*
 * CLOSE: release the open stateid named in the request.
 */
6707 __be32
6708 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6709 union nfsd4_op_u *u)
6710 {
6711 struct nfsd4_close *close = &u->close;
6712 __be32 status;
6713 struct nfs4_ol_stateid *stp;
6714 struct net *net = SVC_NET(rqstp);
6715 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6716
6717 dprintk("NFSD: nfsd4_close on file %pd\n",
6718 cstate->current_fh.fh_dentry);
6719
6720 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6721 &close->cl_stateid,
6722 NFS4_OPEN_STID|NFS4_CLOSED_STID,
6723 &stp, nn);
6724 nfsd4_bump_seqid(cstate, status);
6725 if (status)
6726 goto out;
6727
6728 stp->st_stid.sc_type = NFS4_CLOSED_STID;

	/*
	 * Technically we don't _really_ need to increment or copy it, since
	 * it should just be gone after this operation and we clobber the
	 * copied value below, but we continue to do so just for the sake of
	 * conservatism.
	 */
6736 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6737
6738 nfsd4_close_open_stateid(stp);
6739 mutex_unlock(&stp->st_mutex);

	/*
	 * v4.1+ suggests that we send a special stateid in here, since the
	 * clients should just ignore this anyway. Since this is not useful
	 * for v4.0 clients either, we set it to the special close_stateid
	 * stateid.
	 */
6748 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
6751 nfs4_put_stid(&stp->st_stid);
6752 out:
6753 return status;
6754 }
6755
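/*
 * DELEGRETURN: the client is voluntarily returning a delegation. Look
 * up the delegation stateid, verify its generation, and destroy it.
 */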
6756 __be32
6757 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6758 union nfsd4_op_u *u)
6759 {
6760 struct nfsd4_delegreturn *dr = &u->delegreturn;
6761 struct nfs4_delegation *dp;
6762 stateid_t *stateid = &dr->dr_stateid;
6763 struct nfs4_stid *s;
6764 __be32 status;
6765 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6766
6767 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6768 return status;
6769
6770 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6771 if (status)
6772 goto out;
6773 dp = delegstateid(s);
6774 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6775 if (status)
6776 goto put_stateid;
6777
6778 destroy_delegation(dp);
6779 put_stateid:
6780 nfs4_put_stid(&dp->dl_stid);
6781 out:
6782 return status;
6783 }
6784
/*
 * last octet in a range: e.g. start=3, len=4 covers bytes 3..6, so this
 * returns 6; a length that wraps past 2^64 is treated as "to EOF" and
 * returns NFS4_MAX_UINT64.
 */
6786 static inline u64
6787 last_byte_offset(u64 start, u64 len)
6788 {
6789 u64 end;
6790
6791 WARN_ON_ONCE(!len);
6792 end = start + len;
6793 return end > start ? end - 1 : NFS4_MAX_UINT64;
6794 }
6795
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means
 * that we can't properly handle lock requests that go beyond the
 * (2^63 - 1)-th byte, because of sign extension problems.  Since NFSv4
 * calls for 64-bit lock ranges, a request can produce an fl_start or
 * fl_end that maps to a negative loff_t; clamp such values to OFFSET_MAX
 * rather than passing a negative offset to the VFS.
 */
6804 static inline void
6805 nfs4_transform_lock_offset(struct file_lock *lock)
6806 {
6807 if (lock->fl_start < 0)
6808 lock->fl_start = OFFSET_MAX;
6809 if (lock->fl_end < 0)
6810 lock->fl_end = OFFSET_MAX;
6811 }
6812
6813 static fl_owner_t
6814 nfsd4_lm_get_owner(fl_owner_t owner)
6815 {
6816 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6817
6818 nfs4_get_stateowner(&lo->lo_owner);
6819 return owner;
6820 }
6821
6822 static void
6823 nfsd4_lm_put_owner(fl_owner_t owner)
6824 {
6825 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6826
6827 if (lo)
6828 nfs4_put_stateowner(&lo->lo_owner);
6829 }
6830
6831
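/*
 * Called when a lock request conflicts with an existing lock: if
 * try_to_expire_client() says the owning client may be expired (it is
 * only a courtesy client), kick the laundromat to run immediately and
 * return true so the caller knows the conflict may soon disappear.
 */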
6832 static bool
6833 nfsd4_lm_lock_expirable(struct file_lock *cfl)
6834 {
6835 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
6836 struct nfs4_client *clp = lo->lo_owner.so_client;
6837 struct nfsd_net *nn;
6838
6839 if (try_to_expire_client(clp)) {
6840 nn = net_generic(clp->net, nfsd_net_id);
6841 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
6842 return true;
6843 }
6844 return false;
6845 }
6846
6847
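/*
 * Flush the laundromat workqueue so that any client expiry scheduled by
 * nfsd4_lm_lock_expirable() has completed.
 */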
6848 static void
6849 nfsd4_lm_expire_lock(void)
6850 {
6851 flush_workqueue(laundry_wq);
6852 }
6853
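/*
 * Called when a lock we are blocked on becomes available. Dequeue the
 * blocked-lock record and run the CB_NOTIFY_LOCK callback so the client
 * knows to retry its LOCK request.
 */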
6854 static void
6855 nfsd4_lm_notify(struct file_lock *fl)
6856 {
6857 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
6858 struct net *net = lo->lo_owner.so_client->net;
6859 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6860 struct nfsd4_blocked_lock *nbl = container_of(fl,
6861 struct nfsd4_blocked_lock, nbl_lock);
6862 bool queue = false;
6863
	/* An empty list means that something else is going to be using it */
6865 spin_lock(&nn->blocked_locks_lock);
6866 if (!list_empty(&nbl->nbl_list)) {
6867 list_del_init(&nbl->nbl_list);
6868 list_del_init(&nbl->nbl_lru);
6869 queue = true;
6870 }
6871 spin_unlock(&nn->blocked_locks_lock);
6872
6873 if (queue) {
6874 trace_nfsd_cb_notify_lock(lo, nbl);
6875 nfsd4_run_cb(&nbl->nbl_cb);
6876 }
6877 }
6878
6879 static const struct lock_manager_operations nfsd_posix_mng_ops = {
6880 .lm_mod_owner = THIS_MODULE,
6881 .lm_notify = nfsd4_lm_notify,
6882 .lm_get_owner = nfsd4_lm_get_owner,
6883 .lm_put_owner = nfsd4_lm_put_owner,
6884 .lm_lock_expirable = nfsd4_lm_lock_expirable,
6885 .lm_expire_lock = nfsd4_lm_expire_lock,
6886 };
6887
6888 static inline void
6889 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6890 {
6891 struct nfs4_lockowner *lo;
6892
6893 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6894 lo = (struct nfs4_lockowner *) fl->fl_owner;
6895 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6896 GFP_KERNEL);
6897 if (!deny->ld_owner.data)
			/* We just don't care that much */
6899 goto nevermind;
6900 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6901 } else {
6902 nevermind:
6903 deny->ld_owner.len = 0;
6904 deny->ld_owner.data = NULL;
6905 deny->ld_clientid.cl_boot = 0;
6906 deny->ld_clientid.cl_id = 0;
6907 }
6908 deny->ld_start = fl->fl_start;
6909 deny->ld_length = NFS4_MAX_UINT64;
6910 if (fl->fl_end != NFS4_MAX_UINT64)
6911 deny->ld_length = fl->fl_end - fl->fl_start + 1;
6912 deny->ld_type = NFS4_READ_LT;
6913 if (fl->fl_type != F_RDLCK)
6914 deny->ld_type = NFS4_WRITE_LT;
6915 }
6916
6917 static struct nfs4_lockowner *
6918 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6919 {
6920 unsigned int strhashval = ownerstr_hashval(owner);
6921 struct nfs4_stateowner *so;
6922
6923 lockdep_assert_held(&clp->cl_lock);
6924
6925 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6926 so_strhash) {
6927 if (so->so_is_open_owner)
6928 continue;
6929 if (same_owner_str(so, owner))
6930 return lockowner(nfs4_get_stateowner(so));
6931 }
6932 return NULL;
6933 }
6934
6935 static struct nfs4_lockowner *
6936 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6937 {
6938 struct nfs4_lockowner *lo;
6939
6940 spin_lock(&clp->cl_lock);
6941 lo = find_lockowner_str_locked(clp, owner);
6942 spin_unlock(&clp->cl_lock);
6943 return lo;
6944 }
6945
6946 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6947 {
6948 unhash_lockowner_locked(lockowner(sop));
6949 }
6950
6951 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6952 {
6953 struct nfs4_lockowner *lo = lockowner(sop);
6954
6955 kmem_cache_free(lockowner_slab, lo);
6956 }
6957
6958 static const struct nfs4_stateowner_operations lockowner_ops = {
6959 .so_unhash = nfs4_unhash_lockowner,
6960 .so_free = nfs4_free_lockowner,
6961 };
6962
/*
 * Allocate a lockowner for this client and hash it under the owner
 * string. If another thread raced us and hashed an identical lockowner
 * first, free ours and return the existing one. Called from
 * nfsd4_lock() via lookup_or_create_lock_state().
 */
6970 static struct nfs4_lockowner *
6971 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6972 struct nfs4_ol_stateid *open_stp,
6973 struct nfsd4_lock *lock)
6974 {
6975 struct nfs4_lockowner *lo, *ret;
6976
6977 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6978 if (!lo)
6979 return NULL;
6980 INIT_LIST_HEAD(&lo->lo_blocked);
6981 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6982 lo->lo_owner.so_is_open_owner = 0;
6983 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6984 lo->lo_owner.so_ops = &lockowner_ops;
6985 spin_lock(&clp->cl_lock);
6986 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6987 if (ret == NULL) {
6988 list_add(&lo->lo_owner.so_strhash,
6989 &clp->cl_ownerstr_hashtbl[strhashval]);
6990 ret = lo;
6991 } else
6992 nfs4_free_stateowner(&lo->lo_owner);
6993
6994 spin_unlock(&clp->cl_lock);
6995 return ret;
6996 }
6997
6998 static struct nfs4_ol_stateid *
6999 find_lock_stateid(const struct nfs4_lockowner *lo,
7000 const struct nfs4_ol_stateid *ost)
7001 {
7002 struct nfs4_ol_stateid *lst;
7003
7004 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7005
	/* If ost is not hashed, ost->st_locks will not be valid */
7007 if (!nfs4_ol_stateid_unhashed(ost))
7008 list_for_each_entry(lst, &ost->st_locks, st_locks) {
7009 if (lst->st_stateowner == &lo->lo_owner) {
7010 refcount_inc(&lst->st_stid.sc_count);
7011 return lst;
7012 }
7013 }
7014 return NULL;
7015 }
7016
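/*
 * Initialize @stp as a lock stateid hanging off @open_stp and hash it.
 * If another thread raced us, the existing lock stateid is returned
 * instead; either way the returned stateid has its st_mutex held.
 * Returns NULL if @open_stp was unhashed in the meantime.
 */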
7017 static struct nfs4_ol_stateid *
7018 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7019 struct nfs4_file *fp, struct inode *inode,
7020 struct nfs4_ol_stateid *open_stp)
7021 {
7022 struct nfs4_client *clp = lo->lo_owner.so_client;
7023 struct nfs4_ol_stateid *retstp;
7024
7025 mutex_init(&stp->st_mutex);
7026 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7027 retry:
7028 spin_lock(&clp->cl_lock);
7029 if (nfs4_ol_stateid_unhashed(open_stp))
7030 goto out_close;
7031 retstp = find_lock_stateid(lo, open_stp);
7032 if (retstp)
7033 goto out_found;
7034 refcount_inc(&stp->st_stid.sc_count);
7035 stp->st_stid.sc_type = NFS4_LOCK_STID;
7036 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7037 get_nfs4_file(fp);
7038 stp->st_stid.sc_file = fp;
7039 stp->st_access_bmap = 0;
7040 stp->st_deny_bmap = open_stp->st_deny_bmap;
7041 stp->st_openstp = open_stp;
7042 spin_lock(&fp->fi_lock);
7043 list_add(&stp->st_locks, &open_stp->st_locks);
7044 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7045 list_add(&stp->st_perfile, &fp->fi_stateids);
7046 spin_unlock(&fp->fi_lock);
7047 spin_unlock(&clp->cl_lock);
7048 return stp;
7049 out_found:
7050 spin_unlock(&clp->cl_lock);
7051 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7052 nfs4_put_stid(&retstp->st_stid);
7053 goto retry;
7054 }
	/* To keep mutex tracking happy */
7056 mutex_unlock(&stp->st_mutex);
7057 return retstp;
7058 out_close:
7059 spin_unlock(&clp->cl_lock);
7060 mutex_unlock(&stp->st_mutex);
7061 return NULL;
7062 }
7063
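/*
 * Find the lock stateid for this lockowner on this open stateid, or
 * create one if none exists. *new is set when a freshly allocated
 * stateid was hashed; the returned stateid has its st_mutex held.
 */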
7064 static struct nfs4_ol_stateid *
7065 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7066 struct inode *inode, struct nfs4_ol_stateid *ost,
7067 bool *new)
7068 {
7069 struct nfs4_stid *ns = NULL;
7070 struct nfs4_ol_stateid *lst;
7071 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7072 struct nfs4_client *clp = oo->oo_owner.so_client;
7073
7074 *new = false;
7075 spin_lock(&clp->cl_lock);
7076 lst = find_lock_stateid(lo, ost);
7077 spin_unlock(&clp->cl_lock);
7078 if (lst != NULL) {
7079 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7080 goto out;
7081 nfs4_put_stid(&lst->st_stid);
7082 }
7083 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7084 if (ns == NULL)
7085 return NULL;
7086
7087 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7088 if (lst == openlockstateid(ns))
7089 *new = true;
7090 else
7091 nfs4_put_stid(ns);
7092 out:
7093 return lst;
7094 }
7095
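/*
 * An NFSv4 LOCK length of zero is invalid, and any length other than
 * the special "to EOF" value NFS4_MAX_UINT64 must not run past the end
 * of the 64-bit offset space: for example, offset 10 with length
 * NFS4_MAX_UINT64 - 5 is rejected.
 */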
7096 static int
7097 check_lock_length(u64 offset, u64 length)
7098 {
7099 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7100 (length > ~offset)));
7101 }
7102
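/*
 * Make sure the lock stateid carries the given share access, taking a
 * reference on the file's open mode the first time. The caller must
 * hold fp->fi_lock.
 */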
7103 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7104 {
7105 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7106
7107 lockdep_assert_held(&fp->fi_lock);
7108
7109 if (test_access(access, lock_stp))
7110 return;
7111 __nfs4_file_get_access(fp, access);
7112 set_access(access, lock_stp);
7113 }
7114
7115 static __be32
7116 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7117 struct nfs4_ol_stateid *ost,
7118 struct nfsd4_lock *lock,
7119 struct nfs4_ol_stateid **plst, bool *new)
7120 {
7121 __be32 status;
7122 struct nfs4_file *fi = ost->st_stid.sc_file;
7123 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7124 struct nfs4_client *cl = oo->oo_owner.so_client;
7125 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7126 struct nfs4_lockowner *lo;
7127 struct nfs4_ol_stateid *lst;
7128 unsigned int strhashval;
7129
7130 lo = find_lockowner_str(cl, &lock->lk_new_owner);
7131 if (!lo) {
7132 strhashval = ownerstr_hashval(&lock->lk_new_owner);
7133 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7134 if (lo == NULL)
7135 return nfserr_jukebox;
7136 } else {
		/* with an existing lockowner, seqids must be the same */
7138 status = nfserr_bad_seqid;
7139 if (!cstate->minorversion &&
7140 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7141 goto out;
7142 }
7143
7144 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7145 if (lst == NULL) {
7146 status = nfserr_jukebox;
7147 goto out;
7148 }
7149
7150 status = nfs_ok;
7151 *plst = lst;
7152 out:
7153 nfs4_put_stateowner(&lo->lo_owner);
7154 return status;
7155 }
7156
/*
 *  LOCK operation
 */
7160 __be32
7161 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7162 union nfsd4_op_u *u)
7163 {
7164 struct nfsd4_lock *lock = &u->lock;
7165 struct nfs4_openowner *open_sop = NULL;
7166 struct nfs4_lockowner *lock_sop = NULL;
7167 struct nfs4_ol_stateid *lock_stp = NULL;
7168 struct nfs4_ol_stateid *open_stp = NULL;
7169 struct nfs4_file *fp;
7170 struct nfsd_file *nf = NULL;
7171 struct nfsd4_blocked_lock *nbl = NULL;
7172 struct file_lock *file_lock = NULL;
7173 struct file_lock *conflock = NULL;
7174 __be32 status = 0;
7175 int lkflg;
7176 int err;
7177 bool new = false;
7178 unsigned char fl_type;
7179 unsigned int fl_flags = FL_POSIX;
7180 struct net *net = SVC_NET(rqstp);
7181 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7182
7183 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
7184 (long long) lock->lk_offset,
7185 (long long) lock->lk_length);
7186
7187 if (check_lock_length(lock->lk_offset, lock->lk_length))
7188 return nfserr_inval;
7189
7190 if ((status = fh_verify(rqstp, &cstate->current_fh,
7191 S_IFREG, NFSD_MAY_LOCK))) {
7192 dprintk("NFSD: nfsd4_lock: permission denied!\n");
7193 return status;
7194 }
7195
7196 if (lock->lk_is_new) {
7197 if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
7199 memcpy(&lock->lk_new_clientid,
7200 &cstate->clp->cl_clientid,
7201 sizeof(clientid_t));

		/* validate and update open stateid and open seqid */
7204 status = nfs4_preprocess_confirmed_seqid_op(cstate,
7205 lock->lk_new_open_seqid,
7206 &lock->lk_new_open_stateid,
7207 &open_stp, nn);
7208 if (status)
7209 goto out;
7210 mutex_unlock(&open_stp->st_mutex);
7211 open_sop = openowner(open_stp->st_stateowner);
7212 status = nfserr_bad_stateid;
7213 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
7214 &lock->lk_new_clientid))
7215 goto out;
7216 status = lookup_or_create_lock_state(cstate, open_stp, lock,
7217 &lock_stp, &new);
7218 } else {
7219 status = nfs4_preprocess_seqid_op(cstate,
7220 lock->lk_old_lock_seqid,
7221 &lock->lk_old_lock_stateid,
7222 NFS4_LOCK_STID, &lock_stp, nn);
7223 }
7224 if (status)
7225 goto out;
7226 lock_sop = lockowner(lock_stp->st_stateowner);
7227
7228 lkflg = setlkflg(lock->lk_type);
7229 status = nfs4_check_openmode(lock_stp, lkflg);
7230 if (status)
7231 goto out;
7232
7233 status = nfserr_grace;
7234 if (locks_in_grace(net) && !lock->lk_reclaim)
7235 goto out;
7236 status = nfserr_no_grace;
7237 if (!locks_in_grace(net) && lock->lk_reclaim)
7238 goto out;
7239
7240 if (lock->lk_reclaim)
7241 fl_flags |= FL_RECLAIM;
7242
7243 fp = lock_stp->st_stid.sc_file;
7244 switch (lock->lk_type) {
7245 case NFS4_READW_LT:
7246 if (nfsd4_has_session(cstate))
7247 fl_flags |= FL_SLEEP;
7248 fallthrough;
7249 case NFS4_READ_LT:
7250 spin_lock(&fp->fi_lock);
7251 nf = find_readable_file_locked(fp);
7252 if (nf)
7253 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7254 spin_unlock(&fp->fi_lock);
7255 fl_type = F_RDLCK;
7256 break;
7257 case NFS4_WRITEW_LT:
7258 if (nfsd4_has_session(cstate))
7259 fl_flags |= FL_SLEEP;
7260 fallthrough;
7261 case NFS4_WRITE_LT:
7262 spin_lock(&fp->fi_lock);
7263 nf = find_writeable_file_locked(fp);
7264 if (nf)
7265 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7266 spin_unlock(&fp->fi_lock);
7267 fl_type = F_WRLCK;
7268 break;
7269 default:
7270 status = nfserr_inval;
7271 goto out;
7272 }
7273
7274 if (!nf) {
7275 status = nfserr_openmode;
7276 goto out;
7277 }
7278
	/*
	 * Most filesystems with their own ->lock operations will block
	 * the nfsd thread waiting to acquire the lock.  That leads to
	 * deadlocks (we don't want every nfsd thread tied up waiting
	 * for file locks), so don't attempt blocking lock notifications
	 * on those filesystems:
	 */
7286 if (nf->nf_file->f_op->lock)
7287 fl_flags &= ~FL_SLEEP;
7288
7289 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
7290 if (!nbl) {
7291 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
7292 status = nfserr_jukebox;
7293 goto out;
7294 }
7295
7296 file_lock = &nbl->nbl_lock;
7297 file_lock->fl_type = fl_type;
7298 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
7299 file_lock->fl_pid = current->tgid;
7300 file_lock->fl_file = nf->nf_file;
7301 file_lock->fl_flags = fl_flags;
7302 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7303 file_lock->fl_start = lock->lk_offset;
7304 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
7305 nfs4_transform_lock_offset(file_lock);
7306
7307 conflock = locks_alloc_lock();
7308 if (!conflock) {
7309 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7310 status = nfserr_jukebox;
7311 goto out;
7312 }
7313
7314 if (fl_flags & FL_SLEEP) {
7315 nbl->nbl_time = ktime_get_boottime_seconds();
7316 spin_lock(&nn->blocked_locks_lock);
7317 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
7318 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
7319 kref_get(&nbl->nbl_kref);
7320 spin_unlock(&nn->blocked_locks_lock);
7321 }
7322
7323 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
7324 switch (err) {
7325 case 0:
7326 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
7327 status = 0;
7328 if (lock->lk_reclaim)
7329 nn->somebody_reclaimed = true;
7330 break;
7331 case FILE_LOCK_DEFERRED:
7332 kref_put(&nbl->nbl_kref, free_nbl);
7333 nbl = NULL;
7334 fallthrough;
7335 case -EAGAIN:
7336 status = nfserr_denied;
7337 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
7338 nfs4_set_lock_denied(conflock, &lock->lk_denied);
7339 break;
7340 case -EDEADLK:
7341 status = nfserr_deadlock;
7342 break;
7343 default:
7344 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
7345 status = nfserrno(err);
7346 break;
7347 }
7348 out:
7349 if (nbl) {
		/* dequeue it if we queued it before */
7351 if (fl_flags & FL_SLEEP) {
7352 spin_lock(&nn->blocked_locks_lock);
7353 if (!list_empty(&nbl->nbl_list) &&
7354 !list_empty(&nbl->nbl_lru)) {
7355 list_del_init(&nbl->nbl_list);
7356 list_del_init(&nbl->nbl_lru);
7357 kref_put(&nbl->nbl_kref, free_nbl);
7358 }
7359
7360 spin_unlock(&nn->blocked_locks_lock);
7361 }
7362 free_blocked_lock(nbl);
7363 }
7364 if (nf)
7365 nfsd_file_put(nf);
7366 if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
7368 if (cstate->replay_owner &&
7369 cstate->replay_owner != &lock_sop->lo_owner &&
7370 seqid_mutating_err(ntohl(status)))
7371 lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
7377 if (status && new)
7378 release_lock_stateid(lock_stp);
7379
7380 mutex_unlock(&lock_stp->st_mutex);
7381
7382 nfs4_put_stid(&lock_stp->st_stid);
7383 }
7384 if (open_stp)
7385 nfs4_put_stid(&open_stp->st_stid);
7386 nfsd4_bump_seqid(cstate, status);
7387 if (conflock)
7388 locks_free_lock(conflock);
7389 return status;
7390 }
7391
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.
 */
7397 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
7398 {
7399 struct nfsd_file *nf;
7400 struct inode *inode;
7401 __be32 err;
7402
7403 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
7404 if (err)
7405 return err;
7406 inode = fhp->fh_dentry->d_inode;
7407 inode_lock(inode); /* to block new leases till after test_lock: */
7408 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
7409 if (err)
7410 goto out;
7411 lock->fl_file = nf->nf_file;
7412 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
7413 lock->fl_file = NULL;
7414 out:
7415 inode_unlock(inode);
7416 nfsd_file_put(nf);
7417 return err;
7418 }
7419
/*
 * LOCKT operation
 */
7423 __be32
7424 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7425 union nfsd4_op_u *u)
7426 {
7427 struct nfsd4_lockt *lockt = &u->lockt;
7428 struct file_lock *file_lock = NULL;
7429 struct nfs4_lockowner *lo = NULL;
7430 __be32 status;
7431 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7432
7433 if (locks_in_grace(SVC_NET(rqstp)))
7434 return nfserr_grace;
7435
7436 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
7437 return nfserr_inval;
7438
7439 if (!nfsd4_has_session(cstate)) {
7440 status = set_client(&lockt->lt_clientid, cstate, nn);
7441 if (status)
7442 goto out;
7443 }
7444
7445 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7446 goto out;
7447
7448 file_lock = locks_alloc_lock();
7449 if (!file_lock) {
7450 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7451 status = nfserr_jukebox;
7452 goto out;
7453 }
7454
7455 switch (lockt->lt_type) {
7456 case NFS4_READ_LT:
7457 case NFS4_READW_LT:
7458 file_lock->fl_type = F_RDLCK;
7459 break;
7460 case NFS4_WRITE_LT:
7461 case NFS4_WRITEW_LT:
7462 file_lock->fl_type = F_WRLCK;
7463 break;
7464 default:
7465 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
7466 status = nfserr_inval;
7467 goto out;
7468 }
7469
7470 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
7471 if (lo)
7472 file_lock->fl_owner = (fl_owner_t)lo;
7473 file_lock->fl_pid = current->tgid;
7474 file_lock->fl_flags = FL_POSIX;
7475
7476 file_lock->fl_start = lockt->lt_offset;
7477 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
7478
7479 nfs4_transform_lock_offset(file_lock);
7480
7481 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
7482 if (status)
7483 goto out;
7484
7485 if (file_lock->fl_type != F_UNLCK) {
7486 status = nfserr_denied;
7487 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
7488 }
7489 out:
7490 if (lo)
7491 nfs4_put_stateowner(&lo->lo_owner);
7492 if (file_lock)
7493 locks_free_lock(file_lock);
7494 return status;
7495 }
7496
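/*
 * LOCKU: release a byte-range lock by asking the VFS for an F_UNLCK
 * over the given range with the lockowner as fl_owner, then bump the
 * lock stateid's seqid.
 */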
7497 __be32
7498 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7499 union nfsd4_op_u *u)
7500 {
7501 struct nfsd4_locku *locku = &u->locku;
7502 struct nfs4_ol_stateid *stp;
7503 struct nfsd_file *nf = NULL;
7504 struct file_lock *file_lock = NULL;
7505 __be32 status;
7506 int err;
7507 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7508
7509 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
7510 (long long) locku->lu_offset,
7511 (long long) locku->lu_length);
7512
7513 if (check_lock_length(locku->lu_offset, locku->lu_length))
7514 return nfserr_inval;
7515
7516 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
7517 &locku->lu_stateid, NFS4_LOCK_STID,
7518 &stp, nn);
7519 if (status)
7520 goto out;
7521 nf = find_any_file(stp->st_stid.sc_file);
7522 if (!nf) {
7523 status = nfserr_lock_range;
7524 goto put_stateid;
7525 }
7526 file_lock = locks_alloc_lock();
7527 if (!file_lock) {
7528 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7529 status = nfserr_jukebox;
7530 goto put_file;
7531 }
7532
7533 file_lock->fl_type = F_UNLCK;
7534 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7535 file_lock->fl_pid = current->tgid;
7536 file_lock->fl_file = nf->nf_file;
7537 file_lock->fl_flags = FL_POSIX;
7538 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7539 file_lock->fl_start = locku->lu_offset;
7540
7541 file_lock->fl_end = last_byte_offset(locku->lu_offset,
7542 locku->lu_length);
7543 nfs4_transform_lock_offset(file_lock);
7544
7545 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7546 if (err) {
7547 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7548 goto out_nfserr;
7549 }
7550 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7551 put_file:
7552 nfsd_file_put(nf);
7553 put_stateid:
7554 mutex_unlock(&stp->st_mutex);
7555 nfs4_put_stid(&stp->st_stid);
7556 out:
7557 nfsd4_bump_seqid(cstate, status);
7558 if (file_lock)
7559 locks_free_lock(file_lock);
7560 return status;
7561
7562 out_nfserr:
7563 status = nfserrno(err);
7564 goto put_file;
7565 }
7566
/*
 * returns
 *	true:  locks held by lockowner
 *	false: no locks held by lockowner
 */
7572 static bool
7573 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7574 {
7575 struct file_lock *fl;
7576 int status = false;
7577 struct nfsd_file *nf = find_any_file(fp);
7578 struct inode *inode;
7579 struct file_lock_context *flctx;
7580
7581 if (!nf) {
		/* Any valid lock stateid should have some sort of access */
7583 WARN_ON_ONCE(1);
7584 return status;
7585 }
7586
7587 inode = locks_inode(nf->nf_file);
7588 flctx = inode->i_flctx;
7589
7590 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7591 spin_lock(&flctx->flc_lock);
7592 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7593 if (fl->fl_owner == (fl_owner_t)lowner) {
7594 status = true;
7595 break;
7596 }
7597 }
7598 spin_unlock(&flctx->flc_lock);
7599 }
7600 nfsd_file_put(nf);
7601 return status;
7602 }
7603
/**
 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
 * @rqstp: RPC transaction
 * @cstate: NFSv4 COMPOUND state
 * @u: RELEASE_LOCKOWNER arguments
 *
 * The lockowner's so_count is bumped when a lock record is added
 * or when copying a conflicting lock. The latter case is brief,
 * but can lead to fleeting false positives when looking for
 * locks-in-use.
 *
 * Return values:
 *   %nfs_ok: lockowner released or not found
 *   %nfserr_locks_held: lockowner still in use
 *   Otherwise, the status returned by set_client() (for example a
 *   stale or unrecognized clientid).
 */
7621 __be32
7622 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7623 struct nfsd4_compound_state *cstate,
7624 union nfsd4_op_u *u)
7625 {
7626 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7627 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7628 clientid_t *clid = &rlockowner->rl_clientid;
7629 struct nfs4_ol_stateid *stp;
7630 struct nfs4_lockowner *lo;
7631 struct nfs4_client *clp;
7632 LIST_HEAD(reaplist);
7633 __be32 status;
7634
7635 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7636 clid->cl_boot, clid->cl_id);
7637
7638 status = set_client(clid, cstate, nn);
7639 if (status)
7640 return status;
7641 clp = cstate->clp;
7642
7643 spin_lock(&clp->cl_lock);
7644 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
7645 if (!lo) {
7646 spin_unlock(&clp->cl_lock);
7647 return nfs_ok;
7648 }
7649 if (atomic_read(&lo->lo_owner.so_count) != 2) {
7650 spin_unlock(&clp->cl_lock);
7651 nfs4_put_stateowner(&lo->lo_owner);
7652 return nfserr_locks_held;
7653 }
7654 unhash_lockowner_locked(lo);
7655 while (!list_empty(&lo->lo_owner.so_stateids)) {
7656 stp = list_first_entry(&lo->lo_owner.so_stateids,
7657 struct nfs4_ol_stateid,
7658 st_perstateowner);
7659 WARN_ON(!unhash_lock_stateid(stp));
7660 put_ol_stateid_locked(stp, &reaplist);
7661 }
7662 spin_unlock(&clp->cl_lock);
7663
7664 free_ol_stateid_reaplist(&reaplist);
7665 remove_blocked_locks(lo);
7666 nfs4_put_stateowner(&lo->lo_owner);
7667 return nfs_ok;
7668 }
7669
7670 static inline struct nfs4_client_reclaim *
7671 alloc_reclaim(void)
7672 {
7673 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
7674 }
7675
7676 bool
7677 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
7678 {
7679 struct nfs4_client_reclaim *crp;
7680
7681 crp = nfsd4_find_reclaim_client(name, nn);
7682 return (crp && crp->cr_clp);
7683 }
7684
/*
 * Create a reclaim record for the given client name and hash it into
 * nn->reclaim_str_hashtbl. The record takes ownership of the name and
 * princhash buffers (they are freed by nfs4_remove_reclaim_record());
 * returns NULL on allocation failure.
 */
7691 struct nfs4_client_reclaim *
7692 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
7693 struct nfsd_net *nn)
7694 {
7695 unsigned int strhashval;
7696 struct nfs4_client_reclaim *crp;
7697
7698 crp = alloc_reclaim();
7699 if (crp) {
7700 strhashval = clientstr_hashval(name);
7701 INIT_LIST_HEAD(&crp->cr_strhash);
7702 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
7703 crp->cr_name.data = name.data;
7704 crp->cr_name.len = name.len;
7705 crp->cr_princhash.data = princhash.data;
7706 crp->cr_princhash.len = princhash.len;
7707 crp->cr_clp = NULL;
7708 nn->reclaim_str_hashtbl_size++;
7709 }
7710 return crp;
7711 }
7712
7713 void
7714 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
7715 {
7716 list_del(&crp->cr_strhash);
7717 kfree(crp->cr_name.data);
7718 kfree(crp->cr_princhash.data);
7719 kfree(crp);
7720 nn->reclaim_str_hashtbl_size--;
7721 }
7722
7723 void
7724 nfs4_release_reclaim(struct nfsd_net *nn)
7725 {
7726 struct nfs4_client_reclaim *crp = NULL;
7727 int i;
7728
7729 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7730 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
7731 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
7732 struct nfs4_client_reclaim, cr_strhash);
7733 nfs4_remove_reclaim_record(crp, nn);
7734 }
7735 }
7736 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
7737 }
7738
/* search nn->reclaim_str_hashtbl for a record matching @name */
7741 struct nfs4_client_reclaim *
7742 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
7743 {
7744 unsigned int strhashval;
7745 struct nfs4_client_reclaim *crp = NULL;
7746
7747 strhashval = clientstr_hashval(name);
7748 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
7749 if (compare_blob(&crp->cr_name, &name) == 0) {
7750 return crp;
7751 }
7752 }
7753 return NULL;
7754 }
7755
7756 __be32
7757 nfs4_check_open_reclaim(struct nfs4_client *clp)
7758 {
7759 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
7760 return nfserr_no_grace;
7761
7762 if (nfsd4_client_record_check(clp))
7763 return nfserr_reclaim_bad;
7764
7765 return nfs_ok;
7766 }
7767
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
7777 static void
7778 set_max_delegations(void)
7779 {
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.  With 4K pages
	 * and 4GB of free buffer pages, for example, this allows 16384
	 * delegations.
	 */
7786 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7787 }
7788
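/*
 * Allocate and initialize the per-network-namespace NFSv4 state: client
 * and session hash tables, the client, close and delegation-recall LRU
 * lists, and the laundromat work.
 */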
7789 static int nfs4_state_create_net(struct net *net)
7790 {
7791 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7792 int i;
7793
7794 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7795 sizeof(struct list_head),
7796 GFP_KERNEL);
7797 if (!nn->conf_id_hashtbl)
7798 goto err;
7799 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
7800 sizeof(struct list_head),
7801 GFP_KERNEL);
7802 if (!nn->unconf_id_hashtbl)
7803 goto err_unconf_id;
7804 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
7805 sizeof(struct list_head),
7806 GFP_KERNEL);
7807 if (!nn->sessionid_hashtbl)
7808 goto err_sessionid;
7809
7810 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7811 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7812 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7813 }
7814 for (i = 0; i < SESSION_HASH_SIZE; i++)
7815 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7816 nn->conf_name_tree = RB_ROOT;
7817 nn->unconf_name_tree = RB_ROOT;
7818 nn->boot_time = ktime_get_real_seconds();
7819 nn->grace_ended = false;
7820 nn->nfsd4_manager.block_opens = true;
7821 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7822 INIT_LIST_HEAD(&nn->client_lru);
7823 INIT_LIST_HEAD(&nn->close_lru);
7824 INIT_LIST_HEAD(&nn->del_recall_lru);
7825 spin_lock_init(&nn->client_lock);
7826 spin_lock_init(&nn->s2s_cp_lock);
7827 idr_init(&nn->s2s_cp_stateids);
7828
7829 spin_lock_init(&nn->blocked_locks_lock);
7830 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7831
7832 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7833 get_net(net);
7834
7835 return 0;
7836
7837 err_sessionid:
7838 kfree(nn->unconf_id_hashtbl);
7839 err_unconf_id:
7840 kfree(nn->conf_id_hashtbl);
7841 err:
7842 return -ENOMEM;
7843 }
7844
7845 static void
7846 nfs4_state_destroy_net(struct net *net)
7847 {
7848 int i;
7849 struct nfs4_client *clp = NULL;
7850 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7851
7852 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7853 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7854 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7855 destroy_client(clp);
7856 }
7857 }
7858
7859 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7860
7861 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7862 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7863 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7864 destroy_client(clp);
7865 }
7866 }
7867
7868 kfree(nn->sessionid_hashtbl);
7869 kfree(nn->unconf_id_hashtbl);
7870 kfree(nn->conf_id_hashtbl);
7871 put_net(net);
7872 }
7873
7874 int
7875 nfs4_state_start_net(struct net *net)
7876 {
7877 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7878 int ret;
7879
7880 ret = nfs4_state_create_net(net);
7881 if (ret)
7882 return ret;
7883 locks_start_grace(net, &nn->nfsd4_manager);
7884 nfsd4_client_tracking_init(net);
7885 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
7886 goto skip_grace;
7887 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
7888 nn->nfsd4_grace, net->ns.inum);
7889 trace_nfsd_grace_start(nn);
7890 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7891 return 0;
7892
7893 skip_grace:
7894 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
7895 net->ns.inum);
7896 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
7897 nfsd4_end_grace(nn);
7898 return 0;
7899 }
7900
/* initialization to perform when the nfsd service is started: */
7903 int
7904 nfs4_state_start(void)
7905 {
7906 int ret;
7907
7908 ret = nfsd4_create_callback_queue();
7909 if (ret)
7910 return ret;
7911
7912 set_max_delegations();
7913 return 0;
7914 }
7915
7916 void
7917 nfs4_state_shutdown_net(struct net *net)
7918 {
7919 struct nfs4_delegation *dp = NULL;
7920 struct list_head *pos, *next, reaplist;
7921 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7922
7923 cancel_delayed_work_sync(&nn->laundromat_work);
7924 locks_end_grace(&nn->nfsd4_manager);
7925
7926 INIT_LIST_HEAD(&reaplist);
7927 spin_lock(&state_lock);
7928 list_for_each_safe(pos, next, &nn->del_recall_lru) {
7929 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7930 WARN_ON(!unhash_delegation_locked(dp));
7931 list_add(&dp->dl_recall_lru, &reaplist);
7932 }
7933 spin_unlock(&state_lock);
7934 list_for_each_safe(pos, next, &reaplist) {
7935 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7936 list_del_init(&dp->dl_recall_lru);
7937 destroy_unhashed_deleg(dp);
7938 }
7939
7940 nfsd4_client_tracking_exit(net);
7941 nfs4_state_destroy_net(net);
7942 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
7943 nfsd4_ssc_shutdown_umount(nn);
7944 #endif
7945 }
7946
7947 void
7948 nfs4_state_shutdown(void)
7949 {
7950 nfsd4_destroy_callback_queue();
7951 }
7952
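/*
 * Helpers for the "current stateid": put_stateid() saves an op's result
 * stateid in the compound state, and get_stateid() substitutes that
 * saved value when a later op passes the special current-stateid value.
 */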
7953 static void
7954 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7955 {
7956 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
7957 CURRENT_STATEID(stateid))
7958 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7959 }
7960
7961 static void
7962 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7963 {
7964 if (cstate->minorversion) {
7965 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7966 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7967 }
7968 }
7969
7970 void
7971 clear_current_stateid(struct nfsd4_compound_state *cstate)
7972 {
7973 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
7974 }
7975
/*
 * functions to set current state id
 */
7979 void
7980 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7981 union nfsd4_op_u *u)
7982 {
7983 put_stateid(cstate, &u->open_downgrade.od_stateid);
7984 }
7985
7986 void
7987 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7988 union nfsd4_op_u *u)
7989 {
7990 put_stateid(cstate, &u->open.op_stateid);
7991 }
7992
7993 void
7994 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7995 union nfsd4_op_u *u)
7996 {
7997 put_stateid(cstate, &u->close.cl_stateid);
7998 }
7999
8000 void
8001 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
8002 union nfsd4_op_u *u)
8003 {
8004 put_stateid(cstate, &u->lock.lk_resp_stateid);
8005 }
8006
/*
 * functions to consume current state id
 */
8011 void
8012 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
8013 union nfsd4_op_u *u)
8014 {
8015 get_stateid(cstate, &u->open_downgrade.od_stateid);
8016 }
8017
8018 void
8019 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
8020 union nfsd4_op_u *u)
8021 {
8022 get_stateid(cstate, &u->delegreturn.dr_stateid);
8023 }
8024
8025 void
8026 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
8027 union nfsd4_op_u *u)
8028 {
8029 get_stateid(cstate, &u->free_stateid.fr_stateid);
8030 }
8031
8032 void
8033 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
8034 union nfsd4_op_u *u)
8035 {
8036 get_stateid(cstate, &u->setattr.sa_stateid);
8037 }
8038
8039 void
8040 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
8041 union nfsd4_op_u *u)
8042 {
8043 get_stateid(cstate, &u->close.cl_stateid);
8044 }
8045
8046 void
8047 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
8048 union nfsd4_op_u *u)
8049 {
8050 get_stateid(cstate, &u->locku.lu_stateid);
8051 }
8052
8053 void
8054 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
8055 union nfsd4_op_u *u)
8056 {
8057 get_stateid(cstate, &u->read.rd_stateid);
8058 }
8059
8060 void
8061 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
8062 union nfsd4_op_u *u)
8063 {
8064 get_stateid(cstate, &u->write.wr_stateid);
8065 }