/*
 * Module for the pNFS flexfile layout driver.
 */

0010 #include <linux/nfs_fs.h>
0011 #include <linux/nfs_mount.h>
0012 #include <linux/nfs_page.h>
0013 #include <linux/module.h>
0014 #include <linux/sched/mm.h>
0015
0016 #include <linux/sunrpc/metrics.h>
0017
0018 #include "flexfilelayout.h"
0019 #include "../nfs4session.h"
0020 #include "../nfs4idmap.h"
0021 #include "../internal.h"
0022 #include "../delegation.h"
0023 #include "../nfs4trace.h"
0024 #include "../iostat.h"
0025 #include "../nfs.h"
0026 #include "../nfs42.h"
0027
0028 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
0029
0030 #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
0031 #define FF_LAYOUTRETURN_MAXERR 20
0032
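/*
 * Upper bound on I/O retransmissions, applied to pg_maxretrans for
 * "soft"/"softerr" mounts in the pageio init paths below.
 */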
0033 static unsigned short io_maxretrans;
0034
0035 static const struct pnfs_commit_ops ff_layout_commit_ops;
0036 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
0037 struct nfs_pgio_header *hdr);
0038 static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
0039 struct nfs42_layoutstat_devinfo *devinfo,
0040 int dev_limit);
0041 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
0042 const struct nfs42_layoutstat_devinfo *devinfo,
0043 struct nfs4_ff_layout_mirror *mirror);
0044
0045 static struct pnfs_layout_hdr *
0046 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
0047 {
0048 struct nfs4_flexfile_layout *ffl;
0049
0050 ffl = kzalloc(sizeof(*ffl), gfp_flags);
0051 if (ffl) {
0052 pnfs_init_ds_commit_info(&ffl->commit_info);
0053 INIT_LIST_HEAD(&ffl->error_list);
0054 INIT_LIST_HEAD(&ffl->mirrors);
0055 ffl->last_report_time = ktime_get();
0056 ffl->commit_info.ops = &ff_layout_commit_ops;
0057 return &ffl->generic_hdr;
0058 } else
0059 return NULL;
0060 }
0061
0062 static void
0063 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
0064 {
0065 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
0066 struct nfs4_ff_layout_ds_err *err, *n;
0067
0068 list_for_each_entry_safe(err, n, &ffl->error_list, list) {
0069 list_del(&err->list);
0070 kfree(err);
0071 }
0072 kfree_rcu(ffl, generic_hdr.plh_rcu);
0073 }
0074
0075 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
0076 {
0077 __be32 *p;
0078
0079 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
0080 if (unlikely(p == NULL))
0081 return -ENOBUFS;
0082 stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
0083 memcpy(stateid->data, p, NFS4_STATEID_SIZE);
0084 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
0085 p[0], p[1], p[2], p[3]);
0086 return 0;
0087 }
0088
0089 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
0090 {
0091 __be32 *p;
0092
0093 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
0094 if (unlikely(!p))
0095 return -ENOBUFS;
0096 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
0097 nfs4_print_deviceid(devid);
0098 return 0;
0099 }
0100
0101 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
0102 {
0103 __be32 *p;
0104
0105 p = xdr_inline_decode(xdr, 4);
0106 if (unlikely(!p))
0107 return -ENOBUFS;
0108 fh->size = be32_to_cpup(p++);
0109 if (fh->size > NFS_MAXFHSIZE) {
0110 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
0111 fh->size);
0112 return -EOVERFLOW;
0113 }
0114
0115 p = xdr_inline_decode(xdr, fh->size);
0116 if (unlikely(!p))
0117 return -ENOBUFS;
0118 memcpy(&fh->data, p, fh->size);
0119 dprintk("%s: fh len %d\n", __func__, fh->size);
0120
0121 return 0;
0122 }
0123
/*
 * Decode an NFSv4 owner/group string from the layout and map it to a
 * numeric id for use in the data server credential.
 */
0132 static int
0133 decode_name(struct xdr_stream *xdr, u32 *id)
0134 {
0135 __be32 *p;
0136 int len;
0137
/* opaque string length */
0139 p = xdr_inline_decode(xdr, 4);
0140 if (unlikely(!p))
0141 return -ENOBUFS;
0142 len = be32_to_cpup(p++);
0143 if (len < 0)
0144 return -EINVAL;
0145
0146 dprintk("%s: len %u\n", __func__, len);
0147
/* opaque string body */
0149 p = xdr_inline_decode(xdr, len);
0150 if (unlikely(!p))
0151 return -ENOBUFS;
0152
0153 if (!nfs_map_string_to_numeric((char *)p, len, id))
0154 return -EINVAL;
0155
0156 return 0;
0157 }
0158
0159 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
0160 const struct nfs4_ff_layout_mirror *m2)
0161 {
0162 int i, j;
0163
0164 if (m1->fh_versions_cnt != m2->fh_versions_cnt)
0165 return false;
0166 for (i = 0; i < m1->fh_versions_cnt; i++) {
0167 bool found_fh = false;
0168 for (j = 0; j < m2->fh_versions_cnt; j++) {
0169 if (nfs_compare_fh(&m1->fh_versions[i],
0170 &m2->fh_versions[j]) == 0) {
0171 found_fh = true;
0172 break;
0173 }
0174 }
0175 if (!found_fh)
0176 return false;
0177 }
0178 return true;
0179 }
0180
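/*
 * Add a mirror to the layout's mirror list. If an equivalent mirror (same
 * deviceid and filehandle set) is already present and still referenced,
 * take a reference to that one and return it so mirrors are shared.
 */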
0181 static struct nfs4_ff_layout_mirror *
0182 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
0183 struct nfs4_ff_layout_mirror *mirror)
0184 {
0185 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
0186 struct nfs4_ff_layout_mirror *pos;
0187 struct inode *inode = lo->plh_inode;
0188
0189 spin_lock(&inode->i_lock);
0190 list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
0191 if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
0192 continue;
0193 if (!ff_mirror_match_fh(mirror, pos))
0194 continue;
0195 if (refcount_inc_not_zero(&pos->ref)) {
0196 spin_unlock(&inode->i_lock);
0197 return pos;
0198 }
0199 }
0200 list_add(&mirror->mirrors, &ff_layout->mirrors);
0201 mirror->layout = lo;
0202 spin_unlock(&inode->i_lock);
0203 return mirror;
0204 }
0205
0206 static void
0207 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
0208 {
0209 struct inode *inode;
0210 if (mirror->layout == NULL)
0211 return;
0212 inode = mirror->layout->plh_inode;
0213 spin_lock(&inode->i_lock);
0214 list_del(&mirror->mirrors);
0215 spin_unlock(&inode->i_lock);
0216 mirror->layout = NULL;
0217 }
0218
0219 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
0220 {
0221 struct nfs4_ff_layout_mirror *mirror;
0222
0223 mirror = kzalloc(sizeof(*mirror), gfp_flags);
0224 if (mirror != NULL) {
0225 spin_lock_init(&mirror->lock);
0226 refcount_set(&mirror->ref, 1);
0227 INIT_LIST_HEAD(&mirror->mirrors);
0228 }
0229 return mirror;
0230 }
0231
0232 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
0233 {
0234 const struct cred *cred;
0235
0236 ff_layout_remove_mirror(mirror);
0237 kfree(mirror->fh_versions);
0238 cred = rcu_access_pointer(mirror->ro_cred);
0239 put_cred(cred);
0240 cred = rcu_access_pointer(mirror->rw_cred);
0241 put_cred(cred);
0242 nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
0243 kfree(mirror);
0244 }
0245
0246 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
0247 {
0248 if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
0249 ff_layout_free_mirror(mirror);
0250 }
0251
0252 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
0253 {
0254 u32 i;
0255
0256 for (i = 0; i < fls->mirror_array_cnt; i++)
0257 ff_layout_put_mirror(fls->mirror_array[i]);
0258 }
0259
0260 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
0261 {
0262 if (fls) {
0263 ff_layout_free_mirror_array(fls);
0264 kfree(fls);
0265 }
0266 }
0267
0268 static bool
0269 ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
0270 struct pnfs_layout_segment *l2)
0271 {
0272 const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
0274 u32 i;
0275
0276 if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
0277 return false;
0278 for (i = 0; i < fl1->mirror_array_cnt; i++) {
0279 if (fl1->mirror_array[i] != fl2->mirror_array[i])
0280 return false;
0281 }
0282 return true;
0283 }
0284
0285 static bool
0286 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
0287 const struct pnfs_layout_range *l2)
0288 {
0289 u64 end1, end2;
0290
0291 if (l1->iomode != l2->iomode)
0292 return l1->iomode != IOMODE_READ;
0293 end1 = pnfs_calc_offset_end(l1->offset, l1->length);
0294 end2 = pnfs_calc_offset_end(l2->offset, l2->length);
0295 if (end1 < l2->offset)
0296 return false;
0297 if (end2 < l1->offset)
0298 return true;
0299 return l2->offset <= l1->offset;
0300 }
0301
0302 static bool
0303 ff_lseg_merge(struct pnfs_layout_segment *new,
0304 struct pnfs_layout_segment *old)
0305 {
0306 u64 new_end, old_end;
0307
0308 if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
0309 return false;
0310 if (new->pls_range.iomode != old->pls_range.iomode)
0311 return false;
0312 old_end = pnfs_calc_offset_end(old->pls_range.offset,
0313 old->pls_range.length);
0314 if (old_end < new->pls_range.offset)
0315 return false;
0316 new_end = pnfs_calc_offset_end(new->pls_range.offset,
0317 new->pls_range.length);
0318 if (new_end < old->pls_range.offset)
0319 return false;
0320 if (!ff_lseg_match_mirrors(new, old))
0321 return false;

/* The segments overlap or are contiguous: extend 'new' to cover both */
0324 if (new_end < old_end)
0325 new_end = old_end;
0326 if (new->pls_range.offset < old->pls_range.offset)
0327 new->pls_range.offset = old->pls_range.offset;
0328 new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
0329 new_end);
0330 if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
0331 set_bit(NFS_LSEG_ROC, &new->pls_flags);
0332 return true;
0333 }
0334
0335 static void
0336 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
0337 struct pnfs_layout_segment *lseg,
0338 struct list_head *free_me)
0339 {
0340 pnfs_generic_layout_insert_lseg(lo, lseg,
0341 ff_lseg_range_is_after,
0342 ff_lseg_merge,
0343 free_me);
0344 }
0345
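/*
 * Selection sort of the mirror array by decreasing efficiency, so that
 * reads prefer the most efficient data server first.
 */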
0346 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
0347 {
0348 int i, j;
0349
0350 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
0351 for (j = i + 1; j < fls->mirror_array_cnt; j++)
0352 if (fls->mirror_array[i]->efficiency <
0353 fls->mirror_array[j]->efficiency)
0354 swap(fls->mirror_array[i],
0355 fls->mirror_array[j]);
0356 }
0357 }
0358
0359 static struct pnfs_layout_segment *
0360 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
0361 struct nfs4_layoutget_res *lgr,
0362 gfp_t gfp_flags)
0363 {
0364 struct pnfs_layout_segment *ret;
0365 struct nfs4_ff_layout_segment *fls = NULL;
0366 struct xdr_stream stream;
0367 struct xdr_buf buf;
0368 struct page *scratch;
0369 u64 stripe_unit;
0370 u32 mirror_array_cnt;
0371 __be32 *p;
0372 int i, rc;
0373
0374 dprintk("--> %s\n", __func__);
0375 scratch = alloc_page(gfp_flags);
0376 if (!scratch)
0377 return ERR_PTR(-ENOMEM);
0378
0379 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
0380 lgr->layoutp->len);
0381 xdr_set_scratch_page(&stream, scratch);
0382
/* stripe unit and mirror_array_cnt */
0384 rc = -EIO;
0385 p = xdr_inline_decode(&stream, 8 + 4);
0386 if (!p)
0387 goto out_err_free;
0388
0389 p = xdr_decode_hyper(p, &stripe_unit);
0390 mirror_array_cnt = be32_to_cpup(p++);
0391 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
0392 stripe_unit, mirror_array_cnt);
0393
0394 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
0395 mirror_array_cnt == 0)
0396 goto out_err_free;
0397
0398 rc = -ENOMEM;
0399 fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
0400 gfp_flags);
0401 if (!fls)
0402 goto out_err_free;
0403
0404 fls->mirror_array_cnt = mirror_array_cnt;
0405 fls->stripe_unit = stripe_unit;
0406
0407 for (i = 0; i < fls->mirror_array_cnt; i++) {
0408 struct nfs4_ff_layout_mirror *mirror;
0409 struct cred *kcred;
0410 const struct cred __rcu *cred;
0411 kuid_t uid;
0412 kgid_t gid;
0413 u32 ds_count, fh_count, id;
0414 int j;
0415
0416 rc = -EIO;
0417 p = xdr_inline_decode(&stream, 4);
0418 if (!p)
0419 goto out_err_free;
0420 ds_count = be32_to_cpup(p);

/* This driver only supports a single data server per mirror */
0423 if (ds_count != 1)
0424 goto out_err_free;
0425
0426 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
0427 if (fls->mirror_array[i] == NULL) {
0428 rc = -ENOMEM;
0429 goto out_err_free;
0430 }
0431
0432 fls->mirror_array[i]->ds_count = ds_count;
0433
/* deviceid */
0435 rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
0436 if (rc)
0437 goto out_err_free;
0438
/* efficiency */
0440 rc = -EIO;
0441 p = xdr_inline_decode(&stream, 4);
0442 if (!p)
0443 goto out_err_free;
0444 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
0445
/* stateid */
0447 rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
0448 if (rc)
0449 goto out_err_free;
0450
/* fh */
0452 rc = -EIO;
0453 p = xdr_inline_decode(&stream, 4);
0454 if (!p)
0455 goto out_err_free;
0456 fh_count = be32_to_cpup(p);
0457
0458 fls->mirror_array[i]->fh_versions =
0459 kcalloc(fh_count, sizeof(struct nfs_fh),
0460 gfp_flags);
0461 if (fls->mirror_array[i]->fh_versions == NULL) {
0462 rc = -ENOMEM;
0463 goto out_err_free;
0464 }
0465
0466 for (j = 0; j < fh_count; j++) {
0467 rc = decode_nfs_fh(&stream,
0468 &fls->mirror_array[i]->fh_versions[j]);
0469 if (rc)
0470 goto out_err_free;
0471 }
0472
0473 fls->mirror_array[i]->fh_versions_cnt = fh_count;
0474
/* user */
0476 rc = decode_name(&stream, &id);
0477 if (rc)
0478 goto out_err_free;
0479
0480 uid = make_kuid(&init_user_ns, id);
0481
/* group */
0483 rc = decode_name(&stream, &id);
0484 if (rc)
0485 goto out_err_free;
0486
0487 gid = make_kgid(&init_user_ns, id);
0488
0489 if (gfp_flags & __GFP_FS)
0490 kcred = prepare_kernel_cred(NULL);
0491 else {
0492 unsigned int nofs_flags = memalloc_nofs_save();
0493 kcred = prepare_kernel_cred(NULL);
0494 memalloc_nofs_restore(nofs_flags);
0495 }
0496 rc = -ENOMEM;
0497 if (!kcred)
0498 goto out_err_free;
0499 kcred->fsuid = uid;
0500 kcred->fsgid = gid;
0501 cred = RCU_INITIALIZER(kcred);
0502
0503 if (lgr->range.iomode == IOMODE_READ)
0504 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
0505 else
0506 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
0507
0508 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
0509 if (mirror != fls->mirror_array[i]) {
/* swap cred ptrs so free_mirror will clean up old */
0511 if (lgr->range.iomode == IOMODE_READ) {
0512 cred = xchg(&mirror->ro_cred, cred);
0513 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
0514 } else {
0515 cred = xchg(&mirror->rw_cred, cred);
0516 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
0517 }
0518 ff_layout_free_mirror(fls->mirror_array[i]);
0519 fls->mirror_array[i] = mirror;
0520 }
0521
0522 dprintk("%s: iomode %s uid %u gid %u\n", __func__,
0523 lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
0524 from_kuid(&init_user_ns, uid),
0525 from_kgid(&init_user_ns, gid));
0526 }
0527
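/* Optional trailing fields: layout flags and layoutstats report interval */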
0528 p = xdr_inline_decode(&stream, 4);
0529 if (!p)
0530 goto out_sort_mirrors;
0531 fls->flags = be32_to_cpup(p);
0532
0533 p = xdr_inline_decode(&stream, 4);
0534 if (!p)
0535 goto out_sort_mirrors;
for (i = 0; i < fls->mirror_array_cnt; i++)
0537 fls->mirror_array[i]->report_interval = be32_to_cpup(p);
0538
0539 out_sort_mirrors:
0540 ff_layout_sort_mirrors(fls);
0541 ret = &fls->generic_hdr;
0542 dprintk("<-- %s (success)\n", __func__);
0543 out_free_page:
0544 __free_page(scratch);
0545 return ret;
0546 out_err_free:
0547 _ff_layout_free_lseg(fls);
0548 ret = ERR_PTR(rc);
0549 dprintk("<-- %s (%d)\n", __func__, rc);
0550 goto out_free_page;
0551 }
0552
0553 static void
0554 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
0555 {
0556 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
0557
0558 dprintk("--> %s\n", __func__);
0559
0560 if (lseg->pls_range.iomode == IOMODE_RW) {
0561 struct nfs4_flexfile_layout *ffl;
0562 struct inode *inode;
0563
0564 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
0565 inode = ffl->generic_hdr.plh_inode;
0566 spin_lock(&inode->i_lock);
0567 pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
0568 spin_unlock(&inode->i_lock);
0569 }
0570 _ff_layout_free_lseg(fls);
0571 }
0572
0573 static void
0574 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
0575 {
/* first IO request? */
0577 if (atomic_inc_return(&timer->n_ops) == 1) {
0578 timer->start_time = now;
0579 }
0580 }
0581
0582 static ktime_t
0583 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
0584 {
0585 ktime_t start;
0586
0587 if (atomic_dec_return(&timer->n_ops) < 0)
0588 WARN_ON_ONCE(1);
0589
0590 start = timer->start_time;
0591 timer->start_time = now;
0592 return ktime_sub(now, start);
0593 }
0594
0595 static bool
0596 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
0597 struct nfs4_ff_layoutstat *layoutstat,
0598 ktime_t now)
0599 {
0600 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
0601 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
0602
0603 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
0604 if (!mirror->start_time)
0605 mirror->start_time = now;
0606 if (mirror->report_interval != 0)
0607 report_interval = (s64)mirror->report_interval * 1000LL;
0608 else if (layoutstats_timer != 0)
0609 report_interval = (s64)layoutstats_timer * 1000LL;
0610 if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
0611 report_interval) {
0612 ffl->last_report_time = now;
0613 return true;
0614 }
0615
0616 return false;
0617 }
0618
0619 static void
0620 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
0621 __u64 requested)
0622 {
0623 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
0624
0625 iostat->ops_requested++;
0626 iostat->bytes_requested += requested;
0627 }
0628
0629 static void
0630 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
0631 __u64 requested,
0632 __u64 completed,
0633 ktime_t time_completed,
0634 ktime_t time_started)
0635 {
0636 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
0637 ktime_t completion_time = ktime_sub(time_completed, time_started);
0638 ktime_t timer;
0639
0640 iostat->ops_completed++;
0641 iostat->bytes_completed += completed;
0642 iostat->bytes_not_delivered += requested - completed;
0643
0644 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
0645 iostat->total_busy_time =
0646 ktime_add(iostat->total_busy_time, timer);
0647 iostat->aggregate_completion_time =
0648 ktime_add(iostat->aggregate_completion_time,
0649 completion_time);
0650 }
0651
0652 static void
0653 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
0654 struct nfs4_ff_layout_mirror *mirror,
0655 __u64 requested, ktime_t now)
0656 {
0657 bool report;
0658
0659 spin_lock(&mirror->lock);
0660 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
0661 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
0662 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
0663 spin_unlock(&mirror->lock);
0664
0665 if (report)
0666 pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
0667 }
0668
0669 static void
0670 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
0671 struct nfs4_ff_layout_mirror *mirror,
0672 __u64 requested,
0673 __u64 completed)
0674 {
0675 spin_lock(&mirror->lock);
0676 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
0677 requested, completed,
0678 ktime_get(), task->tk_start);
0679 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
0680 spin_unlock(&mirror->lock);
0681 }
0682
0683 static void
0684 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
0685 struct nfs4_ff_layout_mirror *mirror,
0686 __u64 requested, ktime_t now)
0687 {
0688 bool report;
0689
0690 spin_lock(&mirror->lock);
report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
0692 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
0693 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
0694 spin_unlock(&mirror->lock);
0695
0696 if (report)
0697 pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
0698 }
0699
0700 static void
0701 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
0702 struct nfs4_ff_layout_mirror *mirror,
0703 __u64 requested,
0704 __u64 completed,
0705 enum nfs3_stable_how committed)
0706 {
0707 if (committed == NFS_UNSTABLE)
0708 requested = completed = 0;
0709
0710 spin_lock(&mirror->lock);
0711 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
0712 requested, completed, ktime_get(), task->tk_start);
0713 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
0714 spin_unlock(&mirror->lock);
0715 }
0716
0717 static void
0718 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
0719 {
0720 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
0721
0722 if (devid)
0723 nfs4_mark_deviceid_unavailable(devid);
0724 }
0725
0726 static void
0727 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
0728 {
0729 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
0730
0731 if (devid)
0732 nfs4_mark_deviceid_available(devid);
0733 }
0734
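/*
 * Walk the mirror array starting at 'start_idx' and return the first data
 * server that can be prepared, optionally skipping devices already marked
 * unavailable.
 */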
0735 static struct nfs4_pnfs_ds *
0736 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
0737 u32 start_idx, u32 *best_idx,
0738 bool check_device)
0739 {
0740 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
0741 struct nfs4_ff_layout_mirror *mirror;
0742 struct nfs4_pnfs_ds *ds;
0743 u32 idx;
0744
/* mirrors are sorted by efficiency */
0746 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
0747 mirror = FF_LAYOUT_COMP(lseg, idx);
0748 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
0749 if (!ds)
0750 continue;
0751
0752 if (check_device &&
0753 nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
0754 continue;
0755
0756 *best_idx = idx;
0757 return ds;
0758 }
0759
0760 return NULL;
0761 }
0762
0763 static struct nfs4_pnfs_ds *
0764 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
0765 u32 start_idx, u32 *best_idx)
0766 {
0767 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
0768 }
0769
0770 static struct nfs4_pnfs_ds *
0771 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
0772 u32 start_idx, u32 *best_idx)
0773 {
0774 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
0775 }
0776
0777 static struct nfs4_pnfs_ds *
0778 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
0779 u32 start_idx, u32 *best_idx)
0780 {
0781 struct nfs4_pnfs_ds *ds;
0782
0783 ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
0784 if (ds)
0785 return ds;
0786 return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
0787 }
0788
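/*
 * Pick a data server for a read, preferring mirrors at or after the current
 * pg_mirror_idx and falling back to a scan from index 0 if none is found.
 */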
0789 static struct nfs4_pnfs_ds *
0790 ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
0791 u32 *best_idx)
0792 {
0793 struct pnfs_layout_segment *lseg = pgio->pg_lseg;
0794 struct nfs4_pnfs_ds *ds;
0795
0796 ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
0797 best_idx);
0798 if (ds || !pgio->pg_mirror_idx)
0799 return ds;
0800 return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
0801 }
0802
0803 static void
0804 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
0805 struct nfs_page *req,
0806 bool strict_iomode)
0807 {
0808 pnfs_put_lseg(pgio->pg_lseg);
0809 pgio->pg_lseg =
0810 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
0811 req_offset(req), req->wb_bytes, IOMODE_READ,
0812 strict_iomode, nfs_io_gfp_mask());
0813 if (IS_ERR(pgio->pg_lseg)) {
0814 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
0815 pgio->pg_lseg = NULL;
0816 }
0817 }
0818
0819 static void
0820 ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
0821 struct nfs_page *req)
0822 {
0823 pnfs_generic_pg_check_layout(pgio);
0824 pnfs_generic_pg_check_range(pgio, req);
0825 }
0826
0827 static void
0828 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
0829 struct nfs_page *req)
0830 {
0831 struct nfs_pgio_mirror *pgm;
0832 struct nfs4_ff_layout_mirror *mirror;
0833 struct nfs4_pnfs_ds *ds;
0834 u32 ds_idx;
0835
0836 retry:
0837 ff_layout_pg_check_layout(pgio, req);
0838
0839 if (!pgio->pg_lseg) {
0840 ff_layout_pg_get_read(pgio, req, false);
0841 if (!pgio->pg_lseg)
0842 goto out_nolseg;
0843 }
0844 if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
0845 ff_layout_pg_get_read(pgio, req, true);
0846 if (!pgio->pg_lseg)
0847 goto out_nolseg;
0848 }
0849
0850 ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
0851 if (!ds) {
0852 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
0853 goto out_mds;
0854 pnfs_generic_pg_cleanup(pgio);
/* Sleep for 1 second before retrying */
0856 ssleep(1);
0857 goto retry;
0858 }
0859
0860 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
0861 pgm = &pgio->pg_mirrors[0];
0862 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
/* read always uses only one mirror - idx cannot be changed */
0864 pgio->pg_mirror_idx = ds_idx;
0865
0866 if (NFS_SERVER(pgio->pg_inode)->flags &
0867 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
0868 pgio->pg_maxretrans = io_maxretrans;
0869 return;
0870 out_nolseg:
0871 if (pgio->pg_error < 0)
0872 return;
0873 out_mds:
0874 trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
0875 0, NFS4_MAX_UINT64, IOMODE_READ,
0876 NFS_I(pgio->pg_inode)->layout,
0877 pgio->pg_lseg);
0878 pgio->pg_maxretrans = 0;
0879 nfs_pageio_reset_read_mds(pgio);
0880 }
0881
0882 static void
0883 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
0884 struct nfs_page *req)
0885 {
0886 struct nfs4_ff_layout_mirror *mirror;
0887 struct nfs_pgio_mirror *pgm;
0888 struct nfs4_pnfs_ds *ds;
0889 u32 i;
0890
0891 retry:
0892 ff_layout_pg_check_layout(pgio, req);
0893 if (!pgio->pg_lseg) {
0894 pgio->pg_lseg =
0895 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
0896 req_offset(req), req->wb_bytes,
0897 IOMODE_RW, false, nfs_io_gfp_mask());
0898 if (IS_ERR(pgio->pg_lseg)) {
0899 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
0900 pgio->pg_lseg = NULL;
0901 return;
0902 }
0903 }
0904
0905 if (pgio->pg_lseg == NULL)
0906 goto out_mds;
0907
/* Use a direct mapping of ds_idx to pgio mirror_idx */
0909 if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
0910 goto out_eagain;
0911
0912 for (i = 0; i < pgio->pg_mirror_count; i++) {
0913 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
0914 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
0915 if (!ds) {
0916 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
0917 goto out_mds;
0918 pnfs_generic_pg_cleanup(pgio);
/* Sleep for 1 second before retrying */
0920 ssleep(1);
0921 goto retry;
0922 }
0923 pgm = &pgio->pg_mirrors[i];
0924 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
0925 }
0926
0927 if (NFS_SERVER(pgio->pg_inode)->flags &
0928 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
0929 pgio->pg_maxretrans = io_maxretrans;
0930 return;
0931 out_eagain:
0932 pnfs_generic_pg_cleanup(pgio);
0933 pgio->pg_error = -EAGAIN;
0934 return;
0935 out_mds:
0936 trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
0937 0, NFS4_MAX_UINT64, IOMODE_RW,
0938 NFS_I(pgio->pg_inode)->layout,
0939 pgio->pg_lseg);
0940 pgio->pg_maxretrans = 0;
0941 nfs_pageio_reset_write_mds(pgio);
0942 pgio->pg_error = -EAGAIN;
0943 }
0944
0945 static unsigned int
0946 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
0947 struct nfs_page *req)
0948 {
0949 if (!pgio->pg_lseg) {
0950 pgio->pg_lseg =
0951 pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
0952 req_offset(req), req->wb_bytes,
0953 IOMODE_RW, false, nfs_io_gfp_mask());
0954 if (IS_ERR(pgio->pg_lseg)) {
0955 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
0956 pgio->pg_lseg = NULL;
0957 goto out;
0958 }
0959 }
0960 if (pgio->pg_lseg)
0961 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
0962
0963 trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
0964 0, NFS4_MAX_UINT64, IOMODE_RW,
0965 NFS_I(pgio->pg_inode)->layout,
0966 pgio->pg_lseg);
0967
0968 nfs_pageio_reset_write_mds(pgio);
0969 out:
0970 return 1;
0971 }
0972
0973 static u32
0974 ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
0975 {
0976 u32 old = desc->pg_mirror_idx;
0977
0978 desc->pg_mirror_idx = idx;
0979 return old;
0980 }
0981
0982 static struct nfs_pgio_mirror *
0983 ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
0984 {
0985 return &desc->pg_mirrors[idx];
0986 }
0987
0988 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
0989 .pg_init = ff_layout_pg_init_read,
0990 .pg_test = pnfs_generic_pg_test,
0991 .pg_doio = pnfs_generic_pg_readpages,
0992 .pg_cleanup = pnfs_generic_pg_cleanup,
0993 };
0994
0995 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
0996 .pg_init = ff_layout_pg_init_write,
0997 .pg_test = pnfs_generic_pg_test,
0998 .pg_doio = pnfs_generic_pg_writepages,
0999 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
1000 .pg_cleanup = pnfs_generic_pg_cleanup,
1001 .pg_get_mirror = ff_layout_pg_get_mirror_write,
1002 .pg_set_mirror = ff_layout_pg_set_mirror_write,
1003 };
1004
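/*
 * Resend a failed write either through pNFS (rescheduling the I/O against
 * the layout) or through the MDS, after scheduling any pending layoutcommit.
 */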
1005 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
1006 {
1007 struct rpc_task *task = &hdr->task;
1008
1009 pnfs_layoutcommit_inode(hdr->inode, false);
1010
1011 if (retry_pnfs) {
1012 dprintk("%s Reset task %5u for i/o through pNFS "
1013 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1014 hdr->task.tk_pid,
1015 hdr->inode->i_sb->s_id,
1016 (unsigned long long)NFS_FILEID(hdr->inode),
1017 hdr->args.count,
1018 (unsigned long long)hdr->args.offset);
1019
1020 hdr->completion_ops->reschedule_io(hdr);
1021 return;
1022 }
1023
1024 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1025 dprintk("%s Reset task %5u for i/o through MDS "
1026 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1027 hdr->task.tk_pid,
1028 hdr->inode->i_sb->s_id,
1029 (unsigned long long)NFS_FILEID(hdr->inode),
1030 hdr->args.count,
1031 (unsigned long long)hdr->args.offset);
1032
1033 trace_pnfs_mds_fallback_write_done(hdr->inode,
1034 hdr->args.offset, hdr->args.count,
1035 IOMODE_RW, NFS_I(hdr->inode)->layout,
1036 hdr->lseg);
1037 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1038 }
1039 }
1040
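/*
 * Resend a failed read through pNFS: if another mirror is available, report
 * the failure via LAYOUTERROR and retry there, otherwise mark the layout
 * for return before resending.
 */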
1041 static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
1042 {
1043 u32 idx = hdr->pgio_mirror_idx + 1;
1044 u32 new_idx = 0;
1045
1046 if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
1047 ff_layout_send_layouterror(hdr->lseg);
1048 else
1049 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1050 pnfs_read_resend_pnfs(hdr, new_idx);
1051 }
1052
1053 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1054 {
1055 struct rpc_task *task = &hdr->task;
1056
1057 pnfs_layoutcommit_inode(hdr->inode, false);
1058 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
1059
1060 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1061 dprintk("%s Reset task %5u for i/o through MDS "
1062 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1063 hdr->task.tk_pid,
1064 hdr->inode->i_sb->s_id,
1065 (unsigned long long)NFS_FILEID(hdr->inode),
1066 hdr->args.count,
1067 (unsigned long long)hdr->args.offset);
1068
1069 trace_pnfs_mds_fallback_read_done(hdr->inode,
1070 hdr->args.offset, hdr->args.count,
1071 IOMODE_READ, NFS_I(hdr->inode)->layout,
1072 hdr->lseg);
1073 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
1074 }
1075 }
1076
1077 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1078 struct nfs4_state *state,
1079 struct nfs_client *clp,
1080 struct pnfs_layout_segment *lseg,
1081 u32 idx)
1082 {
1083 struct pnfs_layout_hdr *lo = lseg->pls_layout;
1084 struct inode *inode = lo->plh_inode;
1085 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1086 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1087
1088 switch (task->tk_status) {
1089 case -NFS4ERR_BADSESSION:
1090 case -NFS4ERR_BADSLOT:
1091 case -NFS4ERR_BAD_HIGH_SLOT:
1092 case -NFS4ERR_DEADSESSION:
1093 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1094 case -NFS4ERR_SEQ_FALSE_RETRY:
1095 case -NFS4ERR_SEQ_MISORDERED:
1096 dprintk("%s ERROR %d, Reset session. Exchangeid "
1097 "flags 0x%x\n", __func__, task->tk_status,
1098 clp->cl_exchange_flags);
1099 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1100 break;
1101 case -NFS4ERR_DELAY:
1102 case -NFS4ERR_GRACE:
1103 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1104 break;
1105 case -NFS4ERR_RETRY_UNCACHED_REP:
1106 break;
/* Invalidate Layout errors */
1108 case -NFS4ERR_PNFS_NO_LAYOUT:
1109 case -ESTALE:
1110 case -EBADHANDLE:
1111 case -EISDIR:
1112 case -NFS4ERR_FHEXPIRED:
1113 case -NFS4ERR_WRONG_TYPE:
1114 dprintk("%s Invalid layout error %d\n", __func__,
1115 task->tk_status);
/*
 * Destroy layout so new i/o will get a new layout.
 * Layout will not be destroyed until all current lseg
 * references are put. Mark layout as invalid to resend failed
 * i/o and all i/o waiting on the slot table to the MDS until
 * layout is destroyed and a new valid layout is obtained.
 */
1123 pnfs_destroy_layout(NFS_I(inode));
1124 rpc_wake_up(&tbl->slot_tbl_waitq);
1125 goto reset;
/* RPC connection errors */
1127 case -ECONNREFUSED:
1128 case -EHOSTDOWN:
1129 case -EHOSTUNREACH:
1130 case -ENETUNREACH:
1131 case -EIO:
1132 case -ETIMEDOUT:
1133 case -EPIPE:
1134 case -EPROTO:
1135 case -ENODEV:
1136 dprintk("%s DS connection error %d\n", __func__,
1137 task->tk_status);
1138 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1139 &devid->deviceid);
1140 rpc_wake_up(&tbl->slot_tbl_waitq);
1141 fallthrough;
1142 default:
1143 if (ff_layout_avoid_mds_available_ds(lseg))
1144 return -NFS4ERR_RESET_TO_PNFS;
1145 reset:
1146 dprintk("%s Retry through MDS. Error %d\n", __func__,
1147 task->tk_status);
1148 return -NFS4ERR_RESET_TO_MDS;
1149 }
1150 task->tk_status = 0;
1151 return -EAGAIN;
1152 }
1153
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1155 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1156 struct pnfs_layout_segment *lseg,
1157 u32 idx)
1158 {
1159 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1160
1161 switch (task->tk_status) {
/* File access problems. Don't mark the device as unavailable */
1163 case -EACCES:
1164 case -ESTALE:
1165 case -EISDIR:
1166 case -EBADHANDLE:
1167 case -ELOOP:
1168 case -ENOSPC:
1169 break;
1170 case -EJUKEBOX:
1171 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1172 goto out_retry;
1173 default:
1174 dprintk("%s DS connection error %d\n", __func__,
1175 task->tk_status);
1176 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1177 &devid->deviceid);
1178 }
1179
1180 return -NFS4ERR_RESET_TO_PNFS;
1181 out_retry:
1182 task->tk_status = 0;
1183 rpc_restart_call_prepare(task);
1184 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1185 return -EAGAIN;
1186 }
1187
1188 static int ff_layout_async_handle_error(struct rpc_task *task,
1189 struct nfs4_state *state,
1190 struct nfs_client *clp,
1191 struct pnfs_layout_segment *lseg,
1192 u32 idx)
1193 {
1194 int vers = clp->cl_nfs_mod->rpc_vers->number;
1195
1196 if (task->tk_status >= 0) {
1197 ff_layout_mark_ds_reachable(lseg, idx);
1198 return 0;
1199 }
1200
/* Handle the case of an invalid layout segment */
1202 if (!pnfs_is_valid_lseg(lseg))
1203 return -NFS4ERR_RESET_TO_PNFS;
1204
1205 switch (vers) {
1206 case 3:
1207 return ff_layout_async_handle_error_v3(task, lseg, idx);
1208 case 4:
1209 return ff_layout_async_handle_error_v4(task, state, clp,
1210 lseg, idx);
1211 default:
/* should never happen */
1213 WARN_ON_ONCE(1);
1214 return 0;
1215 }
1216 }
1217
1218 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1219 u32 idx, u64 offset, u64 length,
1220 u32 *op_status, int opnum, int error)
1221 {
1222 struct nfs4_ff_layout_mirror *mirror;
1223 u32 status = *op_status;
1224 int err;
1225
1226 if (status == 0) {
1227 switch (error) {
1228 case -ETIMEDOUT:
1229 case -EPFNOSUPPORT:
1230 case -EPROTONOSUPPORT:
1231 case -EOPNOTSUPP:
1232 case -ECONNREFUSED:
1233 case -ECONNRESET:
1234 case -EHOSTDOWN:
1235 case -EHOSTUNREACH:
1236 case -ENETUNREACH:
1237 case -EADDRINUSE:
1238 case -ENOBUFS:
1239 case -EPIPE:
1240 case -EPERM:
1241 case -EPROTO:
1242 case -ENODEV:
1243 *op_status = status = NFS4ERR_NXIO;
1244 break;
1245 case -EACCES:
1246 *op_status = status = NFS4ERR_ACCESS;
1247 break;
1248 default:
1249 return;
1250 }
1251 }
1252
1253 mirror = FF_LAYOUT_COMP(lseg, idx);
1254 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1255 mirror, offset, length, status, opnum,
1256 nfs_io_gfp_mask());
1257
1258 switch (status) {
1259 case NFS4ERR_DELAY:
1260 case NFS4ERR_GRACE:
1261 break;
1262 case NFS4ERR_NXIO:
1263 ff_layout_mark_ds_unreachable(lseg, idx);
/*
 * Don't return the layout if this is a read and we still
 * have layouts to try
 */
1268 if (opnum == OP_READ)
1269 break;
1270 fallthrough;
1271 default:
1272 pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
1273 lseg);
1274 }
1275
1276 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1277 }
1278
1279
1280 static int ff_layout_read_done_cb(struct rpc_task *task,
1281 struct nfs_pgio_header *hdr)
1282 {
1283 int err;
1284
1285 if (task->tk_status < 0) {
1286 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1287 hdr->args.offset, hdr->args.count,
1288 &hdr->res.op_status, OP_READ,
1289 task->tk_status);
1290 trace_ff_layout_read_error(hdr);
1291 }
1292
1293 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1294 hdr->ds_clp, hdr->lseg,
1295 hdr->pgio_mirror_idx);
1296
1297 trace_nfs4_pnfs_read(hdr, err);
1298 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1299 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1300 switch (err) {
1301 case -NFS4ERR_RESET_TO_PNFS:
1302 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1303 return task->tk_status;
1304 case -NFS4ERR_RESET_TO_MDS:
1305 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1306 return task->tk_status;
1307 case -EAGAIN:
1308 goto out_eagain;
1309 }
1310
1311 return 0;
1312 out_eagain:
1313 rpc_restart_call_prepare(task);
1314 return -EAGAIN;
1315 }
1316
1317 static bool
1318 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1319 {
1320 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1321 }
1322
/*
 * Unless the layout sets FF_FLAGS_NO_LAYOUTCOMMIT, record the last written
 * byte so that a LAYOUTCOMMIT is sent to the MDS after DS writes.
 */
1332 static void
1333 ff_layout_set_layoutcommit(struct inode *inode,
1334 struct pnfs_layout_segment *lseg,
1335 loff_t end_offset)
1336 {
1337 if (!ff_layout_need_layoutcommit(lseg))
1338 return;
1339
1340 pnfs_set_layoutcommit(inode, lseg, end_offset);
1341 dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1342 (unsigned long long) NFS_I(inode)->layout->plh_lwb);
1343 }
1344
1345 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1346 struct nfs_pgio_header *hdr)
1347 {
1348 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1349 return;
1350 nfs4_ff_layout_stat_io_start_read(hdr->inode,
1351 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1352 hdr->args.count,
1353 task->tk_start);
1354 }
1355
1356 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1357 struct nfs_pgio_header *hdr)
1358 {
1359 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1360 return;
1361 nfs4_ff_layout_stat_io_end_read(task,
1362 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1363 hdr->args.count,
1364 hdr->res.count);
1365 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1366 }
1367
1368 static int ff_layout_read_prepare_common(struct rpc_task *task,
1369 struct nfs_pgio_header *hdr)
1370 {
1371 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1372 rpc_exit(task, -EIO);
1373 return -EIO;
1374 }
1375
1376 ff_layout_read_record_layoutstats_start(task, hdr);
1377 return 0;
1378 }
1379
/*
 * Per-version rpc_call_prepare callbacks for async reads: v3 starts the
 * call directly, v4 first sets up the session sequence.
 */
1385 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1386 {
1387 struct nfs_pgio_header *hdr = data;
1388
1389 if (ff_layout_read_prepare_common(task, hdr))
1390 return;
1391
1392 rpc_call_start(task);
1393 }
1394
1395 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1396 {
1397 struct nfs_pgio_header *hdr = data;
1398
1399 if (nfs4_setup_sequence(hdr->ds_clp,
1400 &hdr->args.seq_args,
1401 &hdr->res.seq_res,
1402 task))
1403 return;
1404
1405 ff_layout_read_prepare_common(task, hdr);
1406 }
1407
1408 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1409 {
1410 struct nfs_pgio_header *hdr = data;
1411
1412 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1413 task->tk_status == 0) {
1414 nfs4_sequence_done(task, &hdr->res.seq_res);
1415 return;
1416 }
1417
/* Note this may cause RPC to be resent */
1419 hdr->mds_ops->rpc_call_done(task, hdr);
1420 }
1421
1422 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1423 {
1424 struct nfs_pgio_header *hdr = data;
1425
1426 ff_layout_read_record_layoutstats_done(task, hdr);
1427 rpc_count_iostats_metrics(task,
1428 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1429 }
1430
1431 static void ff_layout_read_release(void *data)
1432 {
1433 struct nfs_pgio_header *hdr = data;
1434
1435 ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1436 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1437 ff_layout_resend_pnfs_read(hdr);
1438 else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1439 ff_layout_reset_read(hdr);
1440 pnfs_generic_rw_release(data);
1441 }
1442
1443
1444 static int ff_layout_write_done_cb(struct rpc_task *task,
1445 struct nfs_pgio_header *hdr)
1446 {
1447 loff_t end_offs = 0;
1448 int err;
1449
1450 if (task->tk_status < 0) {
1451 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1452 hdr->args.offset, hdr->args.count,
1453 &hdr->res.op_status, OP_WRITE,
1454 task->tk_status);
1455 trace_ff_layout_write_error(hdr);
1456 }
1457
1458 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1459 hdr->ds_clp, hdr->lseg,
1460 hdr->pgio_mirror_idx);
1461
1462 trace_nfs4_pnfs_write(hdr, err);
1463 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1464 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1465 switch (err) {
1466 case -NFS4ERR_RESET_TO_PNFS:
1467 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1468 return task->tk_status;
1469 case -NFS4ERR_RESET_TO_MDS:
1470 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1471 return task->tk_status;
1472 case -EAGAIN:
1473 return -EAGAIN;
1474 }
1475
1476 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1477 hdr->res.verf->committed == NFS_DATA_SYNC)
1478 end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1479
/* Note: if the write is unstable, don't set end_offs until commit */
1481 ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1482
/* zero out fattr since we don't care DS attr at all */
1484 hdr->fattr.valid = 0;
1485 if (task->tk_status >= 0)
1486 nfs_writeback_update_inode(hdr);
1487
1488 return 0;
1489 }
1490
1491 static int ff_layout_commit_done_cb(struct rpc_task *task,
1492 struct nfs_commit_data *data)
1493 {
1494 int err;
1495
1496 if (task->tk_status < 0) {
1497 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1498 data->args.offset, data->args.count,
1499 &data->res.op_status, OP_COMMIT,
1500 task->tk_status);
1501 trace_ff_layout_commit_error(data);
1502 }
1503
1504 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1505 data->lseg, data->ds_commit_index);
1506
1507 trace_nfs4_pnfs_commit_ds(data, err);
1508 switch (err) {
1509 case -NFS4ERR_RESET_TO_PNFS:
1510 pnfs_generic_prepare_to_resend_writes(data);
1511 return -EAGAIN;
1512 case -NFS4ERR_RESET_TO_MDS:
1513 pnfs_generic_prepare_to_resend_writes(data);
1514 return -EAGAIN;
1515 case -EAGAIN:
1516 rpc_restart_call_prepare(task);
1517 return -EAGAIN;
1518 }
1519
1520 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1521
1522 return 0;
1523 }
1524
1525 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1526 struct nfs_pgio_header *hdr)
1527 {
1528 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1529 return;
1530 nfs4_ff_layout_stat_io_start_write(hdr->inode,
1531 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1532 hdr->args.count,
1533 task->tk_start);
1534 }
1535
1536 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1537 struct nfs_pgio_header *hdr)
1538 {
1539 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1540 return;
1541 nfs4_ff_layout_stat_io_end_write(task,
1542 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1543 hdr->args.count, hdr->res.count,
1544 hdr->res.verf->committed);
1545 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1546 }
1547
1548 static int ff_layout_write_prepare_common(struct rpc_task *task,
1549 struct nfs_pgio_header *hdr)
1550 {
1551 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1552 rpc_exit(task, -EIO);
1553 return -EIO;
1554 }
1555
1556 ff_layout_write_record_layoutstats_start(task, hdr);
1557 return 0;
1558 }
1559
1560 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1561 {
1562 struct nfs_pgio_header *hdr = data;
1563
1564 if (ff_layout_write_prepare_common(task, hdr))
1565 return;
1566
1567 rpc_call_start(task);
1568 }
1569
1570 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1571 {
1572 struct nfs_pgio_header *hdr = data;
1573
1574 if (nfs4_setup_sequence(hdr->ds_clp,
1575 &hdr->args.seq_args,
1576 &hdr->res.seq_res,
1577 task))
1578 return;
1579
1580 ff_layout_write_prepare_common(task, hdr);
1581 }
1582
1583 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1584 {
1585 struct nfs_pgio_header *hdr = data;
1586
1587 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1588 task->tk_status == 0) {
1589 nfs4_sequence_done(task, &hdr->res.seq_res);
1590 return;
1591 }
1592
/* Note this may cause RPC to be resent */
1594 hdr->mds_ops->rpc_call_done(task, hdr);
1595 }
1596
1597 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1598 {
1599 struct nfs_pgio_header *hdr = data;
1600
1601 ff_layout_write_record_layoutstats_done(task, hdr);
1602 rpc_count_iostats_metrics(task,
1603 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1604 }
1605
1606 static void ff_layout_write_release(void *data)
1607 {
1608 struct nfs_pgio_header *hdr = data;
1609
1610 ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1611 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1612 ff_layout_send_layouterror(hdr->lseg);
1613 ff_layout_reset_write(hdr, true);
1614 } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1615 ff_layout_reset_write(hdr, false);
1616 pnfs_generic_rw_release(data);
1617 }
1618
1619 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1620 struct nfs_commit_data *cdata)
1621 {
1622 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1623 return;
1624 nfs4_ff_layout_stat_io_start_write(cdata->inode,
1625 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1626 0, task->tk_start);
1627 }
1628
1629 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1630 struct nfs_commit_data *cdata)
1631 {
1632 struct nfs_page *req;
1633 __u64 count = 0;
1634
1635 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1636 return;
1637
1638 if (task->tk_status == 0) {
1639 list_for_each_entry(req, &cdata->pages, wb_list)
1640 count += req->wb_bytes;
1641 }
1642 nfs4_ff_layout_stat_io_end_write(task,
1643 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1644 count, count, NFS_FILE_SYNC);
1645 set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1646 }
1647
1648 static void ff_layout_commit_prepare_common(struct rpc_task *task,
1649 struct nfs_commit_data *cdata)
1650 {
1651 ff_layout_commit_record_layoutstats_start(task, cdata);
1652 }
1653
1654 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1655 {
1656 ff_layout_commit_prepare_common(task, data);
1657 rpc_call_start(task);
1658 }
1659
1660 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1661 {
1662 struct nfs_commit_data *wdata = data;
1663
1664 if (nfs4_setup_sequence(wdata->ds_clp,
1665 &wdata->args.seq_args,
1666 &wdata->res.seq_res,
1667 task))
1668 return;
1669 ff_layout_commit_prepare_common(task, data);
1670 }
1671
1672 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1673 {
1674 pnfs_generic_write_commit_done(task, data);
1675 }
1676
1677 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1678 {
1679 struct nfs_commit_data *cdata = data;
1680
1681 ff_layout_commit_record_layoutstats_done(task, cdata);
1682 rpc_count_iostats_metrics(task,
1683 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1684 }
1685
1686 static void ff_layout_commit_release(void *data)
1687 {
1688 struct nfs_commit_data *cdata = data;
1689
1690 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1691 pnfs_generic_commit_release(data);
1692 }
1693
1694 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1695 .rpc_call_prepare = ff_layout_read_prepare_v3,
1696 .rpc_call_done = ff_layout_read_call_done,
1697 .rpc_count_stats = ff_layout_read_count_stats,
1698 .rpc_release = ff_layout_read_release,
1699 };
1700
1701 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1702 .rpc_call_prepare = ff_layout_read_prepare_v4,
1703 .rpc_call_done = ff_layout_read_call_done,
1704 .rpc_count_stats = ff_layout_read_count_stats,
1705 .rpc_release = ff_layout_read_release,
1706 };
1707
1708 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1709 .rpc_call_prepare = ff_layout_write_prepare_v3,
1710 .rpc_call_done = ff_layout_write_call_done,
1711 .rpc_count_stats = ff_layout_write_count_stats,
1712 .rpc_release = ff_layout_write_release,
1713 };
1714
1715 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1716 .rpc_call_prepare = ff_layout_write_prepare_v4,
1717 .rpc_call_done = ff_layout_write_call_done,
1718 .rpc_count_stats = ff_layout_write_count_stats,
1719 .rpc_release = ff_layout_write_release,
1720 };
1721
1722 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1723 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1724 .rpc_call_done = ff_layout_commit_done,
1725 .rpc_count_stats = ff_layout_commit_count_stats,
1726 .rpc_release = ff_layout_commit_release,
1727 };
1728
1729 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1730 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1731 .rpc_call_done = ff_layout_commit_done,
1732 .rpc_count_stats = ff_layout_commit_count_stats,
1733 .rpc_release = ff_layout_commit_release,
1734 };
1735
1736 static enum pnfs_try_status
1737 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1738 {
1739 struct pnfs_layout_segment *lseg = hdr->lseg;
1740 struct nfs4_pnfs_ds *ds;
1741 struct rpc_clnt *ds_clnt;
1742 struct nfs4_ff_layout_mirror *mirror;
1743 const struct cred *ds_cred;
1744 loff_t offset = hdr->args.offset;
1745 u32 idx = hdr->pgio_mirror_idx;
1746 int vers;
1747 struct nfs_fh *fh;
1748
1749 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1750 __func__, hdr->inode->i_ino,
1751 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1752
1753 mirror = FF_LAYOUT_COMP(lseg, idx);
1754 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1755 if (!ds)
1756 goto out_failed;
1757
1758 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1759 hdr->inode);
1760 if (IS_ERR(ds_clnt))
1761 goto out_failed;
1762
1763 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1764 if (!ds_cred)
1765 goto out_failed;
1766
1767 vers = nfs4_ff_layout_ds_version(mirror);
1768
1769 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1770 ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1771
1772 hdr->pgio_done_cb = ff_layout_read_done_cb;
1773 refcount_inc(&ds->ds_clp->cl_count);
1774 hdr->ds_clp = ds->ds_clp;
1775 fh = nfs4_ff_layout_select_ds_fh(mirror);
1776 if (fh)
1777 hdr->args.fh = fh;
1778
1779 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1780
/*
 * Note that if we ever decide to split across DSes,
 * then we may need to handle dense-like offsets.
 */
1785 hdr->args.offset = offset;
1786 hdr->mds_offset = offset;
1787
/* Perform an asynchronous read to ds */
1789 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1790 vers == 3 ? &ff_layout_read_call_ops_v3 :
1791 &ff_layout_read_call_ops_v4,
1792 0, RPC_TASK_SOFTCONN);
1793 put_cred(ds_cred);
1794 return PNFS_ATTEMPTED;
1795
1796 out_failed:
1797 if (ff_layout_avoid_mds_available_ds(lseg))
1798 return PNFS_TRY_AGAIN;
1799 trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1800 hdr->args.offset, hdr->args.count,
1801 IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1802 return PNFS_NOT_ATTEMPTED;
1803 }
1804
1805
1806 static enum pnfs_try_status
1807 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1808 {
1809 struct pnfs_layout_segment *lseg = hdr->lseg;
1810 struct nfs4_pnfs_ds *ds;
1811 struct rpc_clnt *ds_clnt;
1812 struct nfs4_ff_layout_mirror *mirror;
1813 const struct cred *ds_cred;
1814 loff_t offset = hdr->args.offset;
1815 int vers;
1816 struct nfs_fh *fh;
1817 u32 idx = hdr->pgio_mirror_idx;
1818
1819 mirror = FF_LAYOUT_COMP(lseg, idx);
1820 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1821 if (!ds)
1822 goto out_failed;
1823
1824 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1825 hdr->inode);
1826 if (IS_ERR(ds_clnt))
1827 goto out_failed;
1828
1829 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1830 if (!ds_cred)
1831 goto out_failed;
1832
1833 vers = nfs4_ff_layout_ds_version(mirror);
1834
1835 dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1836 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1837 offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1838 vers);
1839
1840 hdr->pgio_done_cb = ff_layout_write_done_cb;
1841 refcount_inc(&ds->ds_clp->cl_count);
1842 hdr->ds_clp = ds->ds_clp;
1843 hdr->ds_commit_idx = idx;
1844 fh = nfs4_ff_layout_select_ds_fh(mirror);
1845 if (fh)
1846 hdr->args.fh = fh;
1847
1848 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1849
/*
 * Note that if we ever decide to split across DSes,
 * then we may need to handle dense-like offsets.
 */
1854 hdr->args.offset = offset;
1855
/* Perform an asynchronous write */
1857 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1858 vers == 3 ? &ff_layout_write_call_ops_v3 :
1859 &ff_layout_write_call_ops_v4,
1860 sync, RPC_TASK_SOFTCONN);
1861 put_cred(ds_cred);
1862 return PNFS_ATTEMPTED;
1863
1864 out_failed:
1865 if (ff_layout_avoid_mds_available_ds(lseg))
1866 return PNFS_TRY_AGAIN;
1867 trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1868 hdr->args.offset, hdr->args.count,
1869 IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1870 return PNFS_NOT_ATTEMPTED;
1871 }
1872
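/* flexfiles uses a 1:1 mapping between commit bucket index and mirror index */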
1873 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1874 {
1875 return i;
1876 }
1877
1878 static struct nfs_fh *
1879 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1880 {
1881 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1882
/* FIXME: Assume that there is only one NFS version available
 * for the DS.
 */
1886 return &flseg->mirror_array[i]->fh_versions[0];
1887 }
1888
1889 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1890 {
1891 struct pnfs_layout_segment *lseg = data->lseg;
1892 struct nfs4_pnfs_ds *ds;
1893 struct rpc_clnt *ds_clnt;
1894 struct nfs4_ff_layout_mirror *mirror;
1895 const struct cred *ds_cred;
1896 u32 idx;
1897 int vers, ret;
1898 struct nfs_fh *fh;
1899
1900 if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1901 test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1902 goto out_err;
1903
1904 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1905 mirror = FF_LAYOUT_COMP(lseg, idx);
1906 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1907 if (!ds)
1908 goto out_err;
1909
1910 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1911 data->inode);
1912 if (IS_ERR(ds_clnt))
1913 goto out_err;
1914
1915 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
1916 if (!ds_cred)
1917 goto out_err;
1918
1919 vers = nfs4_ff_layout_ds_version(mirror);
1920
1921 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1922 data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1923 vers);
1924 data->commit_done_cb = ff_layout_commit_done_cb;
1925 data->cred = ds_cred;
1926 refcount_inc(&ds->ds_clp->cl_count);
1927 data->ds_clp = ds->ds_clp;
1928 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1929 if (fh)
1930 data->args.fh = fh;
1931
1932 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1933 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1934 &ff_layout_commit_call_ops_v4,
1935 how, RPC_TASK_SOFTCONN);
1936 put_cred(ds_cred);
1937 return ret;
1938 out_err:
1939 pnfs_generic_prepare_to_resend_writes(data);
1940 pnfs_generic_commit_release(data);
1941 return -EAGAIN;
1942 }
1943
1944 static int
1945 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1946 int how, struct nfs_commit_info *cinfo)
1947 {
1948 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1949 ff_layout_initiate_commit);
1950 }
1951
1952 static struct pnfs_ds_commit_info *
1953 ff_layout_get_ds_info(struct inode *inode)
1954 {
1955 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1956
1957 if (layout == NULL)
1958 return NULL;
1959
1960 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1961 }
1962
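/*
 * Allocate a commit array sized to the number of mirrors for this layout
 * segment and attach it to the layout's commit info.
 */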
1963 static void
1964 ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
1965 struct pnfs_layout_segment *lseg)
1966 {
1967 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1968 struct inode *inode = lseg->pls_layout->plh_inode;
1969 struct pnfs_commit_array *array, *new;
1970
1971 new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
1972 nfs_io_gfp_mask());
1973 if (new) {
1974 spin_lock(&inode->i_lock);
1975 array = pnfs_add_commit_array(fl_cinfo, new, lseg);
1976 spin_unlock(&inode->i_lock);
1977 if (array != new)
1978 pnfs_free_commit_array(new);
1979 }
1980 }
1981
1982 static void
1983 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
1984 struct inode *inode)
1985 {
1986 spin_lock(&inode->i_lock);
1987 pnfs_generic_ds_cinfo_destroy(fl_cinfo);
1988 spin_unlock(&inode->i_lock);
1989 }
1990
1991 static void
1992 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1993 {
1994 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1995 id_node));
1996 }
1997
1998 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
1999 const struct nfs4_layoutreturn_args *args,
2000 const struct nfs4_flexfile_layoutreturn_args *ff_args)
2001 {
2002 __be32 *start;
2003
2004 start = xdr_reserve_space(xdr, 4);
2005 if (unlikely(!start))
2006 return -E2BIG;
2007
2008 *start = cpu_to_be32(ff_args->num_errors);
2009
2010 return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2011 }
2012
2013 static void
2014 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
2015 {
2016 WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
2017 }
2018
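/*
 * Encode the fixed head of one ff_iostats entry: offset, length, stateid,
 * the read/write operation and byte counts, and the device ID.
 */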
2019 static void
2020 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2021 const nfs4_stateid *stateid,
2022 const struct nfs42_layoutstat_devinfo *devinfo)
2023 {
2024 __be32 *p;
2025
2026 p = xdr_reserve_space(xdr, 8 + 8);
2027 p = xdr_encode_hyper(p, devinfo->offset);
2028 p = xdr_encode_hyper(p, devinfo->length);
2029 encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2030 p = xdr_reserve_space(xdr, 4*8);
2031 p = xdr_encode_hyper(p, devinfo->read_count);
2032 p = xdr_encode_hyper(p, devinfo->read_bytes);
2033 p = xdr_encode_hyper(p, devinfo->write_count);
2034 p = xdr_encode_hyper(p, devinfo->write_bytes);
2035 encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2036 }
2037
2038 static void
2039 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2040 const nfs4_stateid *stateid,
2041 const struct nfs42_layoutstat_devinfo *devinfo)
2042 {
2043 ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2044 ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2045 devinfo->ld_private.data);
2046 }
2047
2048
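/*
 * Encode the iostats array for LAYOUTRETURN: a device count followed by
 * one ff_iostats entry (head plus layoutupdate body) per device.
 */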
2049 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2050 const struct nfs4_layoutreturn_args *args,
2051 struct nfs4_flexfile_layoutreturn_args *ff_args)
2052 {
2053 __be32 *p;
2054 int i;
2055
2056 p = xdr_reserve_space(xdr, 4);
2057 *p = cpu_to_be32(ff_args->num_dev);
2058 for (i = 0; i < ff_args->num_dev; i++)
2059 ff_layout_encode_ff_iostat(xdr,
2060 &args->layout->plh_stateid,
2061 &ff_args->devinfo[i]);
2062 }
2063
2064 static void
2065 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2066 unsigned int num_entries)
2067 {
2068 unsigned int i;
2069
2070 for (i = 0; i < num_entries; i++) {
2071 if (!devinfo[i].ld_private.ops)
2072 continue;
2073 if (!devinfo[i].ld_private.ops->free)
2074 continue;
2075 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2076 }
2077 }
2078
2079 static struct nfs4_deviceid_node *
2080 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2081 struct pnfs_device *pdev, gfp_t gfp_flags)
2082 {
2083 struct nfs4_ff_layout_ds *dsaddr;
2084
2085 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2086 if (!dsaddr)
2087 return NULL;
2088 return &dsaddr->id_node;
2089 }
2090
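/*
 * Encode the flexfile-specific LAYOUTRETURN body.  The ioerr and iostats
 * arrays are first XDR-encoded into a scratch page, and the result is then
 * written into the main stream as a length-prefixed opaque blob.
 */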
2091 static void
2092 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2093 const void *voidargs,
2094 const struct nfs4_xdr_opaque_data *ff_opaque)
2095 {
2096 const struct nfs4_layoutreturn_args *args = voidargs;
2097 struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2098 struct xdr_buf tmp_buf = {
2099 .head = {
2100 [0] = {
2101 .iov_base = page_address(ff_args->pages[0]),
2102 },
2103 },
2104 .buflen = PAGE_SIZE,
2105 };
2106 struct xdr_stream tmp_xdr;
2107 __be32 *start;
2108
2109 dprintk("%s: Begin\n", __func__);
2110
2111 xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2112
2113 ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2114 ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2115
2116 start = xdr_reserve_space(xdr, 4);
2117 *start = cpu_to_be32(tmp_buf.len);
2118 xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2119
2120 dprintk("%s: Return\n", __func__);
2121 }
2122
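/*
 * Free the LAYOUTRETURN private data: the recorded DS errors, the iostats
 * array entries, and the scratch page used for encoding.
 */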
2123 static void
2124 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2125 {
2126 struct nfs4_flexfile_layoutreturn_args *ff_args;
2127
2128 if (!args->data)
2129 return;
2130 ff_args = args->data;
2131 args->data = NULL;
2132
2133 ff_layout_free_ds_ioerr(&ff_args->errors);
2134 ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2135
2136 put_page(ff_args->pages[0]);
2137 kfree(ff_args);
2138 }
2139
2140 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2141 .encode = ff_layout_encode_layoutreturn,
2142 .free = ff_layout_free_layoutreturn,
2143 };
2144
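/*
 * Collect the DS error list and per-mirror statistics that will be sent
 * with the LAYOUTRETURN, and attach them to args->ld_private so that
 * layoutreturn_ops can encode and later free them.
 */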
2145 static int
2146 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2147 {
2148 struct nfs4_flexfile_layoutreturn_args *ff_args;
2149 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2150
2151 ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2152 if (!ff_args)
2153 goto out_nomem;
2154 ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2155 if (!ff_args->pages[0])
2156 goto out_nomem_free;
2157
2158 INIT_LIST_HEAD(&ff_args->errors);
2159 ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2160 &args->range, &ff_args->errors,
2161 FF_LAYOUTRETURN_MAXERR);
2162
2163 spin_lock(&args->inode->i_lock);
2164 ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2165 &ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
2166 spin_unlock(&args->inode->i_lock);
2167
2168 args->ld_private->ops = &layoutreturn_ops;
2169 args->ld_private->data = ff_args;
2170 return 0;
2171 out_nomem_free:
2172 kfree(ff_args);
2173 out_nomem:
2174 return -ENOMEM;
2175 }
2176
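/*
 * Report recorded data-server I/O errors to the MDS via the NFSv4.2
 * LAYOUTERROR operation, batching at most NFS42_LAYOUTERROR_MAX entries
 * per call.  Compiles to a no-op when CONFIG_NFS_V4_2 is disabled.
 */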
2177 #ifdef CONFIG_NFS_V4_2
2178 void
2179 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2180 {
2181 struct pnfs_layout_hdr *lo = lseg->pls_layout;
2182 struct nfs42_layout_error *errors;
2183 LIST_HEAD(head);
2184
2185 if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2186 return;
2187 ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2188 if (list_empty(&head))
2189 return;
2190
2191 errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2192 nfs_io_gfp_mask());
2193 if (errors != NULL) {
2194 const struct nfs4_ff_layout_ds_err *pos;
2195 size_t n = 0;
2196
2197 list_for_each_entry(pos, &head, list) {
2198 errors[n].offset = pos->offset;
2199 errors[n].length = pos->length;
2200 nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2201 errors[n].errors[0].dev_id = pos->deviceid;
2202 errors[n].errors[0].status = pos->status;
2203 errors[n].errors[0].opnum = pos->opnum;
2204 n++;
2205 if (!list_is_last(&pos->list, &head) &&
2206 n < NFS42_LAYOUTERROR_MAX)
2207 continue;
2208 if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2209 break;
2210 n = 0;
2211 }
2212 kfree(errors);
2213 }
2214 ff_layout_free_ds_ioerr(&head);
2215 }
2216 #else
2217 void
2218 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2219 {
2220 }
2221 #endif
2222
2223 static int
2224 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2225 {
2226 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2227
2228 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2229 }
2230
2231 static size_t
2232 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2233 const int buflen)
2234 {
2235 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2236 const struct in6_addr *addr = &sin6->sin6_addr;
2237
2238 /*
2239  * RFC 4291, Section 2.2.2
2240  *
2241  * Shorthanded ANY address
2242  */
2243 if (ipv6_addr_any(addr))
2244 return snprintf(buf, buflen, "::");
2245
2246 /*
2247  * RFC 4291, Section 2.2.2
2248  *
2249  * Shorthanded loopback address
2250  */
2251 if (ipv6_addr_loopback(addr))
2252 return snprintf(buf, buflen, "::1");
2253
2254 /*
2255  * RFC 4291, Section 2.2.3
2256  *
2257  * Special presentation address format for
2258  * IPv4-mapped IPv6 addresses.
2259  */
2260 if (ipv6_addr_v4mapped(addr))
2261 return snprintf(buf, buflen, "::ffff:%pI4",
2262 &addr->s6_addr32[3]);
2263
2264 /*
2265  * Do colon-separated hex output
2266  */
2267 return snprintf(buf, buflen, "%pI6c", addr);
2268 }
2269
2270
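/*
 * Encode a data-server address as an XDR netaddr: the netid string followed
 * by the universal address string with the port appended in ".hi.lo" form.
 */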
2271 static void
2272 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2273 {
2274 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2275 char portbuf[RPCBIND_MAXUADDRPLEN];
2276 char addrbuf[RPCBIND_MAXUADDRLEN];
2277 unsigned short port;
2278 int len, netid_len;
2279 __be32 *p;
2280
2281 switch (sap->sa_family) {
2282 case AF_INET:
2283 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2284 return;
2285 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2286 break;
2287 case AF_INET6:
2288 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2289 return;
2290 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2291 break;
2292 default:
2293 WARN_ON_ONCE(1);
2294 return;
2295 }
2296
2297 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2298 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2299
2300 netid_len = strlen(da->da_netid);
2301 p = xdr_reserve_space(xdr, 4 + netid_len);
2302 xdr_encode_opaque(p, da->da_netid, netid_len);
2303
2304 p = xdr_reserve_space(xdr, 4 + len);
2305 xdr_encode_opaque(p, addrbuf, len);
2306 }
2307
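/* Encode a ktime_t as an nfstime4: 64-bit seconds plus 32-bit nanoseconds. */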
2308 static void
2309 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2310 ktime_t t)
2311 {
2312 struct timespec64 ts;
2313 __be32 *p;
2314
2315 p = xdr_reserve_space(xdr, 12);
2316 ts = ktime_to_timespec64(t);
2317 p = xdr_encode_hyper(p, ts.tv_sec);
2318 *p++ = cpu_to_be32(ts.tv_nsec);
2319 }
2320
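/*
 * Encode one I/O latency block: ops/bytes requested, ops/bytes completed
 * and bytes not delivered, followed by the total busy time and the
 * aggregate completion time.
 */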
2321 static void
2322 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2323 struct nfs4_ff_io_stat *stat)
2324 {
2325 __be32 *p;
2326
2327 p = xdr_reserve_space(xdr, 5 * 8);
2328 p = xdr_encode_hyper(p, stat->ops_requested);
2329 p = xdr_encode_hyper(p, stat->bytes_requested);
2330 p = xdr_encode_hyper(p, stat->ops_completed);
2331 p = xdr_encode_hyper(p, stat->bytes_completed);
2332 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2333 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2334 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2335 }
2336
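/*
 * Encode the per-mirror layoutupdate body: the first DS address, the
 * filehandle, the read and write I/O latency stats (sampled under the
 * mirror lock), the time since the mirror's start_time, and a trailing
 * boolean encoded as false.
 */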
2337 static void
2338 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2339 const struct nfs42_layoutstat_devinfo *devinfo,
2340 struct nfs4_ff_layout_mirror *mirror)
2341 {
2342 struct nfs4_pnfs_ds_addr *da;
2343 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2344 struct nfs_fh *fh = &mirror->fh_versions[0];
2345 __be32 *p;
2346
2347 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2348 dprintk("%s: DS %s: encoding address %s\n",
2349 __func__, ds->ds_remotestr, da->da_remotestr);
2350
2351 ff_layout_encode_netaddr(xdr, da);
2352
2353 p = xdr_reserve_space(xdr, 4 + fh->size);
2354 xdr_encode_opaque(p, fh->data, fh->size);
2355
2356 spin_lock(&mirror->lock);
2357 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2358
2359 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2360 spin_unlock(&mirror->lock);
2361
2362 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2363
2364 p = xdr_reserve_space(xdr, 4);
2365 *p = cpu_to_be32(false);
2366 }
2367
2368 static void
2369 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2370 const struct nfs4_xdr_opaque_data *opaque)
2371 {
2372 struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2373 struct nfs42_layoutstat_devinfo, ld_private);
2374 __be32 *start;
2375
2376 /* layoutupdate length */
2377 start = xdr_reserve_space(xdr, 4);
2378 ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2379
2380 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2381 }
2382
2383 static void
2384 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2385 {
2386 struct nfs4_ff_layout_mirror *mirror = opaque->data;
2387
2388 ff_layout_put_mirror(mirror);
2389 }
2390
2391 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2392 .encode = ff_layout_encode_layoutstats,
2393 .free = ff_layout_free_layoutstats,
2394 };
2395
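/*
 * Fill at most dev_limit devinfo entries, one per mirror that has fresh
 * statistics (NFS4_FF_MIRROR_STAT_AVAIL).  Each selected mirror is pinned
 * with a reference that layoutstat_ops.free later drops.  Returns the
 * number of entries filled.
 */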
2396 static int
2397 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2398 struct nfs42_layoutstat_devinfo *devinfo,
2399 int dev_limit)
2400 {
2401 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2402 struct nfs4_ff_layout_mirror *mirror;
2403 struct nfs4_deviceid_node *dev;
2404 int i = 0;
2405
2406 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2407 if (i >= dev_limit)
2408 break;
2409 if (IS_ERR_OR_NULL(mirror->mirror_ds))
2410 continue;
2411 if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
2412 continue;
2413
2414 if (!refcount_inc_not_zero(&mirror->ref))
2415 continue;
2416 dev = &mirror->mirror_ds->id_node;
2417 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2418 devinfo->offset = 0;
2419 devinfo->length = NFS4_MAX_UINT64;
2420 spin_lock(&mirror->lock);
2421 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2422 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2423 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2424 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2425 spin_unlock(&mirror->lock);
2426 devinfo->layout_type = LAYOUT_FLEX_FILES;
2427 devinfo->ld_private.ops = &layoutstat_ops;
2428 devinfo->ld_private.data = mirror;
2429
2430 devinfo++;
2431 i++;
2432 }
2433 return i;
2434 }
2435
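/*
 * Allocate and populate the devinfo array for a LAYOUTSTATS call.  Returns
 * -ENOENT (after freeing the array) when no mirror has statistics to report.
 */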
2436 static int
2437 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2438 {
2439 struct nfs4_flexfile_layout *ff_layout;
2440 const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2441
2442 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2443 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2444 nfs_io_gfp_mask());
2445 if (!args->devinfo)
2446 return -ENOMEM;
2447
2448 spin_lock(&args->inode->i_lock);
2449 ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2450 args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2451 &args->devinfo[0], dev_count);
2452 spin_unlock(&args->inode->i_lock);
2453 if (!args->num_dev) {
2454 kfree(args->devinfo);
2455 args->devinfo = NULL;
2456 return -ENOENT;
2457 }
2458
2459 return 0;
2460 }
2461
2462 static int
2463 ff_layout_set_layoutdriver(struct nfs_server *server,
2464 const struct nfs_fh *dummy)
2465 {
2466 #if IS_ENABLED(CONFIG_NFS_V4_2)
2467 server->caps |= NFS_CAP_LAYOUTSTATS;
2468 #endif
2469 return 0;
2470 }
2471
2472 static const struct pnfs_commit_ops ff_layout_commit_ops = {
2473 .setup_ds_info = ff_layout_setup_ds_info,
2474 .release_ds_info = ff_layout_release_ds_info,
2475 .mark_request_commit = pnfs_layout_mark_request_commit,
2476 .clear_request_commit = pnfs_generic_clear_request_commit,
2477 .scan_commit_lists = pnfs_generic_scan_commit_lists,
2478 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
2479 .commit_pagelist = ff_layout_commit_pagelist,
2480 };
2481
2482 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2483 .id = LAYOUT_FLEX_FILES,
2484 .name = "LAYOUT_FLEX_FILES",
2485 .owner = THIS_MODULE,
2486 .flags = PNFS_LAYOUTGET_ON_OPEN,
2487 .max_layoutget_response = 4096,
2488 .set_layoutdriver = ff_layout_set_layoutdriver,
2489 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
2490 .free_layout_hdr = ff_layout_free_layout_hdr,
2491 .alloc_lseg = ff_layout_alloc_lseg,
2492 .free_lseg = ff_layout_free_lseg,
2493 .add_lseg = ff_layout_add_lseg,
2494 .pg_read_ops = &ff_layout_pg_read_ops,
2495 .pg_write_ops = &ff_layout_pg_write_ops,
2496 .get_ds_info = ff_layout_get_ds_info,
2497 .free_deviceid_node = ff_layout_free_deviceid_node,
2498 .read_pagelist = ff_layout_read_pagelist,
2499 .write_pagelist = ff_layout_write_pagelist,
2500 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
2501 .prepare_layoutreturn = ff_layout_prepare_layoutreturn,
2502 .sync = pnfs_nfs_generic_sync,
2503 .prepare_layoutstats = ff_layout_prepare_layoutstats,
2504 };
2505
2506 static int __init nfs4flexfilelayout_init(void)
2507 {
2508 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2509 __func__);
2510 return pnfs_register_layoutdriver(&flexfilelayout_type);
2511 }
2512
2513 static void __exit nfs4flexfilelayout_exit(void)
2514 {
2515 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2516 __func__);
2517 pnfs_unregister_layoutdriver(&flexfilelayout_type);
2518 }
2519
2520 MODULE_ALIAS("nfs-layouttype4-4");
2521
2522 MODULE_LICENSE("GPL");
2523 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2524
2525 module_init(nfs4flexfilelayout_init);
2526 module_exit(nfs4flexfilelayout_exit);
2527
2528 module_param(io_maxretrans, ushort, 0644);
2529 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2530 "retries an I/O request before returning an error.");