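// SPDX-License-Identifier: GPL-2.0+
/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */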
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>
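
/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */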
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;
	__u64 allocmin;
	__u64 allocmax;
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

/* Block offset within the sufile holding the usage entry of @segnum */
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

/* Entry offset of @segnum within its sufile block */
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

/* Adjust the clean/dirty segment counters in the sufile header block */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}
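
/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */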
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}
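
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and of a block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */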
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_warn(sufile->i_sb,
				   "%s: invalid segment number: %llu",
				   __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
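
/**
 * nilfs_sufile_update - modify a segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @create: creation flag
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_update() calls @dofunc with buffers of a
 * header block and of the block in which the usage entry of @segnum
 * is contained.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid segment usage number
 */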
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu",
			   __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
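
/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */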
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
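
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */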
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
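
/**
 * nilfs_sufile_do_cancel_free - dofunc to cancel freeing of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number of the clean segment to make dirty again
 * @header_bh: buffer head of header block
 * @su_bh: buffer head of the block containing the segment usage
 *
 * Marks a clean segment dirty again and adjusts the clean/dirty
 * segment counters accordingly.
 */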
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
			   __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
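
/**
 * nilfs_sufile_do_scrap - dofunc to make a segment garbage
 * @sufile: inode of segment usage file
 * @segnum: segment number to be scrapped
 * @header_bh: buffer head of header block
 * @su_bh: buffer head of the block containing the segment usage
 *
 * Resets the usage entry of @segnum to a dirty, empty state and
 * adjusts the clean/dirty segment counters.
 */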
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
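
/**
 * nilfs_sufile_do_free - dofunc to free a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number to be freed
 * @header_bh: buffer head of header block
 * @su_bh: buffer head of the block containing the segment usage
 *
 * Marks a dirty segment clean and adjusts the clean/dirty segment
 * counters.
 */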
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
			   __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}
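
/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */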
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}
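
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 */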
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time64_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
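
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */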
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
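
/**
 * nilfs_sufile_do_set_error - dofunc to mark a segment as erroneous
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @header_bh: buffer head of header block
 * @su_bh: buffer head of the block containing the segment usage
 *
 * Sets the error flag on the usage entry of @segnum; if the segment
 * was clean, the clean segment counters are decremented.
 */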
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
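
/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */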
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}
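
/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */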
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
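
/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Return Value: On success, the number of entries stored in @buf is
 * returned.  On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */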
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					BIT(NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
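
/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly. Only the fields indicated by the sup_flags
 * member are updated.
 *
 * Return Value: On success, 0 is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */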
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~BIT(NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
				(u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
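
/**
 * nilfs_sufile_trim_fs() - trim ioctl handle function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: First Byte to trim
 * len: number of Bytes to trim from start
 * minlen: minimum extent length in Bytes
 *
 * Description: nilfs_sufile_trim_fs goes through all segments containing
 * bytes from start to start+len. start is rounded up to the next block
 * boundary and start+len is rounded down. For each clean segment
 * blkdev_issue_discard() is invoked.
 *
 * Return Value: On success, 0 is returned. On error, a negative error code
 * is returned.
 */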
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
			bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
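
/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */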
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		nilfs_err(sb, "too large segment usage size: %zu bytes",
			  susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		nilfs_err(sb, "too small segment usage size: %zu bytes",
			  susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}