/*
 * Persistent object (block-group based) allocator for NILFS metadata
 * files.  Entries are managed per group with a bitmap block and a
 * group descriptor that tracks the number of free entries.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "mdt.h"
#include "alloc.h"

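/**
 * nilfs_palloc_groups_per_desc_block - get the number of groups that a group
 *                                      descriptor block can maintain
 * @inode: inode of metadata file using this allocator
 */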
static inline unsigned long
nilfs_palloc_groups_per_desc_block(const struct inode *inode)
{
	return i_blocksize(inode) /
		sizeof(struct nilfs_palloc_group_desc);
}

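/**
 * nilfs_palloc_groups_count - get maximum number of groups
 * @inode: inode of metadata file using this allocator
 */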
static inline unsigned long
nilfs_palloc_groups_count(const struct inode *inode)
{
	return 1UL << (BITS_PER_LONG - (inode->i_blkbits + 3 /* log2(8) */));
}

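/**
 * nilfs_palloc_init_blockgroup - initialize private variables for allocator
 * @inode: inode of metadata file using this allocator
 * @entry_size: size of the persistent object
 */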
int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_bgl = kmalloc(sizeof(*mi->mi_bgl), GFP_NOFS);
	if (!mi->mi_bgl)
		return -ENOMEM;

	bgl_lock_init(mi->mi_bgl);

	nilfs_mdt_set_entry_size(inode, entry_size, 0);

	mi->mi_blocks_per_group =
		DIV_ROUND_UP(nilfs_palloc_entries_per_group(inode),
			     mi->mi_entries_per_block) + 1;
		/*
		 * Number of blocks in a group including entry blocks
		 * and a bitmap block
		 */
	mi->mi_blocks_per_desc_block =
		nilfs_palloc_groups_per_desc_block(inode) *
		mi->mi_blocks_per_group + 1;
		/*
		 * Number of blocks per descriptor including the
		 * descriptor block
		 */
	return 0;
}

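/**
 * nilfs_palloc_group - get group number and offset from an entry number
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @offset: pointer to store offset number in the group
 */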
static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
					unsigned long *offset)
{
	__u64 group = nr;

	*offset = do_div(group, nilfs_palloc_entries_per_group(inode));
	return group;
}

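/**
 * nilfs_palloc_desc_blkoff - get block offset of a group descriptor block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 *
 * Returns the block offset of the descriptor block which contains a
 * descriptor of the specified group.
 */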
static unsigned long
nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
{
	unsigned long desc_block =
		group / nilfs_palloc_groups_per_desc_block(inode);
	return desc_block * NILFS_MDT(inode)->mi_blocks_per_desc_block;
}

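/**
 * nilfs_palloc_bitmap_blkoff - get block offset of a bitmap block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 *
 * Returns the block offset of the bitmap block used to allocate and
 * deallocate entries of the specified group.
 */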
static unsigned long
nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
{
	unsigned long desc_offset =
		group % nilfs_palloc_groups_per_desc_block(inode);
	return nilfs_palloc_desc_blkoff(inode, group) + 1 +
		desc_offset * NILFS_MDT(inode)->mi_blocks_per_group;
}

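/**
 * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group
 * @desc: pointer to descriptor structure for the group
 * @lock: spin lock protecting @desc
 */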
static unsigned long
nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc,
			       spinlock_t *lock)
{
	unsigned long nfree;

	spin_lock(lock);
	nfree = le32_to_cpu(desc->pg_nfrees);
	spin_unlock(lock);
	return nfree;
}

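/**
 * nilfs_palloc_group_desc_add_entries - adjust the count of free entries
 * @desc: pointer to descriptor structure for the group
 * @lock: spin lock protecting @desc
 * @n: delta to be added to the free entry counter
 */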
static u32
nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc,
				    spinlock_t *lock, u32 n)
{
	u32 nfree;

	spin_lock(lock);
	le32_add_cpu(&desc->pg_nfrees, n);
	nfree = le32_to_cpu(desc->pg_nfrees);
	spin_unlock(lock);
	return nfree;
}

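/**
 * nilfs_palloc_entry_blkoff - get block offset of an entry block
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 */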
static unsigned long
nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
{
	unsigned long group, group_offset;

	group = nilfs_palloc_group(inode, nr, &group_offset);

	return nilfs_palloc_bitmap_blkoff(inode, group) + 1 +
		group_offset / NILFS_MDT(inode)->mi_entries_per_block;
}

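/**
 * nilfs_palloc_desc_block_init - initialize buffer of a group descriptor block
 * @inode: inode of metadata file using this allocator
 * @bh: buffer head of the buffer to be initialized
 * @kaddr: kernel address mapped for the page including the buffer
 */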
static void nilfs_palloc_desc_block_init(struct inode *inode,
					 struct buffer_head *bh, void *kaddr)
{
	struct nilfs_palloc_group_desc *desc = kaddr + bh_offset(bh);
	unsigned long n = nilfs_palloc_groups_per_desc_block(inode);
	__le32 nfrees;

	nfrees = cpu_to_le32(nilfs_palloc_entries_per_group(inode));
	while (n-- > 0) {
		desc->pg_nfrees = nfrees;
		desc++;
	}
}

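/**
 * nilfs_palloc_get_block - get a buffer head of a block, with single-entry
 *                          lookup cache
 * @inode: inode of metadata file
 * @blkoff: block offset
 * @create: whether to create the block if it does not exist
 * @init_block: initializer of a newly allocated block
 * @bhp: place to store the resulting buffer head
 * @prev: last used buffer head association for @inode
 * @lock: spin lock protecting @prev
 */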
static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
				  int create,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *),
				  struct buffer_head **bhp,
				  struct nilfs_bh_assoc *prev,
				  spinlock_t *lock)
{
	int ret;

	spin_lock(lock);
	if (prev->bh && blkoff == prev->blkoff) {
		get_bh(prev->bh);
		*bhp = prev->bh;
		spin_unlock(lock);
		return 0;
	}
	spin_unlock(lock);

	ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
	if (!ret) {
		spin_lock(lock);
		/*
		 * The following code must be safe for change of the
		 * cache contents during the get block call.
		 */
		brelse(prev->bh);
		get_bh(*bhp);
		prev->bh = *bhp;
		prev->blkoff = blkoff;
		spin_unlock(lock);
	}
	return ret;
}

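/**
 * nilfs_palloc_delete_block - delete a block on the persistent allocator file
 * @inode: inode of metadata file
 * @blkoff: block offset
 * @prev: last used buffer head association for @inode
 * @lock: spin lock protecting @prev
 */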
static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff,
				     struct nilfs_bh_assoc *prev,
				     spinlock_t *lock)
{
	spin_lock(lock);
	if (prev->bh && blkoff == prev->blkoff) {
		brelse(prev->bh);
		prev->bh = NULL;
	}
	spin_unlock(lock);
	return nilfs_mdt_delete_block(inode, blkoff);
}

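/**
 * nilfs_palloc_get_desc_block - get buffer head of a group descriptor block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @create: create flag
 * @bhp: pointer to store the resulting buffer head
 */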
static int nilfs_palloc_get_desc_block(struct inode *inode,
				       unsigned long group,
				       int create, struct buffer_head **bhp)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	return nilfs_palloc_get_block(inode,
				      nilfs_palloc_desc_blkoff(inode, group),
				      create, nilfs_palloc_desc_block_init,
				      bhp, &cache->prev_desc, &cache->lock);
}

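/**
 * nilfs_palloc_get_bitmap_block - get buffer head of a bitmap block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @create: create flag
 * @bhp: pointer to store the resulting buffer head
 */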
static int nilfs_palloc_get_bitmap_block(struct inode *inode,
					 unsigned long group,
					 int create, struct buffer_head **bhp)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	return nilfs_palloc_get_block(inode,
				      nilfs_palloc_bitmap_blkoff(inode, group),
				      create, NULL, bhp,
				      &cache->prev_bitmap, &cache->lock);
}

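/**
 * nilfs_palloc_delete_bitmap_block - delete a bitmap block
 * @inode: inode of metadata file using this allocator
 * @group: group number
 */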
static int nilfs_palloc_delete_bitmap_block(struct inode *inode,
					    unsigned long group)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	return nilfs_palloc_delete_block(inode,
					 nilfs_palloc_bitmap_blkoff(inode,
								    group),
					 &cache->prev_bitmap, &cache->lock);
}

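/**
 * nilfs_palloc_get_entry_block - get buffer head of the block having an entry
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @create: create flag
 * @bhp: pointer to store the resulting buffer head
 */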
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
				 int create, struct buffer_head **bhp)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	return nilfs_palloc_get_block(inode,
				      nilfs_palloc_entry_blkoff(inode, nr),
				      create, NULL, bhp,
				      &cache->prev_entry, &cache->lock);
}

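/**
 * nilfs_palloc_delete_entry_block - delete an entry block
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry
 */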
static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	return nilfs_palloc_delete_block(inode,
					 nilfs_palloc_entry_blkoff(inode, nr),
					 &cache->prev_entry, &cache->lock);
}

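/**
 * nilfs_palloc_block_get_group_desc - get kernel address of a group descriptor
 * @inode: inode of metadata file using this allocator
 * @group: group number
 * @bh: buffer head of the buffer storing the group descriptor block
 * @kaddr: kernel address mapped for the page including the buffer
 */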
static struct nilfs_palloc_group_desc *
nilfs_palloc_block_get_group_desc(const struct inode *inode,
				  unsigned long group,
				  const struct buffer_head *bh, void *kaddr)
{
	return (struct nilfs_palloc_group_desc *)(kaddr + bh_offset(bh)) +
		group % nilfs_palloc_groups_per_desc_block(inode);
}

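/**
 * nilfs_palloc_block_get_entry - get kernel address of an entry
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @bh: buffer head of the buffer storing the entry block
 * @kaddr: kernel address mapped for the page including the buffer
 */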
void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
				   const struct buffer_head *bh, void *kaddr)
{
	unsigned long entry_offset, group_offset;

	nilfs_palloc_group(inode, nr, &group_offset);
	entry_offset = group_offset % NILFS_MDT(inode)->mi_entries_per_block;

	return kaddr + bh_offset(bh) +
		entry_offset * NILFS_MDT(inode)->mi_entry_size;
}

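/**
 * nilfs_palloc_find_available_slot - find and claim a free slot in a group
 * @bitmap: bitmap of the group
 * @target: offset number of an entry in the group (start point)
 * @bsize: size in bits
 * @lock: spin lock protecting the bitmap
 *
 * Returns the position of the claimed bit, or -ENOSPC if the group is full.
 */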
static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
					    unsigned long target,
					    unsigned int bsize,
					    spinlock_t *lock)
{
	int pos, end = bsize;

	if (likely(target < bsize)) {
		pos = target;
		do {
			pos = nilfs_find_next_zero_bit(bitmap, end, pos);
			if (pos >= end)
				break;
			if (!nilfs_set_bit_atomic(lock, pos, bitmap))
				return pos;
		} while (++pos < end);

		end = target;
	}

	/* wrap around */
	for (pos = 0; pos < end; pos++) {
		pos = nilfs_find_next_zero_bit(bitmap, end, pos);
		if (pos >= end)
			break;
		if (!nilfs_set_bit_atomic(lock, pos, bitmap))
			return pos;
	}

	return -ENOSPC;
}

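/**
 * nilfs_palloc_rest_groups_in_desc_block - get the remaining number of groups
 *                                          in a group descriptor block
 * @inode: inode of metadata file using this allocator
 * @curr: current group number
 * @max: maximum number of groups
 */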
static unsigned long
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
				       unsigned long curr, unsigned long max)
{
	return min_t(unsigned long,
		     nilfs_palloc_groups_per_desc_block(inode) -
		     curr % nilfs_palloc_groups_per_desc_block(inode),
		     max - curr + 1);
}

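/**
 * nilfs_palloc_count_desc_blocks - count the number of descriptor blocks
 * @inode: inode of metadata file using this allocator
 * @desc_blocks: descriptor blocks number [out]
 */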
static int nilfs_palloc_count_desc_blocks(struct inode *inode,
					  unsigned long *desc_blocks)
{
	__u64 blknum;
	int ret;

	ret = nilfs_bmap_last_key(NILFS_I(inode)->i_bmap, &blknum);
	if (likely(!ret))
		*desc_blocks = DIV_ROUND_UP(
			(unsigned long)blknum,
			NILFS_MDT(inode)->mi_blocks_per_desc_block);
	return ret;
}

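/**
 * nilfs_palloc_mdt_file_can_grow - check potential opportunity for
 *                                  MDT file growing
 * @inode: inode of metadata file using this allocator
 * @desc_blocks: known current descriptor blocks count
 */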
static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
						  unsigned long desc_blocks)
{
	return (nilfs_palloc_groups_per_desc_block(inode) * desc_blocks) <
			nilfs_palloc_groups_count(inode);
}

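/**
 * nilfs_palloc_count_max_entries - count max number of entries that can be
 *                                  described by descriptor blocks count
 * @inode: inode of metadata file using this allocator
 * @nused: current number of used entries
 * @nmaxp: max number of entries [out]
 */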
int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
{
	unsigned long desc_blocks = 0;
	u64 entries_per_desc_block, nmax;
	int err;

	err = nilfs_palloc_count_desc_blocks(inode, &desc_blocks);
	if (unlikely(err))
		return err;

	entries_per_desc_block = (u64)nilfs_palloc_entries_per_group(inode) *
				nilfs_palloc_groups_per_desc_block(inode);
	nmax = entries_per_desc_block * desc_blocks;

	if (nused == nmax &&
	    nilfs_palloc_mdt_file_can_grow(inode, desc_blocks))
		nmax += entries_per_desc_block;

	if (nused > nmax)
		return -ERANGE;

	*nmaxp = nmax;
	return 0;
}

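/**
 * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the allocation
 */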
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
				     struct nilfs_palloc_req *req)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	struct nilfs_palloc_group_desc *desc;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned long group, maxgroup, ngroups;
	unsigned long group_offset, maxgroup_offset;
	unsigned long n, entries_per_group;
	unsigned long i, j;
	spinlock_t *lock;
	int pos, ret;

	ngroups = nilfs_palloc_groups_count(inode);
	maxgroup = ngroups - 1;
	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	entries_per_group = nilfs_palloc_entries_per_group(inode);

	for (i = 0; i < ngroups; i += n) {
		if (group >= ngroups) {
			/* wrap around */
			group = 0;
			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
						      &maxgroup_offset) - 1;
		}
		ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
		if (ret < 0)
			return ret;
		desc_kaddr = kmap(desc_bh->b_page);
		desc = nilfs_palloc_block_get_group_desc(
			inode, group, desc_bh, desc_kaddr);
		n = nilfs_palloc_rest_groups_in_desc_block(inode, group,
							   maxgroup);
		for (j = 0; j < n; j++, desc++, group++) {
			lock = nilfs_mdt_bgl_lock(inode, group);
			if (nilfs_palloc_group_desc_nfrees(desc, lock) > 0) {
				ret = nilfs_palloc_get_bitmap_block(
					inode, group, 1, &bitmap_bh);
				if (ret < 0)
					goto out_desc;
				bitmap_kaddr = kmap(bitmap_bh->b_page);
				bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
				pos = nilfs_palloc_find_available_slot(
					bitmap, group_offset,
					entries_per_group, lock);
				if (pos >= 0) {
					/* found a free entry */
					nilfs_palloc_group_desc_add_entries(
						desc, lock, -1);
					req->pr_entry_nr =
						entries_per_group * group + pos;
					kunmap(desc_bh->b_page);
					kunmap(bitmap_bh->b_page);

					req->pr_desc_bh = desc_bh;
					req->pr_bitmap_bh = bitmap_bh;
					return 0;
				}
				kunmap(bitmap_bh->b_page);
				brelse(bitmap_bh);
			}

			group_offset = 0;
		}

		kunmap(desc_bh->b_page);
		brelse(desc_bh);
	}

	/* no entries left */
	return -ENOSPC;

out_desc:
	kunmap(desc_bh->b_page);
	brelse(desc_bh);
	return ret;
}

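/**
 * nilfs_palloc_commit_alloc_entry - finish allocation of a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the allocation
 */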
void nilfs_palloc_commit_alloc_entry(struct inode *inode,
				     struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_bitmap_bh);
	mark_buffer_dirty(req->pr_desc_bh);
	nilfs_mdt_mark_dirty(inode);

	brelse(req->pr_bitmap_bh);
	brelse(req->pr_desc_bh);
}

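/**
 * nilfs_palloc_commit_free_entry - finish deallocating a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the removal
 */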
void nilfs_palloc_commit_free_entry(struct inode *inode,
				    struct nilfs_palloc_req *req)
{
	struct nilfs_palloc_group_desc *desc;
	unsigned long group, group_offset;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;
	spinlock_t *lock;

	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	desc_kaddr = kmap(req->pr_desc_bh->b_page);
	desc = nilfs_palloc_block_get_group_desc(inode, group,
						 req->pr_desc_bh, desc_kaddr);
	bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
	bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
	lock = nilfs_mdt_bgl_lock(inode, group);

	if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
		nilfs_warn(inode->i_sb,
			   "%s (ino=%lu): entry number %llu already freed",
			   __func__, inode->i_ino,
			   (unsigned long long)req->pr_entry_nr);
	else
		nilfs_palloc_group_desc_add_entries(desc, lock, 1);

	kunmap(req->pr_bitmap_bh->b_page);
	kunmap(req->pr_desc_bh->b_page);

	mark_buffer_dirty(req->pr_desc_bh);
	mark_buffer_dirty(req->pr_bitmap_bh);
	nilfs_mdt_mark_dirty(inode);

	brelse(req->pr_bitmap_bh);
	brelse(req->pr_desc_bh);
}

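/**
 * nilfs_palloc_abort_alloc_entry - cancel allocation of a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the allocation
 */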
void nilfs_palloc_abort_alloc_entry(struct inode *inode,
				    struct nilfs_palloc_req *req)
{
	struct nilfs_palloc_group_desc *desc;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned char *bitmap;
	unsigned long group, group_offset;
	spinlock_t *lock;

	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	desc_kaddr = kmap(req->pr_desc_bh->b_page);
	desc = nilfs_palloc_block_get_group_desc(inode, group,
						 req->pr_desc_bh, desc_kaddr);
	bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
	bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
	lock = nilfs_mdt_bgl_lock(inode, group);

	if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
		nilfs_warn(inode->i_sb,
			   "%s (ino=%lu): entry number %llu already freed",
			   __func__, inode->i_ino,
			   (unsigned long long)req->pr_entry_nr);
	else
		nilfs_palloc_group_desc_add_entries(desc, lock, 1);

	kunmap(req->pr_bitmap_bh->b_page);
	kunmap(req->pr_desc_bh->b_page);

	brelse(req->pr_bitmap_bh);
	brelse(req->pr_desc_bh);

	req->pr_entry_nr = 0;
	req->pr_bitmap_bh = NULL;
	req->pr_desc_bh = NULL;
}

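/**
 * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the removal
 */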
int nilfs_palloc_prepare_free_entry(struct inode *inode,
				    struct nilfs_palloc_req *req)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	unsigned long group, group_offset;
	int ret;

	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
	if (ret < 0)
		return ret;
	ret = nilfs_palloc_get_bitmap_block(inode, group, 1, &bitmap_bh);
	if (ret < 0) {
		brelse(desc_bh);
		return ret;
	}

	req->pr_desc_bh = desc_bh;
	req->pr_bitmap_bh = bitmap_bh;
	return 0;
}

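/**
 * nilfs_palloc_abort_free_entry - cancel deallocating a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the removal
 */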
void nilfs_palloc_abort_free_entry(struct inode *inode,
				   struct nilfs_palloc_req *req)
{
	brelse(req->pr_bitmap_bh);
	brelse(req->pr_desc_bh);

	req->pr_entry_nr = 0;
	req->pr_bitmap_bh = NULL;
	req->pr_desc_bh = NULL;
}

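/**
 * nilfs_palloc_freev - deallocate a set of persistent objects
 * @inode: inode of metadata file using this allocator
 * @entry_nrs: array of entry numbers to be deallocated
 * @nitems: number of entries stored in @entry_nrs
 */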
int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	struct nilfs_palloc_group_desc *desc;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned long group, group_offset;
	__u64 group_min_nr, last_nrs[8];
	const unsigned long epg = nilfs_palloc_entries_per_group(inode);
	const unsigned int epb = NILFS_MDT(inode)->mi_entries_per_block;
	unsigned int entry_start, end, pos;
	spinlock_t *lock;
	int i, j, k, ret;
	u32 nfree;

	for (i = 0; i < nitems; i = j) {
		int change_group = false;
		int nempties = 0, n = 0;

		group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset);
		ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh);
		if (ret < 0)
			return ret;
		ret = nilfs_palloc_get_bitmap_block(inode, group, 0,
						    &bitmap_bh);
		if (ret < 0) {
			brelse(desc_bh);
			return ret;
		}

		/* Get the first entry number of the group */
		group_min_nr = (__u64)group * epg;

		bitmap_kaddr = kmap(bitmap_bh->b_page);
		bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
		lock = nilfs_mdt_bgl_lock(inode, group);

		j = i;
		entry_start = rounddown(group_offset, epb);
		do {
			if (!nilfs_clear_bit_atomic(lock, group_offset,
						    bitmap)) {
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): entry number %llu already freed",
					   __func__, inode->i_ino,
					   (unsigned long long)entry_nrs[j]);
			} else {
				n++;
			}

			j++;
			if (j >= nitems || entry_nrs[j] < group_min_nr ||
			    entry_nrs[j] >= group_min_nr + epg) {
				change_group = true;
			} else {
				group_offset = entry_nrs[j] - group_min_nr;
				if (group_offset >= entry_start &&
				    group_offset < entry_start + epb) {
					/* This entry is in the same block */
					continue;
				}
			}

			/* Test if the entry block is empty or not */
			end = entry_start + epb;
			pos = nilfs_find_next_bit(bitmap, end, entry_start);
			if (pos >= end) {
				last_nrs[nempties++] = entry_nrs[j - 1];
				if (nempties >= ARRAY_SIZE(last_nrs))
					break;
			}

			if (change_group)
				break;

			/* Go on to the next entry block */
			entry_start = rounddown(group_offset, epb);
		} while (true);

		kunmap(bitmap_bh->b_page);
		mark_buffer_dirty(bitmap_bh);
		brelse(bitmap_bh);

		for (k = 0; k < nempties; k++) {
			ret = nilfs_palloc_delete_entry_block(inode,
							      last_nrs[k]);
			if (ret && ret != -ENOENT)
				nilfs_warn(inode->i_sb,
					   "error %d deleting block that object (entry=%llu, ino=%lu) belongs to",
					   ret, (unsigned long long)last_nrs[k],
					   inode->i_ino);
		}

		desc_kaddr = kmap_atomic(desc_bh->b_page);
		desc = nilfs_palloc_block_get_group_desc(
			inode, group, desc_bh, desc_kaddr);
		nfree = nilfs_palloc_group_desc_add_entries(desc, lock, n);
		kunmap_atomic(desc_kaddr);
		mark_buffer_dirty(desc_bh);
		nilfs_mdt_mark_dirty(inode);
		brelse(desc_bh);

		if (nfree == nilfs_palloc_entries_per_group(inode)) {
			ret = nilfs_palloc_delete_bitmap_block(inode, group);
			if (ret && ret != -ENOENT)
				nilfs_warn(inode->i_sb,
					   "error %d deleting bitmap block of group=%lu, ino=%lu",
					   ret, group, inode->i_ino);
		}
	}
	return 0;
}

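/**
 * nilfs_palloc_setup_cache - attach a buffer head cache to the allocator
 * @inode: inode of metadata file using this allocator
 * @cache: cache structure to be attached to @inode
 */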
void nilfs_palloc_setup_cache(struct inode *inode,
			      struct nilfs_palloc_cache *cache)
{
	NILFS_MDT(inode)->mi_palloc_cache = cache;
	spin_lock_init(&cache->lock);
}

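/**
 * nilfs_palloc_clear_cache - drop buffer heads cached by the allocator
 * @inode: inode of metadata file using this allocator
 */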
void nilfs_palloc_clear_cache(struct inode *inode)
{
	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;

	spin_lock(&cache->lock);
	brelse(cache->prev_desc.bh);
	brelse(cache->prev_bitmap.bh);
	brelse(cache->prev_entry.bh);
	cache->prev_desc.bh = NULL;
	cache->prev_bitmap.bh = NULL;
	cache->prev_entry.bh = NULL;
	spin_unlock(&cache->lock);
}

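/**
 * nilfs_palloc_destroy_cache - detach the allocator cache from the inode
 * @inode: inode of metadata file using this allocator
 */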
void nilfs_palloc_destroy_cache(struct inode *inode)
{
	nilfs_palloc_clear_cache(inode);
	NILFS_MDT(inode)->mi_palloc_cache = NULL;
}