0001
0002
0003
0004
0005
0006
0007 #include <linux/fs.h>
0008 #include <linux/mm.h>
0009 #include <linux/module.h>
0010 #include <linux/bio.h>
0011 #include <linux/slab.h>
0012 #include <linux/init.h>
0013 #include <linux/buffer_head.h>
0014 #include <linux/mempool.h>
0015 #include <linux/seq_file.h>
0016 #include <linux/writeback.h>
0017 #include "jfs_incore.h"
0018 #include "jfs_superblock.h"
0019 #include "jfs_filsys.h"
0020 #include "jfs_metapage.h"
0021 #include "jfs_txnmgr.h"
0022 #include "jfs_debug.h"
0023
#ifdef CONFIG_JFS_STATISTICS
/* Counters reported through jfs_mpstat_proc_show() (see end of file). */
static struct {
	uint pagealloc;	/* # of metapage allocates */
	uint pagefree;	/* # of metapage frees */
	uint lockwait;	/* # of sleeps in __lock_metapage() */
} mpStat;
#endif

/*
 * META_locked serializes access to a metapage's contents.  The lock bit
 * is set/cleared with full acquire/release semantics; blocked threads
 * sleep on mp->wait (see __lock_metapage below).
 */
#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
0034
/* Drop the metapage lock and wake one exclusive waiter on mp->wait. */
static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}
0040
/*
 * Slow path of lock_metapage(): sleep until META_locked can be taken.
 *
 * Must be called with mp->page locked.  While we wait for the holder,
 * the page lock is dropped (the holder may need it to make progress)
 * and re-taken before re-testing the bit.
 */
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			/* Release the page lock while we sleep */
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}
0057
0058
0059
0060
/*
 * Acquire META_locked, sleeping if it is already held.
 * Caller must hold the page lock on mp->page.
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}
0066
/* Minimum number of objects reserved in the mempool for forward progress */
#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;	/* slab for struct metapage */
static mempool_t *metapage_mempool;		/* backed by metapage_cache */

/* How many metapages (PSIZE bytes each) fit in one VM page */
#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
0072
#if MPS_PER_PAGE > 1

/*
 * When multiple metapages share one page, page_private() points at a
 * meta_anchor holding all of them plus an in-flight-I/O count.
 */
struct meta_anchor {
	int mp_count;			/* # of slots in mp[] in use */
	atomic_t io_count;		/* bios in flight for this page */
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

/* Return the metapage covering byte 'offset' of 'page', or NULL. */
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}
0088
/*
 * Attach 'mp' to the page's meta_anchor, allocating the anchor (and
 * kmapping the page) on first use.  May be called with mp == NULL just
 * to establish the anchor.  Returns 0 or -ENOMEM.
 */
static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		/* slot index derived from the metapage's block index */
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}
0115
/*
 * Detach 'mp' from the page's meta_anchor.  When the last metapage is
 * removed, the anchor is freed and the page unmapped/un-Private'd.
 */
static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}
0134
/* Account one more bio in flight against this page. */
static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

/* Drop one I/O ref; run 'handler' when the last bio completes. */
static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}
0145
#else
/*
 * One metapage per page: page_private() holds the metapage pointer
 * directly and no anchor or I/O counting is needed.
 */
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

/* Single bio per page: the completion handler runs unconditionally. */
#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif
0173
0174 static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
0175 {
0176 struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);
0177
0178 if (mp) {
0179 mp->lid = 0;
0180 mp->lsn = 0;
0181 mp->data = NULL;
0182 mp->clsn = 0;
0183 mp->log = NULL;
0184 init_waitqueue_head(&mp->wait);
0185 }
0186 return mp;
0187 }
0188
/* Return a metapage to the mempool. */
static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}
0193
0194 int __init metapage_init(void)
0195 {
0196
0197
0198
0199 metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
0200 0, 0, NULL);
0201 if (metapage_cache == NULL)
0202 return -ENOMEM;
0203
0204 metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
0205 metapage_cache);
0206
0207 if (metapage_mempool == NULL) {
0208 kmem_cache_destroy(metapage_cache);
0209 return -ENOMEM;
0210 }
0211
0212 return 0;
0213 }
0214
/* Module exit: tear down the mempool, then the slab cache behind it. */
void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}
0220
/*
 * Free the metapage unless it is still referenced, pinned by nohomeok,
 * dirty, or has I/O in flight.  Caller holds the page lock.
 */
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}
0230
0231
0232
0233
0234
/*
 * Map logical block 'lblock' of 'inode' to a physical block number,
 * trimming *len so it does not extend past end-of-file.
 *
 * For the direct inode (i_ino == 0) the mapping is the identity; for
 * regular inodes the extent tree is consulted via xtLookup().  Note
 * that *len is passed to xtLookup() both by value and as the output
 * pointer, so it is updated to the mapped run length.
 *
 * Returns the physical block, or 0 on error / unmapped / out of range.
 */
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}
0259
/* Called when the last read bio for a page completes (via dec_io). */
static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}
0266
/* bio completion handler for metapage reads. */
static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}
0279
/*
 * Take the metapage off the journal's synclist and clear its log
 * association.  mp->lsn is re-checked under LOGSYNC_LOCK since it can
 * race with logsync processing.
 */
static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;

	/*
	 * This can race.  Recheck that log hasn't been set to NULL, and
	 * only clear state while holding the logsync lock.
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}
0301
/*
 * Called when the last write bio for a page completes (via dec_io):
 * clear META_io on every metapage in the page, drop logsync links,
 * and end writeback on the page.
 */
static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * Metapages without META_io set were not part of this
		 * write and are left untouched.
		 */
	}
	end_page_writeback(page);
}
0321
/* bio completion handler for metapage writes. */
static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}
0335
/*
 * Write out the dirty metapages within one page.
 *
 * Walks the page in PSIZE steps, coalescing metapages that are
 * contiguous both in memory and on disk into a single bio.  Metapages
 * pinned by nohomeok (and not forced) are redirtied instead of
 * written.  Each submitted bio holds one I/O ref on the page
 * (inc_io/dec_io); last_write_complete() ends writeback when the last
 * one finishes.
 *
 * Returns 0 on success, -EIO if bio construction or block lookup
 * failed.
 */
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it.
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous: submit the bio built so far */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but we can't cancel it,
			 * so we must call dec_io() for this block later
			 * (see err_out below).
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}
0469
/*
 * Read a folio of metadata: one bio per contiguous on-disk run, with
 * unmapped blocks skipped one at a time.  Each bio takes an I/O ref on
 * the page; last_read_complete() marks the page uptodate and unlocks
 * it when the last bio finishes.  If nothing was mapped, the page is
 * simply unlocked.
 */
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_page(inode, page);
	sector_t page_start;	/* face of page in file */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			/* anchor must exist before I/O completion runs */
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
					GFP_NOFS);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}
0526
/*
 * Attempt to free all metapages attached to a folio so the page cache
 * can reclaim it.  Returns false if any metapage is still referenced,
 * pinned (nohomeok) or dirty; already-releasable metapages are freed
 * even in that case.
 */
static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct metapage *mp;
	bool ret = true;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(&folio->page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_release_folio: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = false;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(&folio->page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}
0555
/*
 * Invalidate the whole folio.  Partial invalidation is never expected
 * for metadata (hence the BUG_ON), and no writeback may be in flight.
 */
static void metapage_invalidate_folio(struct folio *folio, size_t offset,
				      size_t length)
{
	BUG_ON(offset || length < folio_size(folio));

	BUG_ON(folio_test_writeback(folio));

	metapage_release_folio(folio, 0);
}
0565
/* Address-space operations for JFS metadata mappings. */
const struct address_space_operations jfs_metapage_aops = {
	.read_folio	= metapage_read_folio,
	.writepage	= metapage_writepage,
	.release_folio	= metapage_release_folio,
	.invalidate_folio = metapage_invalidate_folio,
	.dirty_folio	= filemap_dirty_folio,
};
0573
/*
 * Look up (or create) the metapage for 'lblock' of 'inode'.
 *
 * @inode:    inode the metadata belongs to
 * @lblock:   logical block number of the metadata
 * @size:     size of the metapage in bytes (must not cross a VM page)
 * @absolute: nonzero to use the filesystem-wide direct mapping instead
 *            of the inode's own mapping
 * @new:      nonzero if the caller is creating new metadata; the data
 *            is zeroed and the backing page need not be read
 *
 * Returns the metapage with its lock held and count incremented, or
 * NULL on failure.  Release with release_metapage().
 */
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * Guard against reading past end of file: the requested
		 * block must lie within i_size for the inode's own
		 * mapping.
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		/* New, full-page metadata: no need to read from disk */
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		/* Existing metapage: sanity-check and take a reference */
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		/* No metapage yet: allocate and attach one */
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->page = page;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}
0683
/* Take an additional reference on mp and acquire its metapage lock. */
void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	get_page(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}
0693
/*
 * Force a metapage to disk synchronously, even if it is pinned by
 * nohomeok (META_forcewrite overrides the pin in metapage_writepage).
 */
void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	if (write_one_page(page))
		jfs_error(mp->sb, "write_one_page() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	put_page(page);
}
0708
/* Lock the backing page; pair with put_metapage() or unlock_page(). */
void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}
0713
/*
 * Drop the hold taken by hold_metapage().  If nobody else holds a
 * reference, take one temporarily so release_metapage() can run the
 * normal release path.
 */
void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	get_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}
0727
/*
 * Drop a reference on mp (taken by __get_metapage/grab_metapage).  On
 * the final put: dirty metapages are pushed to the page cache (and
 * written synchronously if META_sync), clean ones are removed from the
 * logsync list, and the metapage itself may be freed via
 * drop_metapage().
 */
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		put_page(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (write_one_page(page))
				jfs_error(mp->sb, "write_one_page() failed\n");
			/* write_one_page unlocks the page */
			lock_page(page);
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	put_page(page);
}
0762
/*
 * Mark the metapages covering blocks [addr, addr + len) of the direct
 * mapping as discarded: they will eventually be released, but must not
 * be written back.  Any logsync association is dropped as well.
 */
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in the direct mapping only */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Walk page by page, starting at the page containing addr, and
	 * examine every metapage slot within each page found in cache.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		put_page(page);
	}
}
0802
0803 #ifdef CONFIG_JFS_STATISTICS
0804 int jfs_mpstat_proc_show(struct seq_file *m, void *v)
0805 {
0806 seq_printf(m,
0807 "JFS Metapage statistics\n"
0808 "=======================\n"
0809 "page allocations = %d\n"
0810 "page frees = %d\n"
0811 "lock waits = %d\n",
0812 mpStat.pagealloc,
0813 mpStat.pagefree,
0814 mpStat.lockwait);
0815 return 0;
0816 }
0817 #endif