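/*
 * jfs_txnmgr.c: JFS transaction manager
 *
 * A transaction is opened with txBegin() (or txBeginAnon() for an
 * anonymous transaction), metadata pages and inodes are locked into it
 * with txLock()/txMaplock(), and it is closed with txCommit() followed
 * by txEnd(), or rolled back with txAbort().
 *
 * A minimal sketch of a caller, modelled on txQuiesce() and jfs_sync()
 * in this file (illustrative only; sb and ip stand for the caller's
 * super_block and inode):
 *
 *	tid_t tid = txBegin(sb, 0);
 *	mutex_lock(&JFS_IP(ip)->commit_mutex);
 *	rc = txCommit(tid, 1, &ip, 0);
 *	txEnd(tid);
 *	mutex_unlock(&JFS_IP(ip)->commit_mutex);
 */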
0032 #include <linux/fs.h>
0033 #include <linux/vmalloc.h>
0034 #include <linux/completion.h>
0035 #include <linux/freezer.h>
0036 #include <linux/module.h>
0037 #include <linux/moduleparam.h>
0038 #include <linux/kthread.h>
0039 #include <linux/seq_file.h>
0040 #include "jfs_incore.h"
0041 #include "jfs_inode.h"
0042 #include "jfs_filsys.h"
0043 #include "jfs_metapage.h"
0044 #include "jfs_dinode.h"
0045 #include "jfs_imap.h"
0046 #include "jfs_dmap.h"
0047 #include "jfs_superblock.h"
0048 #include "jfs_debug.h"
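
/*
 * transaction management structures
 *
 * TxAnchor is the in-memory anchor of the transaction manager: the free
 * lists of tids and tlocks, their wait queues, and the lists used by the
 * lazy-commit and sync threads.  It is protected by TXN_LOCK(), except
 * for unlock_queue, which is protected by LazyLock.
 */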
static struct {
	int freetid;		/* index of a free tid structure */
	int freelock;		/* index of first free lock word */
	wait_queue_head_t freewait;	/* eventlist of free tblock */
	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
	int tlocksInUse;	/* Number of tlocks in use */
	spinlock_t LazyLock;	/* protects unlock_queue */

	struct list_head unlock_queue;	/* Txns waiting to be released */
	struct list_head anon_list;	/* inodes having anonymous txns */
	struct list_head anon_list2;	/* inodes having anonymous txns
					   during sync */
} TxAnchor;
0067
0068 int jfs_tlocks_low;
0069
0070 #ifdef CONFIG_JFS_STATISTICS
0071 static struct {
0072 uint txBegin;
0073 uint txBegin_barrier;
0074 uint txBegin_lockslow;
0075 uint txBegin_freetid;
0076 uint txBeginAnon;
0077 uint txBeginAnon_barrier;
0078 uint txBeginAnon_lockslow;
0079 uint txLockAlloc;
0080 uint txLockAlloc_freelock;
0081 } TxStat;
0082 #endif
0083
0084 static int nTxBlock = -1;
0085 module_param(nTxBlock, int, 0);
0086 MODULE_PARM_DESC(nTxBlock,
0087 "Number of transaction blocks (max:65536)");
0088
0089 static int nTxLock = -1;
0090 module_param(nTxLock, int, 0);
0091 MODULE_PARM_DESC(nTxLock,
0092 "Number of transaction locks (max:65536)");
0093
struct tblock *TxBlock;	/* transaction block table */
static int TxLockLWM;	/* Low water mark for number of txLocks used */
static int TxLockHWM;	/* High water mark for number of txLocks used */
static int TxLockVHWM;	/* Very High water mark */
struct tlock *TxLock;	/* transaction lock table */
0099
0100
0101
0102
0103 static DEFINE_SPINLOCK(jfsTxnLock);
0104
0105 #define TXN_LOCK() spin_lock(&jfsTxnLock)
0106 #define TXN_UNLOCK() spin_unlock(&jfsTxnLock)
0107
0108 #define LAZY_LOCK_INIT() spin_lock_init(&TxAnchor.LazyLock)
0109 #define LAZY_LOCK(flags) spin_lock_irqsave(&TxAnchor.LazyLock, flags)
0110 #define LAZY_UNLOCK(flags) spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)
0111
0112 static DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
0113 static int jfs_commit_thread_waking;
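
/*
 * Sleep on a transaction-manager event.  TXN_SLEEP_DROP_LOCK() releases
 * TXN_LOCK before scheduling; TXN_SLEEP() reacquires it on wakeup.
 */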
0118 static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
0119 {
0120 DECLARE_WAITQUEUE(wait, current);
0121
0122 add_wait_queue(event, &wait);
0123 set_current_state(TASK_UNINTERRUPTIBLE);
0124 TXN_UNLOCK();
0125 io_schedule();
0126 remove_wait_queue(event, &wait);
0127 }
0128
0129 #define TXN_SLEEP(event)\
0130 {\
0131 TXN_SLEEP_DROP_LOCK(event);\
0132 TXN_LOCK();\
0133 }
0134
0135 #define TXN_WAKEUP(event) wake_up_all(event)
0136
0137
0138
0139
0140 static struct {
0141 tid_t maxtid;
0142 lid_t maxlid;
0143 int ntid;
0144 int nlid;
0145 int waitlock;
0146 } stattx;
0147
0148
0149
0150
0151 static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
0152 struct tlock *tlck, struct commit *cd);
0153 static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
0154 struct tlock *tlck);
0155 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
0156 struct tlock * tlck);
0157 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
0158 struct tlock * tlck);
0159 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
0160 struct tblock * tblk);
0161 static void txForce(struct tblock * tblk);
0162 static void txLog(struct jfs_log *log, struct tblock *tblk,
0163 struct commit *cd);
0164 static void txUpdateMap(struct tblock * tblk);
0165 static void txRelease(struct tblock * tblk);
0166 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
0167 struct tlock * tlck);
0168 static void LogSyncRelease(struct metapage * mp);
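
/*
 * txLockAlloc()
 *
 * Allocate a transaction lock (tlock) from the free list; the caller
 * holds TXN_LOCK and may sleep here until a tlock is available.  When
 * the number of tlocks in use crosses the high-water mark,
 * jfs_tlocks_low is set and the sync thread is woken to commit
 * anonymous transactions and release their tlocks.
 */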
0180 static lid_t txLockAlloc(void)
0181 {
0182 lid_t lid;
0183
0184 INCREMENT(TxStat.txLockAlloc);
0185 if (!TxAnchor.freelock) {
0186 INCREMENT(TxStat.txLockAlloc_freelock);
0187 }
0188
0189 while (!(lid = TxAnchor.freelock))
0190 TXN_SLEEP(&TxAnchor.freelockwait);
0191 TxAnchor.freelock = TxLock[lid].next;
0192 HIGHWATERMARK(stattx.maxlid, lid);
0193 if ((++TxAnchor.tlocksInUse > TxLockHWM) && (jfs_tlocks_low == 0)) {
0194 jfs_info("txLockAlloc tlocks low");
0195 jfs_tlocks_low = 1;
0196 wake_up_process(jfsSyncThread);
0197 }
0198
0199 return lid;
0200 }
0201
0202 static void txLockFree(lid_t lid)
0203 {
0204 TxLock[lid].tid = 0;
0205 TxLock[lid].next = TxAnchor.freelock;
0206 TxAnchor.freelock = lid;
0207 TxAnchor.tlocksInUse--;
0208 if (jfs_tlocks_low && (TxAnchor.tlocksInUse < TxLockLWM)) {
0209 jfs_info("txLockFree jfs_tlocks_low no more");
0210 jfs_tlocks_low = 0;
0211 TXN_WAKEUP(&TxAnchor.lowlockwait);
0212 }
0213 TXN_WAKEUP(&TxAnchor.freelockwait);
0214 }
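
/*
 * txInit()
 *
 * Initialize the transaction manager: size and allocate the TxBlock and
 * TxLock tables (bounded by the nTxBlock/nTxLock module parameters or by
 * available memory), build their free lists, and set the tlock
 * low/high/very-high water marks.
 */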
0225 int txInit(void)
0226 {
0227 int k, size;
0228 struct sysinfo si;
0229
0230
0231
0232 if (nTxLock == -1) {
0233 if (nTxBlock == -1) {
0234
0235 si_meminfo(&si);
0236 if (si.totalram > (256 * 1024))
0237 nTxLock = 64 * 1024;
0238 else
0239 nTxLock = si.totalram >> 2;
0240 } else if (nTxBlock > (8 * 1024))
0241 nTxLock = 64 * 1024;
0242 else
0243 nTxLock = nTxBlock << 3;
0244 }
0245 if (nTxBlock == -1)
0246 nTxBlock = nTxLock >> 3;
0247
0248
0249 if (nTxBlock < 16)
0250 nTxBlock = 16;
0251 if (nTxBlock > 65536)
0252 nTxBlock = 65536;
0253 if (nTxLock < 256)
0254 nTxLock = 256;
0255 if (nTxLock > 65536)
0256 nTxLock = 65536;
0257
0258 printk(KERN_INFO "JFS: nTxBlock = %d, nTxLock = %d\n",
0259 nTxBlock, nTxLock);
0260
0261
0262
0263
0264
0265
0266 TxLockLWM = (nTxLock * 4) / 10;
0267 TxLockHWM = (nTxLock * 7) / 10;
0268 TxLockVHWM = (nTxLock * 8) / 10;
0269
0270 size = sizeof(struct tblock) * nTxBlock;
0271 TxBlock = vmalloc(size);
0272 if (TxBlock == NULL)
0273 return -ENOMEM;
0274
0275 for (k = 1; k < nTxBlock - 1; k++) {
0276 TxBlock[k].next = k + 1;
0277 init_waitqueue_head(&TxBlock[k].gcwait);
0278 init_waitqueue_head(&TxBlock[k].waitor);
0279 }
0280 TxBlock[k].next = 0;
0281 init_waitqueue_head(&TxBlock[k].gcwait);
0282 init_waitqueue_head(&TxBlock[k].waitor);
0283
0284 TxAnchor.freetid = 1;
0285 init_waitqueue_head(&TxAnchor.freewait);
0286
0287 stattx.maxtid = 1;
0288
0289
0290
0291
0292
0293
0294
0295 size = sizeof(struct tlock) * nTxLock;
0296 TxLock = vmalloc(size);
0297 if (TxLock == NULL) {
0298 vfree(TxBlock);
0299 return -ENOMEM;
0300 }
0301
0302
0303 for (k = 1; k < nTxLock - 1; k++)
0304 TxLock[k].next = k + 1;
0305 TxLock[k].next = 0;
0306 init_waitqueue_head(&TxAnchor.freelockwait);
0307 init_waitqueue_head(&TxAnchor.lowlockwait);
0308
0309 TxAnchor.freelock = 1;
0310 TxAnchor.tlocksInUse = 0;
0311 INIT_LIST_HEAD(&TxAnchor.anon_list);
0312 INIT_LIST_HEAD(&TxAnchor.anon_list2);
0313
0314 LAZY_LOCK_INIT();
0315 INIT_LIST_HEAD(&TxAnchor.unlock_queue);
0316
0317 stattx.maxlid = 1;
0318
0319 return 0;
0320 }
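
/*
 * txExit()
 *
 * Clean up the transaction manager: free the TxLock and TxBlock tables.
 */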
0327 void txExit(void)
0328 {
0329 vfree(TxLock);
0330 TxLock = NULL;
0331 vfree(TxBlock);
0332 TxBlock = NULL;
0333 }
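
/*
 * txBegin()
 *
 * Start a transaction against the file system described by sb and return
 * its tid.  May block while a log sync barrier or quiesce is in effect,
 * while tlocks are scarce (flag == 0), or while no free tid is available;
 * a COMMIT_FORCE caller skips the barrier wait and may consume the last
 * free tid.
 */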
0348 tid_t txBegin(struct super_block *sb, int flag)
0349 {
0350 tid_t t;
0351 struct tblock *tblk;
0352 struct jfs_log *log;
0353
0354 jfs_info("txBegin: flag = 0x%x", flag);
0355 log = JFS_SBI(sb)->log;
0356
0357 TXN_LOCK();
0358
0359 INCREMENT(TxStat.txBegin);
0360
0361 retry:
0362 if (!(flag & COMMIT_FORCE)) {
0363
0364
0365
0366 if (test_bit(log_SYNCBARRIER, &log->flag) ||
0367 test_bit(log_QUIESCE, &log->flag)) {
0368 INCREMENT(TxStat.txBegin_barrier);
0369 TXN_SLEEP(&log->syncwait);
0370 goto retry;
0371 }
0372 }
0373 if (flag == 0) {
0374
0375
0376
0377
0378
0379 if (TxAnchor.tlocksInUse > TxLockVHWM) {
0380 INCREMENT(TxStat.txBegin_lockslow);
0381 TXN_SLEEP(&TxAnchor.lowlockwait);
0382 goto retry;
0383 }
0384 }
0385
0386
0387
0388
0389 if ((t = TxAnchor.freetid) == 0) {
0390 jfs_info("txBegin: waiting for free tid");
0391 INCREMENT(TxStat.txBegin_freetid);
0392 TXN_SLEEP(&TxAnchor.freewait);
0393 goto retry;
0394 }
0395
0396 tblk = tid_to_tblock(t);
0397
0398 if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
0399
0400 jfs_info("txBegin: waiting for free tid");
0401 INCREMENT(TxStat.txBegin_freetid);
0402 TXN_SLEEP(&TxAnchor.freewait);
0403 goto retry;
0404 }
0405
0406 TxAnchor.freetid = tblk->next;
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418 tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
0419
0420 tblk->sb = sb;
0421 ++log->logtid;
0422 tblk->logtid = log->logtid;
0423
0424 ++log->active;
0425
0426 HIGHWATERMARK(stattx.maxtid, t);
0427 INCREMENT(stattx.ntid);
0428
0429 TXN_UNLOCK();
0430
0431 jfs_info("txBegin: returning tid = %d", t);
0432
0433 return t;
0434 }
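
/*
 * txBeginAnon()
 *
 * Start an anonymous transaction (no tid is allocated).  Blocks only
 * while a log sync barrier/quiesce is in effect or tlocks are scarce.
 */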
0447 void txBeginAnon(struct super_block *sb)
0448 {
0449 struct jfs_log *log;
0450
0451 log = JFS_SBI(sb)->log;
0452
0453 TXN_LOCK();
0454 INCREMENT(TxStat.txBeginAnon);
0455
0456 retry:
0457
0458
0459
0460 if (test_bit(log_SYNCBARRIER, &log->flag) ||
0461 test_bit(log_QUIESCE, &log->flag)) {
0462 INCREMENT(TxStat.txBeginAnon_barrier);
0463 TXN_SLEEP(&log->syncwait);
0464 goto retry;
0465 }
0466
0467
0468
0469
0470 if (TxAnchor.tlocksInUse > TxLockVHWM) {
0471 INCREMENT(TxStat.txBeginAnon_lockslow);
0472 TXN_SLEEP(&TxAnchor.lowlockwait);
0473 goto retry;
0474 }
0475 TXN_UNLOCK();
0476 }
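
/*
 * txEnd()
 *
 * End a transaction: wake anyone waiting on the tblock, return the tid
 * to the free list (unless the lazy-commit thread still owns the tblock),
 * and, when this was the last active transaction under a sync barrier,
 * write a sync point and lift the barrier.
 */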
0487 void txEnd(tid_t tid)
0488 {
0489 struct tblock *tblk = tid_to_tblock(tid);
0490 struct jfs_log *log;
0491
0492 jfs_info("txEnd: tid = %d", tid);
0493 TXN_LOCK();
0494
0495
0496
0497
0498
0499 TXN_WAKEUP(&tblk->waitor);
0500
0501 log = JFS_SBI(tblk->sb)->log;
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511 if (tblk->flag & tblkGC_LAZY) {
0512 jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
0513 TXN_UNLOCK();
0514
0515 spin_lock_irq(&log->gclock);
0516 tblk->flag |= tblkGC_UNLOCKED;
0517 spin_unlock_irq(&log->gclock);
0518 return;
0519 }
0520
0521 jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);
0522
0523 assert(tblk->next == 0);
0524
0525
0526
0527
0528 tblk->next = TxAnchor.freetid;
0529 TxAnchor.freetid = tid;
0530
0531
0532
0533
0534 if (--log->active == 0) {
0535 clear_bit(log_FLUSH, &log->flag);
0536
0537
0538
0539
0540 if (test_bit(log_SYNCBARRIER, &log->flag)) {
0541 TXN_UNLOCK();
0542
0543
0544 jfs_syncpt(log, 1);
0545
0546 jfs_info("log barrier off: 0x%x", log->lsn);
0547
0548
0549 clear_bit(log_SYNCBARRIER, &log->flag);
0550
0551
0552 TXN_WAKEUP(&log->syncwait);
0553
0554 goto wakeup;
0555 }
0556 }
0557
0558 TXN_UNLOCK();
0559 wakeup:
0560
0561
0562
0563 TXN_WAKEUP(&TxAnchor.freewait);
0564 }
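
/*
 * txLock()
 *
 * Acquire a transaction lock on a metapage (or on the in-inode xtree
 * root of a directory) on behalf of transaction tid.  Returns the tlock,
 * or NULL after sleeping when the page is already locked by another
 * transaction; the caller must then retry.
 */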
0577 struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
0578 int type)
0579 {
0580 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
0581 int dir_xtree = 0;
0582 lid_t lid;
0583 tid_t xtid;
0584 struct tlock *tlck;
0585 struct xtlock *xtlck;
0586 struct linelock *linelock;
0587 xtpage_t *p;
0588 struct tblock *tblk;
0589
0590 TXN_LOCK();
0591
0592 if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
0593 !(mp->xflag & COMMIT_PAGE)) {
0594
0595
0596
0597
0598 dir_xtree = 1;
0599 lid = jfs_ip->xtlid;
0600 } else
0601 lid = mp->lid;
0602
0603
0604 if (lid == 0)
0605 goto allocateLock;
0606
0607 jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);
0608
0609
0610 tlck = lid_to_tlock(lid);
0611 if ((xtid = tlck->tid) == tid) {
0612 TXN_UNLOCK();
0613 goto grantLock;
0614 }
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627 if (xtid == 0) {
0628 tlck->tid = tid;
0629 TXN_UNLOCK();
0630 tblk = tid_to_tblock(tid);
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641 if (jfs_ip->atlhead == lid) {
0642 if (jfs_ip->atltail == lid) {
0643
0644
0645
0646 TXN_LOCK();
0647 list_del_init(&jfs_ip->anon_inode_list);
0648 TXN_UNLOCK();
0649 }
0650 jfs_ip->atlhead = tlck->next;
0651 } else {
0652 lid_t last;
0653 for (last = jfs_ip->atlhead;
0654 lid_to_tlock(last)->next != lid;
0655 last = lid_to_tlock(last)->next) {
0656 assert(last);
0657 }
0658 lid_to_tlock(last)->next = tlck->next;
0659 if (jfs_ip->atltail == lid)
0660 jfs_ip->atltail = last;
0661 }
0662
0663
0664
0665 if (tblk->next)
0666 lid_to_tlock(tblk->last)->next = lid;
0667 else
0668 tblk->next = lid;
0669 tlck->next = 0;
0670 tblk->last = lid;
0671
0672 goto grantLock;
0673 }
0674
0675 goto waitLock;
0676
0677
0678
0679
0680 allocateLock:
0681 lid = txLockAlloc();
0682 tlck = lid_to_tlock(lid);
0683
0684
0685
0686
0687 tlck->tid = tid;
0688
0689 TXN_UNLOCK();
0690
0691
0692 if (mp->xflag & COMMIT_PAGE) {
0693
0694 tlck->flag = tlckPAGELOCK;
0695
0696
0697 metapage_nohomeok(mp);
0698
0699 jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
0700 mp, mp->nohomeok, tid, tlck);
0701
0702
0703
0704
0705
0706
0707 if ((tid == 0) && mp->lsn)
0708 set_cflag(COMMIT_Synclist, ip);
0709 }
0710
0711 else
0712 tlck->flag = tlckINODELOCK;
0713
0714 if (S_ISDIR(ip->i_mode))
0715 tlck->flag |= tlckDIRECTORY;
0716
0717 tlck->type = 0;
0718
0719
0720 tlck->ip = ip;
0721 tlck->mp = mp;
0722 if (dir_xtree)
0723 jfs_ip->xtlid = lid;
0724 else
0725 mp->lid = lid;
0726
0727
0728
0729
0730
0731 if (tid) {
0732 tblk = tid_to_tblock(tid);
0733 if (tblk->next)
0734 lid_to_tlock(tblk->last)->next = lid;
0735 else
0736 tblk->next = lid;
0737 tlck->next = 0;
0738 tblk->last = lid;
0739 }
0740
0741
0742
0743 else {
0744 tlck->next = jfs_ip->atlhead;
0745 jfs_ip->atlhead = lid;
0746 if (tlck->next == 0) {
0747
0748 jfs_ip->atltail = lid;
0749 TXN_LOCK();
0750 list_add_tail(&jfs_ip->anon_inode_list,
0751 &TxAnchor.anon_list);
0752 TXN_UNLOCK();
0753 }
0754 }
0755
0756
0757 linelock = (struct linelock *) & tlck->lock;
0758 linelock->next = 0;
0759 linelock->flag = tlckLINELOCK;
0760 linelock->maxcnt = TLOCKSHORT;
0761 linelock->index = 0;
0762
0763 switch (type & tlckTYPE) {
0764 case tlckDTREE:
0765 linelock->l2linesize = L2DTSLOTSIZE;
0766 break;
0767
0768 case tlckXTREE:
0769 linelock->l2linesize = L2XTSLOTSIZE;
0770
0771 xtlck = (struct xtlock *) linelock;
0772 xtlck->header.offset = 0;
0773 xtlck->header.length = 2;
0774
0775 if (type & tlckNEW) {
0776 xtlck->lwm.offset = XTENTRYSTART;
0777 } else {
0778 if (mp->xflag & COMMIT_PAGE)
0779 p = (xtpage_t *) mp->data;
0780 else
0781 p = &jfs_ip->i_xtroot;
0782 xtlck->lwm.offset =
0783 le16_to_cpu(p->header.nextindex);
0784 }
0785 xtlck->lwm.length = 0;
0786 xtlck->twm.offset = 0;
0787 xtlck->hwm.offset = 0;
0788
0789 xtlck->index = 2;
0790 break;
0791
0792 case tlckINODE:
0793 linelock->l2linesize = L2INODESLOTSIZE;
0794 break;
0795
0796 case tlckDATA:
0797 linelock->l2linesize = L2DATASLOTSIZE;
0798 break;
0799
0800 default:
0801 jfs_err("UFO tlock:0x%p", tlck);
0802 }
0803
0804
0805
0806
0807 grantLock:
0808 tlck->type |= type;
0809
0810 return tlck;
0811
0812
0813
0814
0815 waitLock:
0816
0817
0818 if (jfs_ip->fileset != AGGREGATE_I) {
0819 printk(KERN_ERR "txLock: trying to lock locked page!");
0820 print_hex_dump(KERN_ERR, "ip: ", DUMP_PREFIX_ADDRESS, 16, 4,
0821 ip, sizeof(*ip), 0);
0822 print_hex_dump(KERN_ERR, "mp: ", DUMP_PREFIX_ADDRESS, 16, 4,
0823 mp, sizeof(*mp), 0);
0824 print_hex_dump(KERN_ERR, "Locker's tblock: ",
0825 DUMP_PREFIX_ADDRESS, 16, 4, tid_to_tblock(tid),
0826 sizeof(struct tblock), 0);
0827 print_hex_dump(KERN_ERR, "Tlock: ", DUMP_PREFIX_ADDRESS, 16, 4,
0828 tlck, sizeof(*tlck), 0);
0829 BUG();
0830 }
0831 INCREMENT(stattx.waitlock);
0832 TXN_UNLOCK();
0833 release_metapage(mp);
0834 TXN_LOCK();
0835 xtid = tlck->tid;
0836
0837 jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
0838 tid, xtid, lid);
0839
0840
0841 if (xtid && (tlck->mp == mp) && (mp->lid == lid))
0842 TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
0843 else
0844 TXN_UNLOCK();
0845 jfs_info("txLock: awakened tid = %d, lid = %d", tid, lid);
0846
0847 return NULL;
0848 }
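
/*
 * txRelease()
 *
 * Detach the committed transaction's tlocks from their metapages and
 * wake any transactions waiting on those pages.
 */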
0863 static void txRelease(struct tblock * tblk)
0864 {
0865 struct metapage *mp;
0866 lid_t lid;
0867 struct tlock *tlck;
0868
0869 TXN_LOCK();
0870
0871 for (lid = tblk->next; lid; lid = tlck->next) {
0872 tlck = lid_to_tlock(lid);
0873 if ((mp = tlck->mp) != NULL &&
0874 (tlck->type & tlckBTROOT) == 0) {
0875 assert(mp->xflag & COMMIT_PAGE);
0876 mp->lid = 0;
0877 }
0878 }
0879
0880
0881
0882
0883
0884 TXN_WAKEUP(&tblk->waitor);
0885
0886 TXN_UNLOCK();
0887 }
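
/*
 * txUnlock()
 *
 * Drop the nohomeok reference on each locked metapage, propagate the
 * commit lsn to the page, free the transaction's tlocks and linelocks,
 * and remove the tblock from the log synclist.
 */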
0895 static void txUnlock(struct tblock * tblk)
0896 {
0897 struct tlock *tlck;
0898 struct linelock *linelock;
0899 lid_t lid, next, llid, k;
0900 struct metapage *mp;
0901 struct jfs_log *log;
0902 int difft, diffp;
0903 unsigned long flags;
0904
0905 jfs_info("txUnlock: tblk = 0x%p", tblk);
0906 log = JFS_SBI(tblk->sb)->log;
0907
0908
0909
0910
0911 for (lid = tblk->next; lid; lid = next) {
0912 tlck = lid_to_tlock(lid);
0913 next = tlck->next;
0914
0915 jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);
0916
0917
0918 if ((mp = tlck->mp) != NULL &&
0919 (tlck->type & tlckBTROOT) == 0) {
0920 assert(mp->xflag & COMMIT_PAGE);
0921
0922
0923
0924 hold_metapage(mp);
0925
0926 assert(mp->nohomeok > 0);
0927 _metapage_homeok(mp);
0928
0929
0930 LOGSYNC_LOCK(log, flags);
0931 if (mp->clsn) {
0932 logdiff(difft, tblk->clsn, log);
0933 logdiff(diffp, mp->clsn, log);
0934 if (difft > diffp)
0935 mp->clsn = tblk->clsn;
0936 } else
0937 mp->clsn = tblk->clsn;
0938 LOGSYNC_UNLOCK(log, flags);
0939
0940 assert(!(tlck->flag & tlckFREEPAGE));
0941
0942 put_metapage(mp);
0943 }
0944
0945
0946
0947
0948 TXN_LOCK();
0949
0950 llid = ((struct linelock *) & tlck->lock)->next;
0951 while (llid) {
0952 linelock = (struct linelock *) lid_to_tlock(llid);
0953 k = linelock->next;
0954 txLockFree(llid);
0955 llid = k;
0956 }
0957 txLockFree(lid);
0958
0959 TXN_UNLOCK();
0960 }
0961 tblk->next = tblk->last = 0;
0962
0963
0964
0965
0966
0967
0968 if (tblk->lsn) {
0969 LOGSYNC_LOCK(log, flags);
0970 log->count--;
0971 list_del(&tblk->synclist);
0972 LOGSYNC_UNLOCK(log, flags);
0973 }
0974 }
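
/*
 * txMaplock()
 *
 * Allocate a tlock that covers a block-map update (no metapage is
 * attached) and queue it on the transaction's tlock list, or on the
 * inode's anonymous tlock list when tid is 0.
 */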
0982 struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
0983 {
0984 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
0985 lid_t lid;
0986 struct tblock *tblk;
0987 struct tlock *tlck;
0988 struct maplock *maplock;
0989
0990 TXN_LOCK();
0991
0992
0993
0994
0995 lid = txLockAlloc();
0996 tlck = lid_to_tlock(lid);
0997
0998
0999
1000
1001 tlck->tid = tid;
1002
1003
1004 tlck->flag = tlckINODELOCK;
1005 if (S_ISDIR(ip->i_mode))
1006 tlck->flag |= tlckDIRECTORY;
1007 tlck->ip = ip;
1008 tlck->mp = NULL;
1009
1010 tlck->type = type;
1011
1012
1013
1014
1015
1016 if (tid) {
1017 tblk = tid_to_tblock(tid);
1018 if (tblk->next)
1019 lid_to_tlock(tblk->last)->next = lid;
1020 else
1021 tblk->next = lid;
1022 tlck->next = 0;
1023 tblk->last = lid;
1024 }
1025
1026
1027
1028 else {
1029 tlck->next = jfs_ip->atlhead;
1030 jfs_ip->atlhead = lid;
1031 if (tlck->next == 0) {
1032
1033 jfs_ip->atltail = lid;
1034 list_add_tail(&jfs_ip->anon_inode_list,
1035 &TxAnchor.anon_list);
1036 }
1037 }
1038
1039 TXN_UNLOCK();
1040
1041
1042 maplock = (struct maplock *) & tlck->lock;
1043 maplock->next = 0;
1044 maplock->maxcnt = 0;
1045 maplock->index = 0;
1046
1047 return tlck;
1048 }
1049
1050
1051
1052
1053
1054
1055 struct linelock *txLinelock(struct linelock * tlock)
1056 {
1057 lid_t lid;
1058 struct tlock *tlck;
1059 struct linelock *linelock;
1060
1061 TXN_LOCK();
1062
1063
1064 lid = txLockAlloc();
1065 tlck = lid_to_tlock(lid);
1066
1067 TXN_UNLOCK();
1068
1069
1070 linelock = (struct linelock *) tlck;
1071 linelock->next = 0;
1072 linelock->flag = tlckLINELOCK;
1073 linelock->maxcnt = TLOCKLONG;
1074 linelock->index = 0;
1075 if (tlck->flag & tlckDIRECTORY)
1076 linelock->flag |= tlckDIRECTORY;
1077
1078
1079 linelock->next = tlock->next;
1080 tlock->next = lid;
1081
1082 return linelock;
1083 }
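
/*
 * txCommit()
 *
 * Commit transaction tid against the nip inodes in iplist: write the
 * on-disk inodes (diWrite), write log records for every tlock (txLog),
 * write and group-commit the COMMIT record, then force pages and update
 * the persistent allocation maps for synchronous commits, release the
 * tlocks from their pages, and free them unless the commit is lazy.
 * Returns 0 on success; on error the transaction is aborted.
 */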
1122 int txCommit(tid_t tid,
1123 int nip,
1124 struct inode **iplist,
1125 int flag)
1126 {
1127 int rc = 0;
1128 struct commit cd;
1129 struct jfs_log *log;
1130 struct tblock *tblk;
1131 struct lrd *lrd;
1132 struct inode *ip;
1133 struct jfs_inode_info *jfs_ip;
1134 int k, n;
1135 ino_t top;
1136 struct super_block *sb;
1137
1138 jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
1139
1140 if (isReadOnly(iplist[0])) {
1141 rc = -EROFS;
1142 goto TheEnd;
1143 }
1144
1145 sb = cd.sb = iplist[0]->i_sb;
1146 cd.tid = tid;
1147
1148 if (tid == 0)
1149 tid = txBegin(sb, 0);
1150 tblk = tid_to_tblock(tid);
1151
1152
1153
1154
1155 log = JFS_SBI(sb)->log;
1156 cd.log = log;
1157
1158
1159 lrd = &cd.lrd;
1160 lrd->logtid = cpu_to_le32(tblk->logtid);
1161 lrd->backchain = 0;
1162
1163 tblk->xflag |= flag;
1164
1165 if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
1166 tblk->xflag |= COMMIT_LAZY;
1167
1168
1169
1170
1171
1172
1173
1174
1175 cd.iplist = iplist;
1176 cd.nip = nip;
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190 for (k = 0; k < cd.nip; k++) {
1191 top = (cd.iplist[k])->i_ino;
1192 for (n = k + 1; n < cd.nip; n++) {
1193 ip = cd.iplist[n];
1194 if (ip->i_ino > top) {
1195 top = ip->i_ino;
1196 cd.iplist[n] = cd.iplist[k];
1197 cd.iplist[k] = ip;
1198 }
1199 }
1200
1201 ip = cd.iplist[k];
1202 jfs_ip = JFS_IP(ip);
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232 clear_cflag(COMMIT_Dirty, ip);
1233
1234
1235 if (jfs_ip->atlhead) {
1236 lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
1237 tblk->next = jfs_ip->atlhead;
1238 if (!tblk->last)
1239 tblk->last = jfs_ip->atltail;
1240 jfs_ip->atlhead = jfs_ip->atltail = 0;
1241 TXN_LOCK();
1242 list_del_init(&jfs_ip->anon_inode_list);
1243 TXN_UNLOCK();
1244 }
1245
1246
1247
1248
1249
1250 if (((rc = diWrite(tid, ip))))
1251 goto out;
1252 }
1253
1254
1255
1256
1257
1258
1259 txLog(log, tblk, &cd);
1260
1261
1262
1263
1264
1265 if (tblk->xflag & COMMIT_DELETE) {
1266 ihold(tblk->u.ip);
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284 if (tblk->u.ip->i_state & I_SYNC)
1285 tblk->xflag &= ~COMMIT_LAZY;
1286 }
1287
1288 ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
1289 ((tblk->u.ip->i_nlink == 0) &&
1290 !test_cflag(COMMIT_Nolink, tblk->u.ip)));
1291
1292
1293
1294
1295 lrd->type = cpu_to_le16(LOG_COMMIT);
1296 lrd->length = 0;
1297 lmLog(log, tblk, lrd, NULL);
1298
1299 lmGroupCommit(log, tblk);
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309 if (flag & COMMIT_FORCE)
1310 txForce(tblk);
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321 if (tblk->xflag & COMMIT_FORCE)
1322 txUpdateMap(tblk);
1323
1324
1325
1326
1327 txRelease(tblk);
1328
1329 if ((tblk->flag & tblkGC_LAZY) == 0)
1330 txUnlock(tblk);
1331
1332
1333
1334
1335
1336 for (k = 0; k < cd.nip; k++) {
1337 ip = cd.iplist[k];
1338 jfs_ip = JFS_IP(ip);
1339
1340
1341
1342
1343 jfs_ip->bxflag = 0;
1344 jfs_ip->blid = 0;
1345 }
1346
1347 out:
1348 if (rc != 0)
1349 txAbort(tid, 1);
1350
1351 TheEnd:
1352 jfs_info("txCommit: tid = %d, returning %d", tid, rc);
1353 return rc;
1354 }
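
/*
 * txLog()
 *
 * Write a log record for each tlock of the transaction, dispatching on
 * the lock type to xtLog/dtLog/diLog/mapLog/dataLog.
 */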
1367 static void txLog(struct jfs_log *log, struct tblock *tblk, struct commit *cd)
1368 {
1369 struct inode *ip;
1370 lid_t lid;
1371 struct tlock *tlck;
1372 struct lrd *lrd = &cd->lrd;
1373
1374
1375
1376
1377 for (lid = tblk->next; lid; lid = tlck->next) {
1378 tlck = lid_to_tlock(lid);
1379
1380 tlck->flag |= tlckLOG;
1381
1382
1383 ip = tlck->ip;
1384 lrd->aggregate = cpu_to_le32(JFS_SBI(ip->i_sb)->aggregate);
1385 lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
1386 lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);
1387
1388
1389 switch (tlck->type & tlckTYPE) {
1390 case tlckXTREE:
1391 xtLog(log, tblk, lrd, tlck);
1392 break;
1393
1394 case tlckDTREE:
1395 dtLog(log, tblk, lrd, tlck);
1396 break;
1397
1398 case tlckINODE:
1399 diLog(log, tblk, lrd, tlck, cd);
1400 break;
1401
1402 case tlckMAP:
1403 mapLog(log, tblk, lrd, tlck);
1404 break;
1405
1406 case tlckDATA:
1407 dataLog(log, tblk, lrd, tlck);
1408 break;
1409
1410 default:
1411 jfs_err("UFO tlock:0x%p", tlck);
1412 }
1413 }
1414
1415 return;
1416 }
1417
1418
1419
1420
1421
1422
1423 static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
1424 struct tlock *tlck, struct commit *cd)
1425 {
1426 struct metapage *mp;
1427 pxd_t *pxd;
1428 struct pxd_lock *pxdlock;
1429
1430 mp = tlck->mp;
1431
1432
1433 lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
1434 lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);
1435
1436 pxd = &lrd->log.redopage.pxd;
1437
1438
1439
1440
1441 if (tlck->type & tlckENTRY) {
1442
1443 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1444 PXDaddress(pxd, mp->index);
1445 PXDlength(pxd,
1446 mp->logical_size >> tblk->sb->s_blocksize_bits);
1447 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1448
1449
1450 tlck->flag |= tlckWRITEPAGE;
1451 } else if (tlck->type & tlckFREE) {
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469 lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
1470
1471
1472
1473
1474
1475
1476
1477 lrd->log.noredoinoext.iagnum =
1478 cpu_to_le32((u32) (size_t) cd->iplist[1]);
1479 lrd->log.noredoinoext.inoext_idx =
1480 cpu_to_le32((u32) (size_t) cd->iplist[2]);
1481
1482 pxdlock = (struct pxd_lock *) & tlck->lock;
1483 *pxd = pxdlock->pxd;
1484 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1485
1486
1487 tlck->flag |= tlckUPDATEMAP;
1488
1489
1490 tlck->flag |= tlckWRITEPAGE;
1491 } else
1492 jfs_err("diLog: UFO type tlck:0x%p", tlck);
1493 return;
1494 }
1495
1496
1497
1498
1499
1500
1501 static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
1502 struct tlock *tlck)
1503 {
1504 struct metapage *mp;
1505 pxd_t *pxd;
1506
1507 mp = tlck->mp;
1508
1509
1510 lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
1511 lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);
1512
1513 pxd = &lrd->log.redopage.pxd;
1514
1515
1516 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1517
1518 if (jfs_dirtable_inline(tlck->ip)) {
1519
1520
1521
1522
1523 mp->lid = 0;
1524 grab_metapage(mp);
1525 metapage_homeok(mp);
1526 discard_metapage(mp);
1527 tlck->mp = NULL;
1528 return;
1529 }
1530
1531 PXDaddress(pxd, mp->index);
1532 PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);
1533
1534 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1535
1536
1537 tlck->flag |= tlckWRITEPAGE;
1538
1539 return;
1540 }
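
/*
 * dtLog()
 *
 * Write log records for a dtree page tlock: redo records for new,
 * extended, or modified pages, and a no-redo record for freed or
 * relocated pages, queueing the matching block-map update.
 */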
1547 static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1548 struct tlock * tlck)
1549 {
1550 struct metapage *mp;
1551 struct pxd_lock *pxdlock;
1552 pxd_t *pxd;
1553
1554 mp = tlck->mp;
1555
1556
1557 lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
1558 lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);
1559
1560 pxd = &lrd->log.redopage.pxd;
1561
1562 if (tlck->type & tlckBTROOT)
1563 lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1564
1565
1566
1567
1568
1569
1570
1571 if (tlck->type & (tlckNEW | tlckEXTEND)) {
1572
1573
1574
1575
1576 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1577 if (tlck->type & tlckEXTEND)
1578 lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
1579 else
1580 lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
1581 PXDaddress(pxd, mp->index);
1582 PXDlength(pxd,
1583 mp->logical_size >> tblk->sb->s_blocksize_bits);
1584 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1585
1586
1587
1588
1589 if (tlck->type & tlckBTROOT)
1590 return;
1591 tlck->flag |= tlckUPDATEMAP;
1592 pxdlock = (struct pxd_lock *) & tlck->lock;
1593 pxdlock->flag = mlckALLOCPXD;
1594 pxdlock->pxd = *pxd;
1595
1596 pxdlock->index = 1;
1597
1598
1599 tlck->flag |= tlckWRITEPAGE;
1600 return;
1601 }
1602
1603
1604
1605
1606
1607 if (tlck->type & (tlckENTRY | tlckRELINK)) {
1608
1609 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1610 PXDaddress(pxd, mp->index);
1611 PXDlength(pxd,
1612 mp->logical_size >> tblk->sb->s_blocksize_bits);
1613 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1614
1615
1616 tlck->flag |= tlckWRITEPAGE;
1617 return;
1618 }
1619
1620
1621
1622
1623
1624
1625
1626
1627 if (tlck->type & (tlckFREE | tlckRELOCATE)) {
1628
1629
1630
1631
1632 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1633 pxdlock = (struct pxd_lock *) & tlck->lock;
1634 *pxd = pxdlock->pxd;
1635 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1636
1637
1638
1639
1640 tlck->flag |= tlckUPDATEMAP;
1641 }
1642 return;
1643 }
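
/*
 * xtLog()
 *
 * Write log records for an xtree page tlock, covering page format/grow,
 * page free (file deletion), and page truncation, and set up the
 * allocation/free map updates that txUpdateMap() will apply.
 */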
1650 static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
1651 struct tlock * tlck)
1652 {
1653 struct inode *ip;
1654 struct metapage *mp;
1655 xtpage_t *p;
1656 struct xtlock *xtlck;
1657 struct maplock *maplock;
1658 struct xdlistlock *xadlock;
1659 struct pxd_lock *pxdlock;
1660 pxd_t *page_pxd;
1661 int next, lwm, hwm;
1662
1663 ip = tlck->ip;
1664 mp = tlck->mp;
1665
1666
1667 lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
1668 lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);
1669
1670 page_pxd = &lrd->log.redopage.pxd;
1671
1672 if (tlck->type & tlckBTROOT) {
1673 lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
1674 p = &JFS_IP(ip)->i_xtroot;
1675 if (S_ISDIR(ip->i_mode))
1676 lrd->log.redopage.type |=
1677 cpu_to_le16(LOG_DIR_XTREE);
1678 } else
1679 p = (xtpage_t *) mp->data;
1680 next = le16_to_cpu(p->header.nextindex);
1681
1682 xtlck = (struct xtlock *) & tlck->lock;
1683
1684 maplock = (struct maplock *) & tlck->lock;
1685 xadlock = (struct xdlistlock *) maplock;
1686
1687
1688
1689
1690
1691 if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
1692
1693
1694
1695
1696
1697
1698
1699 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1700 PXDaddress(page_pxd, mp->index);
1701 PXDlength(page_pxd,
1702 mp->logical_size >> tblk->sb->s_blocksize_bits);
1703 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1704
1705
1706
1707
1708
1709
1710 lwm = xtlck->lwm.offset;
1711 if (lwm == 0)
1712 lwm = XTPAGEMAXSLOT;
1713
1714 if (lwm == next)
1715 goto out;
1716 if (lwm > next) {
1717 jfs_err("xtLog: lwm > next");
1718 goto out;
1719 }
1720 tlck->flag |= tlckUPDATEMAP;
1721 xadlock->flag = mlckALLOCXADLIST;
1722 xadlock->count = next - lwm;
1723 if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1724 int i;
1725 pxd_t *pxd;
1726
1727
1728
1729
1730
1731
1732
1733 xadlock->flag = mlckALLOCPXDLIST;
1734 pxd = xadlock->xdlist = &xtlck->pxdlock;
1735 for (i = 0; i < xadlock->count; i++) {
1736 PXDaddress(pxd, addressXAD(&p->xad[lwm + i]));
1737 PXDlength(pxd, lengthXAD(&p->xad[lwm + i]));
1738 p->xad[lwm + i].flag &=
1739 ~(XAD_NEW | XAD_EXTENDED);
1740 pxd++;
1741 }
1742 } else {
1743
1744
1745
1746
1747 xadlock->flag = mlckALLOCXADLIST;
1748 xadlock->xdlist = &p->xad[lwm];
1749 tblk->xflag &= ~COMMIT_LAZY;
1750 }
1751 jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d count:%d",
1752 tlck->ip, mp, tlck, lwm, xadlock->count);
1753
1754 maplock->index = 1;
1755
1756 out:
1757
1758 tlck->flag |= tlckWRITEPAGE;
1759
1760 return;
1761 }
1762
1763
1764
1765
1766
1767
1768
1769 if (tlck->type & tlckFREE) {
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786 if (tblk->xflag & COMMIT_TRUNCATE) {
1787
1788 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
1789 PXDaddress(page_pxd, mp->index);
1790 PXDlength(page_pxd,
1791 mp->logical_size >> tblk->sb->
1792 s_blocksize_bits);
1793 lrd->backchain =
1794 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1795
1796 if (tlck->type & tlckBTROOT) {
1797
1798 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1799 lrd->backchain =
1800 cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1801 }
1802 }
1803
1804
1805
1806
1807
1808 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1809 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
1810 xtlck = (struct xtlock *) & tlck->lock;
1811 hwm = xtlck->hwm.offset;
1812 lrd->log.updatemap.nxd =
1813 cpu_to_le16(hwm - XTENTRYSTART + 1);
1814
1815 xtlck->header.offset = XTENTRYSTART;
1816 xtlck->header.length = hwm - XTENTRYSTART + 1;
1817 xtlck->index = 1;
1818 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1819
1820
1821
1822
1823
1824 tlck->flag |= tlckUPDATEMAP;
1825 xadlock->count = hwm - XTENTRYSTART + 1;
1826 if ((xadlock->count <= 4) && (tblk->xflag & COMMIT_LAZY)) {
1827 int i;
1828 pxd_t *pxd;
1829
1830
1831
1832
1833
1834
1835
1836 xadlock->flag = mlckFREEPXDLIST;
1837 pxd = xadlock->xdlist = &xtlck->pxdlock;
1838 for (i = 0; i < xadlock->count; i++) {
1839 PXDaddress(pxd,
1840 addressXAD(&p->xad[XTENTRYSTART + i]));
1841 PXDlength(pxd,
1842 lengthXAD(&p->xad[XTENTRYSTART + i]));
1843 pxd++;
1844 }
1845 } else {
1846
1847
1848
1849
1850 xadlock->flag = mlckFREEXADLIST;
1851 xadlock->xdlist = &p->xad[XTENTRYSTART];
1852 tblk->xflag &= ~COMMIT_LAZY;
1853 }
1854 jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
1855 tlck->ip, mp, xadlock->count);
1856
1857 maplock->index = 1;
1858
1859
1860 if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
1861 && !(tlck->type & tlckBTROOT))
1862 tlck->flag |= tlckFREEPAGE;
1863
1864
1865
1866
1867 return;
1868 }
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880 if (tlck->type & tlckTRUNCATE) {
1881 pxd_t pxd;
1882 int twm;
1883
1884
1885
1886
1887
1888
1889
1890
1891 tblk->xflag &= ~COMMIT_LAZY;
1892 lwm = xtlck->lwm.offset;
1893 if (lwm == 0)
1894 lwm = XTPAGEMAXSLOT;
1895 hwm = xtlck->hwm.offset;
1896 twm = xtlck->twm.offset;
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909 lrd->type = cpu_to_le16(LOG_REDOPAGE);
1910 PXDaddress(page_pxd, mp->index);
1911 PXDlength(page_pxd,
1912 mp->logical_size >> tblk->sb->s_blocksize_bits);
1913 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1914
1915
1916
1917
1918 if (twm == next - 1) {
1919
1920
1921
1922
1923
1924 pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
1925
1926 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1927 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
1928 lrd->log.updatemap.nxd = cpu_to_le16(1);
1929 lrd->log.updatemap.pxd = pxdlock->pxd;
1930 pxd = pxdlock->pxd;
1931 lrd->backchain =
1932 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
1933 }
1934
1935
1936
1937
1938 if (hwm >= next) {
1939
1940
1941
1942
1943 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
1944 lrd->log.updatemap.type =
1945 cpu_to_le16(LOG_FREEXADLIST);
1946 xtlck = (struct xtlock *) & tlck->lock;
1947 hwm = xtlck->hwm.offset;
1948 lrd->log.updatemap.nxd =
1949 cpu_to_le16(hwm - next + 1);
1950
1951 xtlck->header.offset = next;
1952 xtlck->header.length = hwm - next + 1;
1953 xtlck->index = 1;
1954 lrd->backchain =
1955 cpu_to_le32(lmLog(log, tblk, lrd, tlck));
1956 }
1957
1958
1959
1960
1961 maplock->index = 0;
1962
1963
1964
1965
1966 if (lwm < next) {
1967
1968
1969
1970
1971
1972 tlck->flag |= tlckUPDATEMAP;
1973 xadlock->flag = mlckALLOCXADLIST;
1974 xadlock->count = next - lwm;
1975 xadlock->xdlist = &p->xad[lwm];
1976
1977 jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d lwm:%d next:%d",
1978 tlck->ip, mp, xadlock->count, lwm, next);
1979 maplock->index++;
1980 xadlock++;
1981 }
1982
1983
1984
1985
1986 if (twm == next - 1) {
1987
1988
1989
1990
1991
1992 tlck->flag |= tlckUPDATEMAP;
1993 pxdlock = (struct pxd_lock *) xadlock;
1994 pxdlock->flag = mlckFREEPXD;
1995 pxdlock->count = 1;
1996 pxdlock->pxd = pxd;
1997
1998 jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d hwm:%d",
1999 ip, mp, pxdlock->count, hwm);
2000 maplock->index++;
2001 xadlock++;
2002 }
2003
2004
2005
2006
2007 if (hwm >= next) {
2008
2009
2010
2011
2012 tlck->flag |= tlckUPDATEMAP;
2013 xadlock->flag = mlckFREEXADLIST;
2014 xadlock->count = hwm - next + 1;
2015 xadlock->xdlist = &p->xad[next];
2016
2017 jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d next:%d hwm:%d",
2018 tlck->ip, mp, xadlock->count, next, hwm);
2019 maplock->index++;
2020 }
2021
2022
2023 tlck->flag |= tlckWRITEPAGE;
2024 }
2025 return;
2026 }
2027
2028
2029
2030
2031
2032
2033 static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
2034 struct tlock * tlck)
2035 {
2036 struct pxd_lock *pxdlock;
2037 int i, nlock;
2038 pxd_t *pxd;
2039
2040
2041
2042
2043
2044
2045
2046
2047 if (tlck->type & tlckRELOCATE) {
2048
2049
2050
2051 lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
2052 pxdlock = (struct pxd_lock *) & tlck->lock;
2053 pxd = &lrd->log.redopage.pxd;
2054 *pxd = pxdlock->pxd;
2055 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2068 lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
2069 lrd->log.updatemap.nxd = cpu_to_le16(1);
2070 lrd->log.updatemap.pxd = pxdlock->pxd;
2071 lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2072
2073
2074
2075
2076 tlck->flag |= tlckUPDATEMAP;
2077 return;
2078 }
2079
2080
2081
2082
2083
2084 else {
2085
2086
2087
2088
2089
2090 lrd->type = cpu_to_le16(LOG_UPDATEMAP);
2091 pxdlock = (struct pxd_lock *) & tlck->lock;
2092 nlock = pxdlock->index;
2093 for (i = 0; i < nlock; i++, pxdlock++) {
2094 if (pxdlock->flag & mlckALLOCPXD)
2095 lrd->log.updatemap.type =
2096 cpu_to_le16(LOG_ALLOCPXD);
2097 else
2098 lrd->log.updatemap.type =
2099 cpu_to_le16(LOG_FREEPXD);
2100 lrd->log.updatemap.nxd = cpu_to_le16(1);
2101 lrd->log.updatemap.pxd = pxdlock->pxd;
2102 lrd->backchain =
2103 cpu_to_le32(lmLog(log, tblk, lrd, NULL));
2104 jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
2105 (ulong) addressPXD(&pxdlock->pxd),
2106 lengthPXD(&pxdlock->pxd));
2107 }
2108
2109
2110 tlck->flag |= tlckUPDATEMAP;
2111 }
2112 }
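
/*
 * txEA()
 *
 * Acquire the map tlocks needed when an inode's extended-attribute
 * extent changes: allocate the new EA extent (or mark the inode for an
 * inline EA) and free the old extent.
 */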
2120 void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
2121 {
2122 struct tlock *tlck = NULL;
2123 struct pxd_lock *maplock = NULL, *pxdlock = NULL;
2124
2125
2126
2127
2128 if (newea) {
2129
2130
2131
2132
2133 if (newea->flag & DXD_EXTENT) {
2134 tlck = txMaplock(tid, ip, tlckMAP);
2135 maplock = (struct pxd_lock *) & tlck->lock;
2136 pxdlock = (struct pxd_lock *) maplock;
2137 pxdlock->flag = mlckALLOCPXD;
2138 PXDaddress(&pxdlock->pxd, addressDXD(newea));
2139 PXDlength(&pxdlock->pxd, lengthDXD(newea));
2140 pxdlock++;
2141 maplock->index = 1;
2142 } else if (newea->flag & DXD_INLINE) {
2143 tlck = NULL;
2144
2145 set_cflag(COMMIT_Inlineea, ip);
2146 }
2147 }
2148
2149
2150
2151
2152 if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
2153 if (tlck == NULL) {
2154 tlck = txMaplock(tid, ip, tlckMAP);
2155 maplock = (struct pxd_lock *) & tlck->lock;
2156 pxdlock = (struct pxd_lock *) maplock;
2157 maplock->index = 0;
2158 }
2159 pxdlock->flag = mlckFREEPXD;
2160 PXDaddress(&pxdlock->pxd, addressDXD(oldea));
2161 PXDlength(&pxdlock->pxd, lengthDXD(oldea));
2162 maplock->index++;
2163 }
2164 }
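
/*
 * txForce()
 *
 * For a synchronous (forced) commit, write out each tlock'd metapage
 * flagged tlckWRITEPAGE; the tlock list is reversed first, so pages are
 * forced in the reverse of the order in which they were locked.
 */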
2172 static void txForce(struct tblock * tblk)
2173 {
2174 struct tlock *tlck;
2175 lid_t lid, next;
2176 struct metapage *mp;
2177
2178
2179
2180
2181
2182
2183 tlck = lid_to_tlock(tblk->next);
2184 lid = tlck->next;
2185 tlck->next = 0;
2186 while (lid) {
2187 tlck = lid_to_tlock(lid);
2188 next = tlck->next;
2189 tlck->next = tblk->next;
2190 tblk->next = lid;
2191 lid = next;
2192 }
2193
2194
2195
2196
2197
2198 for (lid = tblk->next; lid; lid = next) {
2199 tlck = lid_to_tlock(lid);
2200 next = tlck->next;
2201
2202 if ((mp = tlck->mp) != NULL &&
2203 (tlck->type & tlckBTROOT) == 0) {
2204 assert(mp->xflag & COMMIT_PAGE);
2205
2206 if (tlck->flag & tlckWRITEPAGE) {
2207 tlck->flag &= ~tlckWRITEPAGE;
2208
2209
2210 force_metapage(mp);
2211 #if 0
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223 assert(mp->nohomeok);
2224 set_bit(META_dirty, &mp->flag);
2225 set_bit(META_sync, &mp->flag);
2226 #endif
2227 }
2228 }
2229 }
2230 }
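
/*
 * txUpdateMap()
 *
 * Apply the block and inode allocation-map updates recorded in the
 * transaction's tlocks (via txAllocPMap()/txFreeMap()), release any
 * freed metapages, and update the inode map for COMMIT_CREATE and
 * COMMIT_DELETE transactions.
 */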
2240 static void txUpdateMap(struct tblock * tblk)
2241 {
2242 struct inode *ip;
2243 struct inode *ipimap;
2244 lid_t lid;
2245 struct tlock *tlck;
2246 struct maplock *maplock;
2247 struct pxd_lock pxdlock;
2248 int maptype;
2249 int k, nlock;
2250 struct metapage *mp = NULL;
2251
2252 ipimap = JFS_SBI(tblk->sb)->ipimap;
2253
2254 maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269 for (lid = tblk->next; lid; lid = tlck->next) {
2270 tlck = lid_to_tlock(lid);
2271
2272 if ((tlck->flag & tlckUPDATEMAP) == 0)
2273 continue;
2274
2275 if (tlck->flag & tlckFREEPAGE) {
2276
2277
2278
2279
2280
2281
2282
2283 mp = tlck->mp;
2284 ASSERT(mp->xflag & COMMIT_PAGE);
2285 grab_metapage(mp);
2286 }
2287
2288
2289
2290
2291
2292
2293 maplock = (struct maplock *) & tlck->lock;
2294 nlock = maplock->index;
2295
2296 for (k = 0; k < nlock; k++, maplock++) {
2297
2298
2299
2300
2301
2302 if (maplock->flag & mlckALLOC) {
2303 txAllocPMap(ipimap, maplock, tblk);
2304 }
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319 else {
2320
2321 if (tlck->flag & tlckDIRECTORY)
2322 txFreeMap(ipimap, maplock,
2323 tblk, COMMIT_PWMAP);
2324 else
2325 txFreeMap(ipimap, maplock,
2326 tblk, maptype);
2327 }
2328 }
2329 if (tlck->flag & tlckFREEPAGE) {
2330 if (!(tblk->flag & tblkGC_LAZY)) {
2331
2332 ASSERT(mp->lid == lid);
2333 tlck->mp->lid = 0;
2334 }
2335 assert(mp->nohomeok == 1);
2336 metapage_homeok(mp);
2337 discard_metapage(mp);
2338 tlck->mp = NULL;
2339 }
2340 }
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350 if (tblk->xflag & COMMIT_CREATE) {
2351 diUpdatePMap(ipimap, tblk->ino, false, tblk);
2352
2353
2354
2355 pxdlock.flag = mlckALLOCPXD;
2356 pxdlock.pxd = tblk->u.ixpxd;
2357 pxdlock.index = 1;
2358 txAllocPMap(ipimap, (struct maplock *) & pxdlock, tblk);
2359 } else if (tblk->xflag & COMMIT_DELETE) {
2360 ip = tblk->u.ip;
2361 diUpdatePMap(ipimap, ip->i_ino, true, tblk);
2362 iput(ip);
2363 }
2364 }
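
/*
 * txAllocPMap()
 *
 * Mark the extents described by an allocation maplock (single pxd, pxd
 * list, or xad list) as allocated in the persistent block map.
 */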
2386 static void txAllocPMap(struct inode *ip, struct maplock * maplock,
2387 struct tblock * tblk)
2388 {
2389 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2390 struct xdlistlock *xadlistlock;
2391 xad_t *xad;
2392 s64 xaddr;
2393 int xlen;
2394 struct pxd_lock *pxdlock;
2395 struct xdlistlock *pxdlistlock;
2396 pxd_t *pxd;
2397 int n;
2398
2399
2400
2401
2402 if (maplock->flag & mlckALLOCXADLIST) {
2403 xadlistlock = (struct xdlistlock *) maplock;
2404 xad = xadlistlock->xdlist;
2405 for (n = 0; n < xadlistlock->count; n++, xad++) {
2406 if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
2407 xaddr = addressXAD(xad);
2408 xlen = lengthXAD(xad);
2409 dbUpdatePMap(ipbmap, false, xaddr,
2410 (s64) xlen, tblk);
2411 xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
2412 jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2413 (ulong) xaddr, xlen);
2414 }
2415 }
2416 } else if (maplock->flag & mlckALLOCPXD) {
2417 pxdlock = (struct pxd_lock *) maplock;
2418 xaddr = addressPXD(&pxdlock->pxd);
2419 xlen = lengthPXD(&pxdlock->pxd);
2420 dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen, tblk);
2421 jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
2422 } else {
2423
2424 pxdlistlock = (struct xdlistlock *) maplock;
2425 pxd = pxdlistlock->xdlist;
2426 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2427 xaddr = addressPXD(pxd);
2428 xlen = lengthPXD(pxd);
2429 dbUpdatePMap(ipbmap, false, xaddr, (s64) xlen,
2430 tblk);
2431 jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
2432 (ulong) xaddr, xlen);
2433 }
2434 }
2435 }
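
/*
 * txFreeMap()
 *
 * Free the extents described by a maplock: in the persistent map for
 * COMMIT_PMAP/COMMIT_PWMAP and in the working map for
 * COMMIT_PWMAP/COMMIT_WMAP.
 */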
2444 void txFreeMap(struct inode *ip,
2445 struct maplock * maplock, struct tblock * tblk, int maptype)
2446 {
2447 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
2448 struct xdlistlock *xadlistlock;
2449 xad_t *xad;
2450 s64 xaddr;
2451 int xlen;
2452 struct pxd_lock *pxdlock;
2453 struct xdlistlock *pxdlistlock;
2454 pxd_t *pxd;
2455 int n;
2456
2457 jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
2458 tblk, maplock, maptype);
2459
2460
2461
2462
2463 if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
2464 if (maplock->flag & mlckFREEXADLIST) {
2465 xadlistlock = (struct xdlistlock *) maplock;
2466 xad = xadlistlock->xdlist;
2467 for (n = 0; n < xadlistlock->count; n++, xad++) {
2468 if (!(xad->flag & XAD_NEW)) {
2469 xaddr = addressXAD(xad);
2470 xlen = lengthXAD(xad);
2471 dbUpdatePMap(ipbmap, true, xaddr,
2472 (s64) xlen, tblk);
2473 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2474 (ulong) xaddr, xlen);
2475 }
2476 }
2477 } else if (maplock->flag & mlckFREEPXD) {
2478 pxdlock = (struct pxd_lock *) maplock;
2479 xaddr = addressPXD(&pxdlock->pxd);
2480 xlen = lengthPXD(&pxdlock->pxd);
2481 dbUpdatePMap(ipbmap, true, xaddr, (s64) xlen,
2482 tblk);
2483 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2484 (ulong) xaddr, xlen);
2485 } else {
2486
2487 pxdlistlock = (struct xdlistlock *) maplock;
2488 pxd = pxdlistlock->xdlist;
2489 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2490 xaddr = addressPXD(pxd);
2491 xlen = lengthPXD(pxd);
2492 dbUpdatePMap(ipbmap, true, xaddr,
2493 (s64) xlen, tblk);
2494 jfs_info("freePMap: xaddr:0x%lx xlen:%d",
2495 (ulong) xaddr, xlen);
2496 }
2497 }
2498 }
2499
2500
2501
2502
2503 if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
2504 if (maplock->flag & mlckFREEXADLIST) {
2505 xadlistlock = (struct xdlistlock *) maplock;
2506 xad = xadlistlock->xdlist;
2507 for (n = 0; n < xadlistlock->count; n++, xad++) {
2508 xaddr = addressXAD(xad);
2509 xlen = lengthXAD(xad);
2510 dbFree(ip, xaddr, (s64) xlen);
2511 xad->flag = 0;
2512 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2513 (ulong) xaddr, xlen);
2514 }
2515 } else if (maplock->flag & mlckFREEPXD) {
2516 pxdlock = (struct pxd_lock *) maplock;
2517 xaddr = addressPXD(&pxdlock->pxd);
2518 xlen = lengthPXD(&pxdlock->pxd);
2519 dbFree(ip, xaddr, (s64) xlen);
2520 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2521 (ulong) xaddr, xlen);
2522 } else {
2523
2524 pxdlistlock = (struct xdlistlock *) maplock;
2525 pxd = pxdlistlock->xdlist;
2526 for (n = 0; n < pxdlistlock->count; n++, pxd++) {
2527 xaddr = addressPXD(pxd);
2528 xlen = lengthPXD(pxd);
2529 dbFree(ip, xaddr, (s64) xlen);
2530 jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
2531 (ulong) xaddr, xlen);
2532 }
2533 }
2534 }
2535 }
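
/*
 * txFreelock()
 *
 * Walk an inode's anonymous tlock list and free the tlocks marked
 * tlckFREELOCK; remove the inode from the anon_list when no anonymous
 * tlocks remain.
 */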
2542 void txFreelock(struct inode *ip)
2543 {
2544 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
2545 struct tlock *xtlck, *tlck;
2546 lid_t xlid = 0, lid;
2547
2548 if (!jfs_ip->atlhead)
2549 return;
2550
2551 TXN_LOCK();
2552 xtlck = (struct tlock *) &jfs_ip->atlhead;
2553
2554 while ((lid = xtlck->next) != 0) {
2555 tlck = lid_to_tlock(lid);
2556 if (tlck->flag & tlckFREELOCK) {
2557 xtlck->next = tlck->next;
2558 txLockFree(lid);
2559 } else {
2560 xtlck = tlck;
2561 xlid = lid;
2562 }
2563 }
2564
2565 if (jfs_ip->atlhead)
2566 jfs_ip->atltail = xlid;
2567 else {
2568 jfs_ip->atltail = 0;
2569
2570
2571
2572 list_del_init(&jfs_ip->anon_inode_list);
2573 }
2574 TXN_UNLOCK();
2575 }
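
/*
 * txAbort()
 *
 * Abort transaction tid: detach and free its tlocks, releasing any
 * metapages held for logging, and, when the dirty flag is set, mark the
 * file system in error via jfs_error().
 */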
2588 void txAbort(tid_t tid, int dirty)
2589 {
2590 lid_t lid, next;
2591 struct metapage *mp;
2592 struct tblock *tblk = tid_to_tblock(tid);
2593 struct tlock *tlck;
2594
2595
2596
2597
2598 for (lid = tblk->next; lid; lid = next) {
2599 tlck = lid_to_tlock(lid);
2600 next = tlck->next;
2601 mp = tlck->mp;
2602 JFS_IP(tlck->ip)->xtlid = 0;
2603
2604 if (mp) {
2605 mp->lid = 0;
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615 if (mp->xflag & COMMIT_PAGE && mp->lsn)
2616 LogSyncRelease(mp);
2617 }
2618
2619 TXN_LOCK();
2620 txLockFree(lid);
2621 TXN_UNLOCK();
2622 }
2623
2624
2625
2626 tblk->next = tblk->last = 0;
2627
2628
2629
2630
2631 if (dirty)
2632 jfs_error(tblk->sb, "\n");
2633
2634 return;
2635 }
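
/*
 * txLazyCommit()
 *
 * Second stage of a lazy commit, run by the jfs_lazycommit thread once
 * group commit has written the log: update the allocation maps, mark
 * the tblock committed, wake group-commit waiters, and, for tblkGC_LAZY
 * transactions, unlock and end the transaction itself.
 */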
2645 static void txLazyCommit(struct tblock * tblk)
2646 {
2647 struct jfs_log *log;
2648
2649 while (((tblk->flag & tblkGC_READY) == 0) &&
2650 ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
2651
2652
2653 jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
2654 yield();
2655 }
2656
2657 jfs_info("txLazyCommit: processing tblk 0x%p", tblk);
2658
2659 txUpdateMap(tblk);
2660
2661 log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
2662
2663 spin_lock_irq(&log->gclock);
2664
2665 tblk->flag |= tblkGC_COMMITTED;
2666
2667 if (tblk->flag & tblkGC_READY)
2668 log->gcrtc--;
2669
2670 wake_up_all(&tblk->gcwait);
2671
2672
2673
2674
2675 if (tblk->flag & tblkGC_LAZY) {
2676 spin_unlock_irq(&log->gclock);
2677 txUnlock(tblk);
2678 tblk->flag &= ~tblkGC_LAZY;
2679 txEnd(tblk - TxBlock);
2680 } else
2681 spin_unlock_irq(&log->gclock);
2682
2683 jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
2684 }
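
/*
 * jfs_lazycommit()
 *
 * Kernel thread that drains TxAnchor.unlock_queue, running
 * txLazyCommit() on each queued tblock while allowing only one lazy
 * commit per file system at a time (IN_LAZYCOMMIT).
 */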
2693 int jfs_lazycommit(void *arg)
2694 {
2695 int WorkDone;
2696 struct tblock *tblk;
2697 unsigned long flags;
2698 struct jfs_sb_info *sbi;
2699
2700 do {
2701 LAZY_LOCK(flags);
2702 jfs_commit_thread_waking = 0;
2703 while (!list_empty(&TxAnchor.unlock_queue)) {
2704 WorkDone = 0;
2705 list_for_each_entry(tblk, &TxAnchor.unlock_queue,
2706 cqueue) {
2707
2708 sbi = JFS_SBI(tblk->sb);
2709
2710
2711
2712
2713
2714
2715 if (sbi->commit_state & IN_LAZYCOMMIT)
2716 continue;
2717
2718 sbi->commit_state |= IN_LAZYCOMMIT;
2719 WorkDone = 1;
2720
2721
2722
2723
2724 list_del(&tblk->cqueue);
2725
2726 LAZY_UNLOCK(flags);
2727 txLazyCommit(tblk);
2728 LAZY_LOCK(flags);
2729
2730 sbi->commit_state &= ~IN_LAZYCOMMIT;
2731
2732
2733
2734
2735
2736 break;
2737 }
2738
2739
2740 if (!WorkDone)
2741 break;
2742 }
2743
2744 jfs_commit_thread_waking = 0;
2745
2746 if (freezing(current)) {
2747 LAZY_UNLOCK(flags);
2748 try_to_freeze();
2749 } else {
2750 DECLARE_WAITQUEUE(wq, current);
2751
2752 add_wait_queue(&jfs_commit_thread_wait, &wq);
2753 set_current_state(TASK_INTERRUPTIBLE);
2754 LAZY_UNLOCK(flags);
2755 schedule();
2756 remove_wait_queue(&jfs_commit_thread_wait, &wq);
2757 }
2758 } while (!kthread_should_stop());
2759
2760 if (!list_empty(&TxAnchor.unlock_queue))
2761 jfs_err("jfs_lazycommit being killed w/pending transactions!");
2762 else
2763 jfs_info("jfs_lazycommit being killed");
2764 return 0;
2765 }
2766
2767 void txLazyUnlock(struct tblock * tblk)
2768 {
2769 unsigned long flags;
2770
2771 LAZY_LOCK(flags);
2772
2773 list_add_tail(&tblk->cqueue, &TxAnchor.unlock_queue);
2774
2775
2776
2777
2778 if (!(JFS_SBI(tblk->sb)->commit_state & IN_LAZYCOMMIT) &&
2779 !jfs_commit_thread_waking) {
2780 jfs_commit_thread_waking = 1;
2781 wake_up(&jfs_commit_thread_wait);
2782 }
2783 LAZY_UNLOCK(flags);
2784 }
2785
2786 static void LogSyncRelease(struct metapage * mp)
2787 {
2788 struct jfs_log *log = mp->log;
2789
2790 assert(mp->nohomeok);
2791 assert(log);
2792 metapage_homeok(mp);
2793 }
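
/*
 * txQuiesce()
 *
 * Block new transactions (log_QUIESCE), commit every inode that has an
 * anonymous transaction, then flush the journal.
 */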
2805 void txQuiesce(struct super_block *sb)
2806 {
2807 struct inode *ip;
2808 struct jfs_inode_info *jfs_ip;
2809 struct jfs_log *log = JFS_SBI(sb)->log;
2810 tid_t tid;
2811
2812 set_bit(log_QUIESCE, &log->flag);
2813
2814 TXN_LOCK();
2815 restart:
2816 while (!list_empty(&TxAnchor.anon_list)) {
2817 jfs_ip = list_entry(TxAnchor.anon_list.next,
2818 struct jfs_inode_info,
2819 anon_inode_list);
2820 ip = &jfs_ip->vfs_inode;
2821
2822
2823
2824
2825
2826 TXN_UNLOCK();
2827 tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
2828 mutex_lock(&jfs_ip->commit_mutex);
2829 txCommit(tid, 1, &ip, 0);
2830 txEnd(tid);
2831 mutex_unlock(&jfs_ip->commit_mutex);
2832
2833
2834
2835
2836 cond_resched();
2837 TXN_LOCK();
2838 }
2839
2840
2841
2842
2843
2844 if (!list_empty(&TxAnchor.anon_list2)) {
2845 list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2846 goto restart;
2847 }
2848 TXN_UNLOCK();
2849
2850
2851
2852
2853 jfs_flush_journal(log, 0);
2854 }
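
/*
 * txResume()
 *
 * Clear log_QUIESCE and wake transactions blocked in txBegin() and
 * txBeginAnon().
 */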
2861 void txResume(struct super_block *sb)
2862 {
2863 struct jfs_log *log = JFS_SBI(sb)->log;
2864
2865 clear_bit(log_QUIESCE, &log->flag);
2866 TXN_WAKEUP(&log->syncwait);
2867 }
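
/*
 * jfs_sync()
 *
 * Kernel thread that, when tlocks run low, commits inodes holding
 * anonymous transactions in order to release their tlocks.
 */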
2876 int jfs_sync(void *arg)
2877 {
2878 struct inode *ip;
2879 struct jfs_inode_info *jfs_ip;
2880 tid_t tid;
2881
2882 do {
2883
2884
2885
2886 TXN_LOCK();
2887 while (jfs_tlocks_low && !list_empty(&TxAnchor.anon_list)) {
2888 jfs_ip = list_entry(TxAnchor.anon_list.next,
2889 struct jfs_inode_info,
2890 anon_inode_list);
2891 ip = &jfs_ip->vfs_inode;
2892
2893 if (! igrab(ip)) {
2894
2895
2896
2897 list_del_init(&jfs_ip->anon_inode_list);
2898 } else if (mutex_trylock(&jfs_ip->commit_mutex)) {
2899
2900
2901
2902
2903 TXN_UNLOCK();
2904 tid = txBegin(ip->i_sb, COMMIT_INODE);
2905 txCommit(tid, 1, &ip, 0);
2906 txEnd(tid);
2907 mutex_unlock(&jfs_ip->commit_mutex);
2908
2909 iput(ip);
2910
2911
2912
2913
2914 cond_resched();
2915 TXN_LOCK();
2916 } else {
2917
2918
2919
2920
2921
2922
2923
2924 list_move(&jfs_ip->anon_inode_list,
2925 &TxAnchor.anon_list2);
2926
2927 TXN_UNLOCK();
2928 iput(ip);
2929 TXN_LOCK();
2930 }
2931 }
2932
2933 list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);
2934
2935 if (freezing(current)) {
2936 TXN_UNLOCK();
2937 try_to_freeze();
2938 } else {
2939 set_current_state(TASK_INTERRUPTIBLE);
2940 TXN_UNLOCK();
2941 schedule();
2942 }
2943 } while (!kthread_should_stop());
2944
2945 jfs_info("jfs_sync being killed");
2946 return 0;
2947 }
2948
2949 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
2950 int jfs_txanchor_proc_show(struct seq_file *m, void *v)
2951 {
2952 char *freewait;
2953 char *freelockwait;
2954 char *lowlockwait;
2955
2956 freewait =
2957 waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
2958 freelockwait =
2959 waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
2960 lowlockwait =
2961 waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
2962
2963 seq_printf(m,
2964 "JFS TxAnchor\n"
2965 "============\n"
2966 "freetid = %d\n"
2967 "freewait = %s\n"
2968 "freelock = %d\n"
2969 "freelockwait = %s\n"
2970 "lowlockwait = %s\n"
2971 "tlocksInUse = %d\n"
2972 "jfs_tlocks_low = %d\n"
2973 "unlock_queue is %sempty\n",
2974 TxAnchor.freetid,
2975 freewait,
2976 TxAnchor.freelock,
2977 freelockwait,
2978 lowlockwait,
2979 TxAnchor.tlocksInUse,
2980 jfs_tlocks_low,
2981 list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
2982 return 0;
2983 }
2984 #endif
2985
2986 #if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
2987 int jfs_txstats_proc_show(struct seq_file *m, void *v)
2988 {
2989 seq_printf(m,
2990 "JFS TxStats\n"
2991 "===========\n"
2992 "calls to txBegin = %d\n"
2993 "txBegin blocked by sync barrier = %d\n"
2994 "txBegin blocked by tlocks low = %d\n"
2995 "txBegin blocked by no free tid = %d\n"
2996 "calls to txBeginAnon = %d\n"
2997 "txBeginAnon blocked by sync barrier = %d\n"
2998 "txBeginAnon blocked by tlocks low = %d\n"
2999 "calls to txLockAlloc = %d\n"
3000 "tLockAlloc blocked by no free lock = %d\n",
3001 TxStat.txBegin,
3002 TxStat.txBegin_barrier,
3003 TxStat.txBegin_lockslow,
3004 TxStat.txBegin_freetid,
3005 TxStat.txBeginAnon,
3006 TxStat.txBeginAnon_barrier,
3007 TxStat.txBeginAnon_lockslow,
3008 TxStat.txLockAlloc,
3009 TxStat.txLockAlloc_freelock);
3010 return 0;
3011 }
3012 #endif