0001
0002
0003
0004
0005
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_shared.h"
0009 #include "xfs_format.h"
0010 #include "xfs_log_format.h"
0011 #include "xfs_trans_resv.h"
0012 #include "xfs_bit.h"
0013 #include "xfs_mount.h"
0014 #include "xfs_trans.h"
0015 #include "xfs_trans_priv.h"
0016 #include "xfs_buf_item.h"
0017 #include "xfs_inode.h"
0018 #include "xfs_inode_item.h"
0019 #include "xfs_quota.h"
0020 #include "xfs_dquot_item.h"
0021 #include "xfs_dquot.h"
0022 #include "xfs_trace.h"
0023 #include "xfs_log.h"
0024 #include "xfs_log_priv.h"
0025
0026
0027 struct kmem_cache *xfs_buf_item_cache;
0028
/* Convert a generic log item pointer back to its enclosing buf log item. */
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}
0033
0034
0035 bool
0036 xfs_buf_log_check_iovec(
0037 struct xfs_log_iovec *iovec)
0038 {
0039 struct xfs_buf_log_format *blfp = iovec->i_addr;
0040 char *bmp_end;
0041 char *item_end;
0042
0043 if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
0044 return false;
0045
0046 item_end = (char *)iovec->i_addr + iovec->i_len;
0047 bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
0048 return bmp_end <= item_end;
0049 }
0050
0051 static inline int
0052 xfs_buf_log_format_size(
0053 struct xfs_buf_log_format *blfp)
0054 {
0055 return offsetof(struct xfs_buf_log_format, blf_data_map) +
0056 (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
0057 }
0058
0059 static inline bool
0060 xfs_buf_item_straddle(
0061 struct xfs_buf *bp,
0062 uint offset,
0063 int first_bit,
0064 int nbits)
0065 {
0066 void *first, *last;
0067
0068 first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
0069 last = xfs_buf_offset(bp,
0070 offset + ((first_bit + nbits) << XFS_BLF_SHIFT));
0071
0072 if (last - first != nbits * XFS_BLF_CHUNK)
0073 return true;
0074 return false;
0075 }
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085 STATIC void
0086 xfs_buf_item_size_segment(
0087 struct xfs_buf_log_item *bip,
0088 struct xfs_buf_log_format *blfp,
0089 uint offset,
0090 int *nvecs,
0091 int *nbytes)
0092 {
0093 struct xfs_buf *bp = bip->bli_buf;
0094 int first_bit;
0095 int nbits;
0096 int next_bit;
0097 int last_bit;
0098
0099 first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
0100 if (first_bit == -1)
0101 return;
0102
0103 (*nvecs)++;
0104 *nbytes += xfs_buf_log_format_size(blfp);
0105
0106 do {
0107 nbits = xfs_contig_bits(blfp->blf_data_map,
0108 blfp->blf_map_size, first_bit);
0109 ASSERT(nbits > 0);
0110
0111
0112
0113
0114
0115 if (nbits > 1 &&
0116 xfs_buf_item_straddle(bp, offset, first_bit, nbits))
0117 goto slow_scan;
0118
0119 (*nvecs)++;
0120 *nbytes += nbits * XFS_BLF_CHUNK;
0121
0122
0123
0124
0125
0126
0127
0128 first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
0129 (uint)first_bit + nbits + 1);
0130 } while (first_bit != -1);
0131
0132 return;
0133
0134 slow_scan:
0135
0136 (*nvecs)++;
0137 *nbytes += XFS_BLF_CHUNK;
0138 last_bit = first_bit;
0139 while (last_bit != -1) {
0140
0141
0142
0143
0144
0145
0146 next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
0147 last_bit + 1);
0148
0149
0150
0151
0152
0153 if (next_bit == -1) {
0154 break;
0155 } else if (next_bit != last_bit + 1 ||
0156 xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
0157 last_bit = next_bit;
0158 first_bit = next_bit;
0159 (*nvecs)++;
0160 nbits = 1;
0161 } else {
0162 last_bit++;
0163 nbits++;
0164 }
0165 *nbytes += XFS_BLF_CHUNK;
0166 }
0167 }
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as
 * though they came from separate buffers, just like would occur if multiple
 * buffers were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures. If the item has previously been logged and has dirty
 * regions, we do not relog them in stale buffers. This has the effect of
 * reducing the size of the relogged item by the amount of dirty data tracked
 * by the log item. This can result in the committing transaction reducing the
 * amount of space being consumed by the CIL.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			i;
	int			bytes;
	uint			offset = 0;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it as we are never
		 * going to replay the changes tracked in the log item.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it. It is not being
		 * included in the transaction commit, so no vectors are used
		 * at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	bytes = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i], offset,
					  nvecs, &bytes);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Round up the buffer size required to minimise the number of memory
	 * allocations that need to be done as this item grows when relogged by
	 * repeated modifications.
	 */
	*nbytes = round_up(bytes, 512);
	trace_xfs_buf_item_size(bip);
}
0252
0253 static inline void
0254 xfs_buf_item_copy_iovec(
0255 struct xfs_log_vec *lv,
0256 struct xfs_log_iovec **vecp,
0257 struct xfs_buf *bp,
0258 uint offset,
0259 int first_bit,
0260 uint nbits)
0261 {
0262 offset += first_bit * XFS_BLF_CHUNK;
0263 xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
0264 xfs_buf_offset(bp, offset),
0265 nbits * XFS_BLF_CHUNK);
0266 }
0267
/*
 * Fill in the log iovecs for one segment of the buffer: one iovec for the
 * buf log format structure, then one per contiguous (in memory) run of dirty
 * chunks. Mirrors the accounting done by xfs_buf_item_size_segment().
 */
static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not be dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	do {
		ASSERT(first_bit >= 0);
		nbits = xfs_contig_bits(blfp->blf_data_map,
					blfp->blf_map_size, first_bit);
		ASSERT(nbits > 0);

		/*
		 * Straddling a page is rare because we don't log contiguous
		 * chunks of unmapped buffers anywhere, so we fall back to a
		 * slower chunk-by-chunk scan in that case.
		 */
		if (nbits > 1 &&
		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
			goto slow_scan;

		xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
					first_bit, nbits);
		blfp->blf_size++;

		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)first_bit + nbits + 1);
	} while (first_bit != -1);

	return;

slow_scan:
	/* The slow path only applies to unmapped buffers. */
	ASSERT(bp->b_addr == NULL);
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
		           xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
0389
0390
0391
0392
0393
0394
0395
/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer this state if
	 * the inode buffer allocation has not yet been committed to the log as
	 * setting the XFS_BLI_INODE_BUF flag will prevent correct replay of
	 * the inode allocation.
	 *
	 * For icreate item based inode allocation (v3 inodes), the buffers
	 * aren't written to the journal during allocation, and hence we should
	 * always tag the buffer as an inode buffer so that the correct
	 * unlinked list replay occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_has_v3inodes(lip->li_log->l_mp) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.  It takes a reference on the buf log item
 * (dropped again in xfs_buf_item_unpin()) and bumps the buffer's pin count.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}
0476
0477
0478
0479
0480
/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	/*
	 * Drop the bli ref associated with the pin and grab the hold required
	 * for the I/O simulation failure in the abort case. We have to do this
	 * before the pin count drops because the AIL doesn't acquire a bli
	 * reference. Therefore if the refcount drops to zero, the bli could
	 * still be AIL resident and the buffer submitted for I/O (and freed on
	 * completion) at any point before we return.
	 */
	freed = atomic_dec_and_test(&bip->bli_refcount);
	if (freed && !stale && remove)
		xfs_buf_hold(bp);
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	/* nothing to do but drop the pin count if the bli is active */
	if (!freed)
		return;

	if (stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(list_empty(&lip->li_trans));
		ASSERT(!bp->b_transp);

		trace_xfs_buf_item_unpin_stale(bip);

		/*
		 * The buffer has been locked and referenced since it was
		 * marked stale.  If we get here because of an IO error, we may
		 * or may not have the item on the AIL; xfs_trans_ail_delete()
		 * takes care of that situation.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_buf_inode_iodone(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure. We acquired the hold for this case
		 * before the buffer was unpinned.
		 */
		xfs_buf_lock(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}
0552
/*
 * AIL push handler for buf log items: try to queue the backing buffer for
 * delayed write.  Returns XFS_ITEM_PINNED, XFS_ITEM_LOCKED,
 * XFS_ITEM_FLUSHING or XFS_ITEM_SUCCESS as appropriate.
 */
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone
		 * else issues a log force to unpin the stale buffer. Check for
		 * the race condition here so xfsaild recognizes the buffer is
		 * pinned and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)xfs_buf_daddr(bp));
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
0593
0594
0595
0596
0597
0598
0599
0600
/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
			xlog_is_shutdown(lip->li_log);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(); the XFS_BLI_HOLD flag is cleared below regardless.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}
0701
0702 STATIC void
0703 xfs_buf_item_committing(
0704 struct xfs_log_item *lip,
0705 xfs_csn_t seq)
0706 {
0707 return xfs_buf_item_release(lip);
0708 }
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728 STATIC xfs_lsn_t
0729 xfs_buf_item_committed(
0730 struct xfs_log_item *lip,
0731 xfs_lsn_t lsn)
0732 {
0733 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
0734
0735 trace_xfs_buf_item_committed(bip);
0736
0737 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
0738 return lip->li_lsn;
0739 return lsn;
0740 }
0741
/* Log item operations vector for buf log items (see xfs_buf_item_init()). */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size = xfs_buf_item_size,
	.iop_format = xfs_buf_item_format,
	.iop_pin = xfs_buf_item_pin,
	.iop_unpin = xfs_buf_item_unpin,
	.iop_release = xfs_buf_item_release,
	.iop_committing = xfs_buf_item_committing,
	.iop_committed = xfs_buf_item_committed,
	.iop_push = xfs_buf_item_push,
};
0752
0753 STATIC void
0754 xfs_buf_item_get_format(
0755 struct xfs_buf_log_item *bip,
0756 int count)
0757 {
0758 ASSERT(bip->bli_formats == NULL);
0759 bip->bli_format_count = count;
0760
0761 if (count == 1) {
0762 bip->bli_formats = &bip->__bli_format;
0763 return;
0764 }
0765
0766 bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
0767 0);
0768 }
0769
0770 STATIC void
0771 xfs_buf_item_free_format(
0772 struct xfs_buf_log_item *bip)
0773 {
0774 if (bip->bli_formats != &bip->__bli_format) {
0775 kmem_free(bip->bli_formats);
0776 bip->bli_formats = NULL;
0777 }
0778 }
0779
0780
0781
0782
0783
0784
/*
 * Allocate a new buf log item to go with the given buffer.  Set the buffer's
 * b_log_item field to point to the new buf log item.  Returns 0 on success,
 * -EFSCORRUPTED if a segment is too large for the dirty bitmap to describe.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for this buffer. If
	 * we do already have one, there is nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_cache_zalloc(xfs_buf_item_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer can be
	 * divided into. Make sure not to truncate any pieces.  map_size is the
	 * size of the bitmap needed to describe the chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffers - if a buffer is made up of multiple discontiguous regions,
	 * there is one xfs_buf_log_format for each one.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			/* On-disk format cannot describe this segment. */
			kmem_cache_free(xfs_buf_item_cache, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
0847
0848
0849
0850
0851
0852
/*
 * Mark bytes first through last inclusive as dirty in the buf item's bitmap.
 * 'first' and 'last' are byte offsets relative to the start of this segment.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
0932
0933
0934
0935
0936
/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}
0979
0980
0981
0982
0983
0984
0985 bool
0986 xfs_buf_item_dirty_format(
0987 struct xfs_buf_log_item *bip)
0988 {
0989 int i;
0990
0991 for (i = 0; i < bip->bli_format_count; i++) {
0992 if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
0993 bip->bli_formats[i].blf_map_size))
0994 return true;
0995 }
0996
0997 return false;
0998 }
0999
/*
 * Free a buf log item: release the format array, the shadow log vector
 * buffer, and finally the bli itself back to its cache.
 */
STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_cache, bip);
}
1008
1009
1010
1011
/*
 * Disassociate the buffer log item from the buffer and free the buffer log
 * item.  The bli must not be on the AIL at this point.
 */
void
xfs_buf_item_relse(
	struct xfs_buf	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	/* drop the reference taken on the buffer in xfs_buf_item_init() */
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}
1025
void
xfs_buf_item_done(
	struct xfs_buf		*bp)
{
	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on AIL
	 * because of the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 *
	 * Note that log recovery writes might have buffer items that are not
	 * on the AIL even when the file system is not shut down.
	 */
	xfs_trans_ail_delete(&bp->b_log_item->bli_item,
			     (bp->b_flags & _XBF_LOGRECOVERY) ? 0 :
			     SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_relse(bp);
}