0001
0002
0003
0004
0005
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_format.h"
0009 #include "xfs_log_format.h"
0010 #include "xfs_shared.h"
0011 #include "xfs_trans_resv.h"
0012 #include "xfs_mount.h"
0013 #include "xfs_extent_busy.h"
0014 #include "xfs_trans.h"
0015 #include "xfs_trans_priv.h"
0016 #include "xfs_log.h"
0017 #include "xfs_log_priv.h"
0018 #include "xfs_trace.h"
0019
0020 struct workqueue_struct *xfs_discard_wq;
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
/*
 * Allocate a new CIL checkpoint ticket.
 *
 * The ticket starts with no current reservation: the first transaction
 * committed into an empty CIL donates the checkpoint overhead reservation
 * (see the XLOG_CIL_EMPTY handling in xlog_cil_insert_items()).
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, 0);

	/*
	 * Zero the reservation so the space gets stolen from the first
	 * commit; iclog header budget is likewise accounted lazily.
	 */
	tic->t_curr_res = 0;
	tic->t_iclog_hdrs = 0;
	return tic;
}
0050
0051 static inline void
0052 xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
0053 {
0054 struct xlog *log = cil->xc_log;
0055
0056 atomic_set(&cil->xc_iclog_hdrs,
0057 (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
0058 (log->l_iclog_size - log->l_iclog_hsize)));
0059 }
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070 static bool
0071 xlog_item_in_current_chkpt(
0072 struct xfs_cil *cil,
0073 struct xfs_log_item *lip)
0074 {
0075 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
0076 return false;
0077
0078
0079
0080
0081
0082
0083 return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
0084 }
0085
0086 bool
0087 xfs_log_item_in_current_chkpt(
0088 struct xfs_log_item *lip)
0089 {
0090 return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
0091 }
0092
0093
0094
0095
0096
0097 static void xlog_cil_push_work(struct work_struct *work);
0098
0099 static struct xfs_cil_ctx *
0100 xlog_cil_ctx_alloc(void)
0101 {
0102 struct xfs_cil_ctx *ctx;
0103
0104 ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
0105 INIT_LIST_HEAD(&ctx->committing);
0106 INIT_LIST_HEAD(&ctx->busy_extents);
0107 INIT_LIST_HEAD(&ctx->log_items);
0108 INIT_LIST_HEAD(&ctx->lv_chain);
0109 INIT_WORK(&ctx->push_work, xlog_cil_push_work);
0110 return ctx;
0111 }
0112
0113
0114
0115
0116
0117
0118
/*
 * Aggregate the per-cpu CIL state (stolen reservation, busy extents, log
 * item lists) into the checkpoint context at push time. Called from the
 * push worker, which holds xc_ctx_lock exclusively, so concurrent commits
 * cannot be modifying these per-cpu structures for this context.
 */
static void
xlog_cil_push_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;

	for_each_online_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

		/* Return reservation stolen by commits to the ctx ticket. */
		ctx->ticket->t_curr_res += cilpcp->space_reserved;
		cilpcp->space_reserved = 0;

		if (!list_empty(&cilpcp->busy_extents)) {
			list_splice_init(&cilpcp->busy_extents,
					&ctx->busy_extents);
		}
		if (!list_empty(&cilpcp->log_items))
			list_splice_init(&cilpcp->log_items, &ctx->log_items);

		/*
		 * The context is being pushed, so its accumulated space
		 * count is no longer needed — just reset the per-cpu
		 * counter ready for the next context.
		 */
		cilpcp->space_used = 0;
	}
}
0148
0149
0150
0151
0152
0153
0154
/*
 * Fold all the per-cpu space-used counters into the global context count.
 * Called from the commit path once the global count crosses the soft
 * limit; clearing XLOG_CIL_PCP_SPACE switches subsequent commits to
 * accounting directly against ctx->space_used (see xlog_cil_insert_items()).
 */
static void
xlog_cil_insert_pcp_aggregate(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	struct xlog_cil_pcp	*cilpcp;
	int			cpu;
	int			count = 0;

	/* Only one caller clears the flag and performs the aggregation. */
	if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
		return;

	for_each_online_cpu(cpu) {
		int	old, prev;

		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		/*
		 * Atomically zero the per-cpu counter, retrying until the
		 * cmpxchg succeeds (prev == old means no racing update).
		 */
		do {
			old = cilpcp->space_used;
			prev = cmpxchg(&cilpcp->space_used, old, 0);
		} while (old != prev);
		count += old;
	}
	atomic_add(count, &ctx->space_used);
}
0180
/*
 * Install a new, empty context as the current CIL context and advance the
 * checkpoint sequence. Callers serialise context switches (the push worker
 * does this under xc_ctx_lock held exclusively).
 */
static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	/* Refill the iclog header stealing budget for the new context. */
	xlog_cil_set_iclog_hdr_count(cil);
	/*
	 * The new context starts out empty with per-cpu space accounting
	 * enabled; these flags must be set before the context is visible.
	 */
	set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
	set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204 void
0205 xlog_cil_init_post_recovery(
0206 struct xlog *log)
0207 {
0208 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
0209 log->l_cilp->xc_ctx->sequence = 1;
0210 xlog_cil_set_iclog_hdr_count(log->l_cilp);
0211 }
0212
0213 static inline int
0214 xlog_cil_iovec_space(
0215 uint niovecs)
0216 {
0217 return round_up((sizeof(struct xfs_log_vec) +
0218 niovecs * sizeof(struct xfs_log_iovec)),
0219 sizeof(uint64_t));
0220 }
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
/*
 * Allocate (or reuse) the shadow formatting buffers for every dirty log
 * item in the transaction before any CIL locks are taken. Pre-allocating
 * here means the formatting step under the CIL context lock never has to
 * allocate memory.
 *
 * The shadow buffer holds the xfs_log_vec header, the iovec array and the
 * formatted data region, all in one allocation. If the existing shadow is
 * large enough it is reused; otherwise it is freed and reallocated.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items only need a log vector header — they carry
		 * no formatted data regions at all.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * Pad each iovec's data for the op header and 8-byte
		 * alignment of the region that follows it, then round the
		 * total so the whole data area stays 8-byte aligned.
		 */
		nbytes += niovecs *
			(sizeof(uint64_t) + sizeof(struct xlog_op_header));
		nbytes = round_up(nbytes, sizeof(uint64_t));

		/* Total: vector header + iovec array + formatted data. */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * If the existing shadow is missing or too small, allocate a
		 * new one; otherwise reuse it in place.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * kmem_free(NULL) is a no-op, so no need to check
			 * for a missing shadow before freeing.
			 */
			kmem_free(lip->li_lv_shadow);
			lv = xlog_kvmalloc(buf_size);

			/* Only the header area needs zeroing. */
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			INIT_LIST_HEAD(&lv->lv_list);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* Reuse the existing shadow; reset usage counters. */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}

}
0368
0369
0370
0371
0372
/*
 * Prepare a formatted log vector for insertion into the CIL: account the
 * change in space consumed, pin the item on first insertion, and stash the
 * superseded buffer as the shadow for the next commit to reuse.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len)
{
	/* Ordered log vectors have no data regions to account for. */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
		*diff_len += lv->lv_bytes;

	/*
	 * No old LV means this is the item's first commit into this CIL
	 * context: pin it (via iop_pin, if provided) and consume the shadow
	 * buffer. If the item was relogged into a different (shadow) buffer,
	 * subtract the old buffer's space and keep it as the new shadow.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * Record the checkpoint sequence the item first joined, if it isn't
	 * already set, so checkpoint membership can be determined later
	 * (see xlog_item_in_current_chkpt()).
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
/*
 * Format all the dirty log items in the transaction into their log vectors,
 * reusing the item's existing buffer where it is large enough and switching
 * to the pre-allocated shadow buffer otherwise. Accumulates the net change
 * in CIL space consumption in *diff_len.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len)
{
	struct xfs_log_item	*lip;

	/* A transaction with no dirty items should never get here. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The shadow buffer was sized by ->iop_size in
		 * xlog_cil_alloc_shadow_bufs(); an ordered marker there
		 * means the item is logged without data.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;

			if (ordered)
				goto insert;

			/*
			 * The buffer is being reused, so the old formatted
			 * contents will be overwritten: remove the old
			 * byte count from the accounting first.
			 */
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len);
	}
}
0519
0520
0521
0522
0523
0524
0525 static inline bool
0526 xlog_cil_over_hard_limit(
0527 struct xlog *log,
0528 int32_t space_used)
0529 {
0530 if (waitqueue_active(&log->l_cilp->xc_push_wait))
0531 return true;
0532 if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
0533 return true;
0534 return false;
0535 }
0536
0537
0538
0539
0540
0541
0542
0543
/*
 * Insert the formatted log items of a transaction into the CIL, accounting
 * the space consumed and stealing reservation from the transaction ticket
 * for checkpoint overhead (context ticket unit reservation and iclog
 * headers) as required.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	uint32_t		released_space)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
	int			space_used;
	int			order;
	struct xlog_cil_pcp	*cilpcp;

	ASSERT(tp);

	/*
	 * Format the items first; *len accumulates the net change in space
	 * they consume.
	 */
	xlog_cil_insert_format_items(log, tp, &len);

	/*
	 * Space released by whiteout cancellation (see
	 * xlog_cil_process_intents()) offsets the space we are adding.
	 */
	len -= released_space;

	/*
	 * Pin this task to the current CPU and grab the per-cpu CIL
	 * structure; all the accounting below happens against it.
	 */
	cilpcp = get_cpu_ptr(cil->xc_pcp);

	/*
	 * The first commit into an empty CIL donates the checkpoint unit
	 * reservation from this transaction's ticket. The plain test_bit()
	 * avoids the atomic RMW in the common (non-empty) case.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
	    test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
		ctx_res = ctx->ticket->t_unit_res;

	/*
	 * While iclog header budget remains (or once the hard limit is hit),
	 * also steal reservation for the iclog header/split regions this
	 * transaction may require, then deduct its headers from the budget.
	 */
	space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
	if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
	    xlog_cil_over_hard_limit(log, space_used)) {
		split_res = log->l_iclog_hsize +
					sizeof(struct xlog_op_header);
		if (ctx_res)
			ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
		else
			ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
		atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
	}
	cilpcp->space_reserved += ctx_res;

	/*
	 * Accumulate space per-cpu while XLOG_CIL_PCP_SPACE is set; once this
	 * CPU's share of the soft limit would be exceeded, fold into the
	 * global counter (and aggregate all CPUs when the soft limit is hit).
	 */
	if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
		atomic_add(len, &ctx->space_used);
	} else if (cilpcp->space_used + len >
			(XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
		space_used = atomic_add_return(cilpcp->space_used + len,
						&ctx->space_used);
		cilpcp->space_used = 0;

		/*
		 * Global count crossed the soft limit: pull all per-cpu
		 * counters together so subsequent limit checks are accurate.
		 */
		if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
			xlog_cil_insert_pcp_aggregate(cil, ctx);
	} else {
		cilpcp->space_used += len;
	}
	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

	/*
	 * Tag every dirty item with the same order id so global ordering can
	 * be rebuilt at push time from the per-cpu lists. Items already on a
	 * CIL list stay where they are (relogged items).
	 */
	order = atomic_inc_return(&ctx->order_id);
	list_for_each_entry(lip, &tp->t_items, li_trans) {
		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		lip->li_order_id = order;
		if (!list_empty(&lip->li_cil))
			continue;
		list_add_tail(&lip->li_cil, &cilpcp->log_items);
	}
	put_cpu_ptr(cilpcp);

	/*
	 * Charge the stolen reservation and the item space to the
	 * transaction's ticket. Overrun indicates a reservation sizing bug,
	 * so warn loudly and shut the log down.
	 */
	tp->t_ticket->t_curr_res -= ctx_res + len;
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 " log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, " split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
	}
}
0685
0686 static void
0687 xlog_cil_free_logvec(
0688 struct list_head *lv_chain)
0689 {
0690 struct xfs_log_vec *lv;
0691
0692 while (!list_empty(lv_chain)) {
0693 lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
0694 list_del_init(&lv->lv_list);
0695 kmem_free(lv);
0696 }
0697 }
0698
/*
 * Workqueue completion for the discard bio chain of a checkpoint context:
 * clear the busy extents and free the context.
 */
static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}
0710
0711
0712
0713
0714
0715
/*
 * Bio completion for the discard chain. Runs in irq context, so defer the
 * busy-extent clearing and context freeing to the discard workqueue.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}
0726
/*
 * Issue discards for all the busy extents of a committed checkpoint
 * context, chaining them into a single bio. The final bio's completion
 * (xlog_discard_endio) clears the busy extents and frees the context; if
 * no bio was built (e.g. first discard failed), do that work directly.
 */
static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(xfs_has_discard(mp));

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, &bio);
		/* Unsupported discard is not an error worth reporting. */
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}
0768
0769
0770
0771
0772
0773
/*
 * Completion for a checkpoint that has made it to stable storage (or has
 * been aborted by a log shutdown): unpin the committed items, clear (or
 * discard) busy extents, and free the log vector chain and context.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * On shutdown, wake anybody waiting on this context's start/commit
	 * records being written — they will see the shutdown themselves.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, &ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	/* Keep busy extents pending a discard pass only when discarding. */
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(&ctx->lv_chain);

	/*
	 * If busy extents remain they need discarding; the discard
	 * completion frees the context, otherwise free it here.
	 */
	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}
0813
0814 void
0815 xlog_cil_process_committed(
0816 struct list_head *list)
0817 {
0818 struct xfs_cil_ctx *ctx;
0819
0820 while ((ctx = list_first_entry_or_null(list,
0821 struct xfs_cil_ctx, iclog_entry))) {
0822 list_del(&ctx->iclog_entry);
0823 xlog_cil_committed(ctx);
0824 }
0825 }
0826
0827
0828
0829
0830
0831
0832
/*
 * Record the LSN of the iclog a checkpoint record was written into. The
 * first call records the start record's LSN; a later call records the
 * commit record's LSN and iclog. Waiters on strict checkpoint ordering
 * (xlog_cil_order_write()) are woken as each record lands.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx	*ctx,
	struct xlog_in_core	*iclog)
{
	struct xfs_cil		*cil = ctx->cil;
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * First call: this is the start record's iclog. Publish the
		 * start LSN and wake anybody waiting on it under the push
		 * lock so waiters cannot miss the wakeup.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);

		/*
		 * Make sure the metadata this checkpoint describes is on
		 * stable storage before the start record: require a cache
		 * flush before this iclog is written.
		 */
		spin_lock(&cil->xc_log->l_icloglock);
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
		spin_unlock(&cil->xc_log->l_icloglock);
		return;
	}

	/*
	 * Second call: this is the commit record's iclog. Hold a reference
	 * so the iclog isn't written out and completed before we attach the
	 * completion callback below.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * Attach this context to the iclog's callback list so checkpoint
	 * completion runs when the commit record reaches stable storage.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Publish the commit iclog and LSN under the push lock and wake
	 * ordering waiters blocked on this checkpoint's commit record.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}
0896
0897
0898
0899
0900
0901
0902
/*
 * The two checkpoint record types whose writes must be strictly ordered
 * against earlier checkpoints (see xlog_cil_order_write()).
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};
0907
/*
 * Wait until every committing checkpoint with a lower sequence number than
 * ours has written its start (or commit) record, enforcing strict ordering
 * of checkpoint records in the log. xlog_wait() drops xc_push_lock, so the
 * whole committing list scan restarts after each sleep. Returns -EIO if
 * the log shuts down while waiting.
 */
static int
xlog_cil_order_write(
	struct xfs_cil		*cil,
	xfs_csn_t		sequence,
	enum _record_type	record)
{
	struct xfs_cil_ctx	*ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by
		 * the shutdown, but then went back to sleep once already in
		 * the shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/* Only wait on checkpoints older than ours. */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
0955
0956
0957
0958
0959
0960
0961
0962 static int
0963 xlog_cil_write_chain(
0964 struct xfs_cil_ctx *ctx,
0965 uint32_t chain_len)
0966 {
0967 struct xlog *log = ctx->cil->xc_log;
0968 int error;
0969
0970 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
0971 if (error)
0972 return error;
0973 return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
0974 }
0975
0976
0977
0978
0979
0980
0981
0982 static int
0983 xlog_cil_write_commit_record(
0984 struct xfs_cil_ctx *ctx)
0985 {
0986 struct xlog *log = ctx->cil->xc_log;
0987 struct xlog_op_header ophdr = {
0988 .oh_clientid = XFS_TRANSACTION,
0989 .oh_tid = cpu_to_be32(ctx->ticket->t_tid),
0990 .oh_flags = XLOG_COMMIT_TRANS,
0991 };
0992 struct xfs_log_iovec reg = {
0993 .i_addr = &ophdr,
0994 .i_len = sizeof(struct xlog_op_header),
0995 .i_type = XLOG_REG_TYPE_COMMIT,
0996 };
0997 struct xfs_log_vec vec = {
0998 .lv_niovecs = 1,
0999 .lv_iovecp = ®,
1000 };
1001 int error;
1002 LIST_HEAD(lv_chain);
1003 list_add(&vec.lv_list, &lv_chain);
1004
1005 if (xlog_is_shutdown(log))
1006 return -EIO;
1007
1008 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
1009 if (error)
1010 return error;
1011
1012
1013 ctx->ticket->t_curr_res -= reg.i_len;
1014 error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
1015 if (error)
1016 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1017 return error;
1018 }
1019
/*
 * On-stack container for the checkpoint transaction header regions: two op
 * headers (start record + transaction header region), the transaction
 * header itself, and the two iovecs describing them.
 */
struct xlog_cil_trans_hdr {
	struct xlog_op_header	oph[2];
	struct xfs_trans_header	thdr;
	struct xfs_log_iovec	lhdr[2];
};
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
/*
 * Build the two transaction header regions that lead every checkpoint: the
 * start record op header and the transaction header region. The caller's
 * lvhdr is filled in to describe them and the space they take is deducted
 * from the checkpoint ticket.
 */
static void
xlog_cil_build_trans_hdr(
	struct xfs_cil_ctx	*ctx,
	struct xlog_cil_trans_hdr *hdr,
	struct xfs_log_vec	*lvhdr,
	int			num_iovecs)
{
	struct xlog_ticket	*tic = ctx->ticket;
	__be32			tid = cpu_to_be32(tic->t_tid);

	memset(hdr, 0, sizeof(*hdr));

	/* Log start record */
	hdr->oph[0].oh_tid = tid;
	hdr->oph[0].oh_clientid = XFS_TRANSACTION;
	hdr->oph[0].oh_flags = XLOG_START_TRANS;

	/* log iovec region pointer */
	hdr->lhdr[0].i_addr = &hdr->oph[0];
	hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
	hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

	/* log opheader */
	hdr->oph[1].oh_tid = tid;
	hdr->oph[1].oh_clientid = XFS_TRANSACTION;
	hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

	/* transaction header in host byte order format */
	hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
	hdr->thdr.th_tid = tic->t_tid;
	hdr->thdr.th_num_items = num_iovecs;

	/* log iovec region pointer */
	hdr->lhdr[1].i_addr = &hdr->oph[1];
	hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
				sizeof(struct xfs_trans_header);
	hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

	lvhdr->lv_niovecs = 2;
	lvhdr->lv_iovecp = &hdr->lhdr[0];
	lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

	/* Header space comes out of the checkpoint ticket reservation. */
	tic->t_curr_res -= lvhdr->lv_bytes;
}
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092 static int
1093 xlog_cil_order_cmp(
1094 void *priv,
1095 const struct list_head *a,
1096 const struct list_head *b)
1097 {
1098 struct xfs_log_vec *l1 = container_of(a, struct xfs_log_vec, lv_list);
1099 struct xfs_log_vec *l2 = container_of(b, struct xfs_log_vec, lv_list);
1100
1101 return l1->lv_order_id > l2->lv_order_id;
1102 }
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
/*
 * Pull the log items off the CIL context and build the log vector chain to
 * be written. Whiteout items are diverted to the caller's list instead of
 * being written; each remaining item's vector inherits the item's order id
 * (for the later list_sort) and the item is detached from its vector.
 */
static void
xlog_cil_build_lv_chain(
	struct xfs_cil_ctx	*ctx,
	struct list_head	*whiteouts,
	uint32_t		*num_iovecs,
	uint32_t		*num_bytes)
{
	while (!list_empty(&ctx->log_items)) {
		struct xfs_log_item	*item;
		struct xfs_log_vec	*lv;

		item = list_first_entry(&ctx->log_items,
					struct xfs_log_item, li_cil);

		/* Whiteouts are cancelled, not written. */
		if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
			list_move(&item->li_cil, whiteouts);
			trace_xfs_cil_whiteout_skip(item);
			continue;
		}

		lv = item->li_lv;
		lv->lv_order_id = item->li_order_id;

		/* we don't write ordered log vectors */
		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
			*num_bytes += lv->lv_bytes;
		*num_iovecs += lv->lv_niovecs;
		list_add_tail(&lv->lv_list, &ctx->lv_chain);

		/* Detach the item: the vector now belongs to the chain. */
		list_del_init(&item->li_cil);
		item->li_order_id = 0;
		item->li_lv = NULL;
	}
}
1147
1148 static void
1149 xlog_cil_cleanup_whiteouts(
1150 struct list_head *whiteouts)
1151 {
1152 while (!list_empty(whiteouts)) {
1153 struct xfs_log_item *item = list_first_entry(whiteouts,
1154 struct xfs_log_item, li_cil);
1155 list_del_init(&item->li_cil);
1156 trace_xfs_cil_whiteout_unpin(item);
1157 item->li_ops->iop_unpin(item, 1);
1158 }
1159 }
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
/*
 * Push the CIL to the log: gather the committed items of the current
 * context into a log vector chain, switch in a new empty context, write
 * the chain and the commit record, and hand completion off to the commit
 * record's iclog callbacks.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_cil_ctx	*new_ctx;
	int			num_iovecs = 0;
	int			num_bytes = 0;
	int			error = 0;
	struct xlog_cil_trans_hdr thdr;
	struct xfs_log_vec	lvhdr = {};
	xfs_csn_t		push_seq;
	bool			push_commit_stable;
	LIST_HEAD		(whiteouts);
	struct xlog_ticket	*ticket;

	/* Allocate the replacement context before taking any locks. */
	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	/* Exclusive ctx lock quiesces all committers for the switch. */
	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * A push is now in progress, so wake anybody throttled on the hard
	 * space limit — space is about to be freed up.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);

	xlog_cil_push_pcp_aggregate(cil, ctx);

	/* Nothing committed into this context: nothing to push. */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* The requested sequence has already been pushed: skip. */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * Put the context on the committing list before dropping the push
	 * lock so ordering waiters (xlog_cil_order_write, force) can find
	 * it.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);

	/*
	 * Switch in the new, empty context so commits can continue while we
	 * format and write this checkpoint, then drop the ctx lock to let
	 * them run.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Re-establish global commit ordering of the vectors built from the
	 * unordered per-cpu lists.
	 */
	list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);

	/*
	 * Build the checkpoint transaction header vector and prepend it to
	 * the chain (it is on-stack, so it is removed again after the
	 * write).
	 */
	xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
	num_bytes += lvhdr.lv_bytes;
	list_add(&lvhdr.lv_list, &ctx->lv_chain);

	error = xlog_cil_write_chain(ctx, num_bytes);
	list_del(&lvhdr.lv_list);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * Grab the ticket from the ctx: the ctx may be freed once the commit
	 * iclog is released (checkpoint completion may run), so it cannot
	 * be referenced after that point.
	 */
	ticket = ctx->ticket;

	/*
	 * If start and commit records landed in different iclogs, ensure
	 * everything up to (but not including) the commit iclog is stable
	 * before the commit record can be written: wait on the previous
	 * iclog if it is not yet ordered behind us, and require a cache
	 * flush on the commit iclog.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * NOTE: xlog_wait_on_iclog() drops l_icloglock,
			 * hence the re-lock afterwards.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/* Commit iclog must flush the cache before it is written. */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit record must reach stable storage before completion
	 * callbacks run, so it is always written FUA. If the caller asked
	 * for a stable commit (async force), switch the iclog out now so it
	 * gets written immediately rather than waiting to fill.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	/* NOTE(review): 'ticket' was already assigned above — redundant. */
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	xlog_cil_cleanup_whiteouts(&whiteouts);
	xfs_log_ticket_ungrant(log, ticket);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return;

out_abort_free_ticket:
	ASSERT(xlog_is_shutdown(log));
	xlog_cil_cleanup_whiteouts(&whiteouts);
	/*
	 * No commit iclog: run completion directly here. Otherwise release
	 * the iclog and let its callbacks complete the (aborted)
	 * checkpoint.
	 */
	if (!ctx->commit_iclog) {
		xfs_log_ticket_ungrant(log, ctx->ticket);
		xlog_cil_committed(ctx);
		return;
	}
	spin_lock(&log->l_icloglock);
	ticket = ctx->ticket;
	xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
	/* Not safe to reference ctx now! */
	spin_unlock(&log->l_icloglock);
	xfs_log_ticket_ungrant(log, ticket);
}
1415
1416
1417
1418
1419
1420
1421
1422
/*
 * Trigger a background CIL push if the current context has passed the soft
 * space limit, and throttle the committer on xc_push_wait once the hard
 * limit is exceeded.
 *
 * Called with xc_ctx_lock held shared and ALWAYS releases it before
 * returning (hence the __releases annotation); in the throttle path
 * xc_push_lock is released by xlog_wait().
 */
static void
xlog_cil_push_background(
	struct xlog	*log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil	*cil = log->l_cilp;
	int		space_used = atomic_read(&cil->xc_ctx->space_used);

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));

	/*
	 * Fast path: below the soft limit, or a push of this sequence is
	 * already requested and we are still under the blocking limit with
	 * nobody throttled — nothing to do.
	 */
	if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
	    (cil->xc_push_seq == cil->xc_current_sequence &&
	     space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
	     !waitqueue_active(&cil->xc_push_wait))) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock before sleeping: the push worker needs it
	 * exclusively, so holding it while throttled would deadlock the
	 * push that frees the space we are waiting for.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * Over the hard limit: throttle this committer until the push frees
	 * space. xlog_wait() releases xc_push_lock for us.
	 */
	if (xlog_cil_over_hard_limit(log, space_used)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);

}
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
/*
 * Request an immediate push of the CIL up to the given sequence. When
 * synchronous (!async), flush the push workqueue first so a queued push of
 * an earlier sequence has started before we check whether ours is needed.
 * When async, also ask the push worker to write the commit record out
 * immediately (push_commit_stable).
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq,
	bool		async)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	spin_lock(&cil->xc_push_lock);

	/*
	 * Set the stable-commit request before the duplicate-push check
	 * below: even if the push is already queued, this flag still needs
	 * to reach the worker.
	 */
	cil->xc_push_commit_stable = async;

	/*
	 * Nothing to push if the CIL is empty, or this sequence's push is
	 * already queued or done.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
	    push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}
1548
1549 bool
1550 xlog_cil_empty(
1551 struct xlog *log)
1552 {
1553 struct xfs_cil *cil = log->l_cilp;
1554 bool empty = false;
1555
1556 spin_lock(&cil->xc_push_lock);
1557 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
1558 empty = true;
1559 spin_unlock(&cil->xc_push_lock);
1560 return empty;
1561 }
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
/*
 * For each intent-done item in the transaction, if its matching intent is
 * still in the current CIL checkpoint, cancel the pair: mark the intent as
 * a whiteout (so the push skips writing it), free its formatted log
 * vector, and release the done item. Returns the number of log bytes
 * freed, which the caller credits back to the CIL space accounting.
 */
static uint32_t
xlog_cil_process_intents(
	struct xfs_cil		*cil,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *ilip, *next;
	uint32_t		len = 0;

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
			continue;

		/* Whiteout only applies within the current checkpoint. */
		ilip = lip->li_ops->iop_intent(lip);
		if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
			continue;
		set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
		trace_xfs_cil_whiteout_mark(ilip);
		/* The intent's log vector will never be written: free it. */
		len += ilip->li_lv->lv_bytes;
		kmem_free(ilip->li_lv);
		ilip->li_lv = NULL;

		/* The done item itself is released without being logged. */
		xfs_trans_del_item(lip);
		lip->li_ops->iop_release(lip);
	}
	return len;
}
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
/*
 * Commit a transaction into the CIL: format its dirty items into log
 * vectors, insert them into the current checkpoint context, release the
 * transaction's log reservation, and kick a background push if the CIL has
 * grown past the space limits. Returns the checkpoint sequence the items
 * joined via *commit_seq, if requested.
 */
void
xlog_cil_commit(
	struct xlog		*log,
	struct xfs_trans	*tp,
	xfs_csn_t		*commit_seq,
	bool			regrant)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;
	uint32_t		released_space = 0;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a memory
	 * reclaim that requires log forces to make progress.
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
		released_space = xlog_cil_process_intents(cil, tp);

	xlog_cil_insert_items(log, tp, released_space);

	/* Reservation handling: regrant for reuse unless shutting down. */
	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Detach the items from the transaction and let them know which
	 * checkpoint sequence they were committed into (iop_committing),
	 * before dropping the context lock via the push-background call
	 * below.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}
1669
1670
1671
1672
1673
1674
/*
 * Asynchronously flush the current CIL checkpoint sequence to the log. If
 * the CIL is empty there may still be a previous checkpoint pinned in
 * memory whose iclog needs pushing, so force the log in that case.
 */
void
xlog_cil_flush(
	struct xlog	*log)
{
	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);

	/*
	 * An empty CIL means xlog_cil_push_now() did nothing; force the log
	 * so any previously committed checkpoint's iclog is submitted.
	 */
	if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
		xfs_log_force(log->l_mp, 0);
}
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
/*
 * Force the CIL to the log up to the given checkpoint sequence (0 means
 * the current sequence), waiting until that checkpoint's commit record has
 * been written. Returns the commit LSN of the sequence (NULLCOMMITLSN if
 * it was already stable) or 0 if the log shut down while waiting.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog	*log,
	xfs_csn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * xlog_wait() drops the push lock, so the whole scan restarts from
	 * here after every sleep — contexts may have come or gone.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * Walk the committing contexts and wait for the one matching our
	 * sequence to record its commit LSN.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by
		 * the shutdown, but then went back to sleep once already in
		 * the shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * If the sequence we are forcing is still the current one and the
	 * CIL is not empty, the push worker has not yet started processing
	 * it (it was not on the committing list and the CIL has not been
	 * switched). Go back and wait for the push to begin — otherwise we
	 * could return before the checkpoint is even queued.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * Shutdown while waiting: the caller cannot rely on any checkpoint
	 * state, so return 0 to signal the force failed.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
1792
1793
1794
1795
1796
1797
1798
1799
1800
/*
 * CPU hot-unplug handler: migrate the dead CPU's per-cpu CIL state (stolen
 * reservation, item and busy-extent lists, space count) into the current
 * context. Taking xc_ctx_lock exclusively keeps commits and the push
 * worker out while the transfer happens.
 */
void
xlog_cil_pcp_dead(
	struct xlog		*log,
	unsigned int		cpu)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xlog_cil_pcp	*cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
	struct xfs_cil_ctx	*ctx;

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;
	/* Return stolen reservation to the ctx ticket, if it exists yet. */
	if (ctx->ticket)
		ctx->ticket->t_curr_res += cilpcp->space_reserved;
	cilpcp->space_reserved = 0;

	if (!list_empty(&cilpcp->log_items))
		list_splice_init(&cilpcp->log_items, &ctx->log_items);
	if (!list_empty(&cilpcp->busy_extents))
		list_splice_init(&cilpcp->busy_extents, &ctx->busy_extents);
	atomic_add(cilpcp->space_used, &ctx->space_used);
	cilpcp->space_used = 0;
	up_write(&cil->xc_ctx_lock);
}
1824
1825
1826
1827
/*
 * Perform initial CIL structure initialisation: allocate the CIL, its
 * dedicated push workqueue and per-cpu structures, and install the first
 * (empty) checkpoint context. Returns 0 on success or -ENOMEM.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;
	struct xlog_cil_pcp *cilpcp;
	int		cpu;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;
	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log device can see from checkpoint completions.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	cil->xc_log = log;
	cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
	if (!cil->xc_pcp)
		goto out_destroy_wq;

	/* Possible (not just online) CPUs: hotplug can bring them up. */
	for_each_possible_cpu(cpu) {
		cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
		INIT_LIST_HEAD(&cilpcp->busy_extents);
		INIT_LIST_HEAD(&cilpcp->log_items);
	}

	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);
	return 0;

out_destroy_wq:
	destroy_workqueue(cil->xc_push_wq);
out_destroy_cil:
	kmem_free(cil);
	return -ENOMEM;
}
1879
1880 void
1881 xlog_cil_destroy(
1882 struct xlog *log)
1883 {
1884 struct xfs_cil *cil = log->l_cilp;
1885
1886 if (cil->xc_ctx) {
1887 if (cil->xc_ctx->ticket)
1888 xfs_log_ticket_put(cil->xc_ctx->ticket);
1889 kmem_free(cil->xc_ctx);
1890 }
1891
1892 ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1893 free_percpu(cil->xc_pcp);
1894 destroy_workqueue(cil->xc_push_wq);
1895 kmem_free(cil);
1896 }
1897