Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * NILFS segment buffer
0004  *
0005  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
0006  *
0007  * Written by Ryusuke Konishi.
0008  *
0009  */
0010 
0011 #include <linux/buffer_head.h>
0012 #include <linux/writeback.h>
0013 #include <linux/crc32.h>
0014 #include <linux/backing-dev.h>
0015 #include <linux/slab.h>
0016 #include "page.h"
0017 #include "segbuf.h"
0018 
0019 
/**
 * struct nilfs_write_info - context for building and submitting log BIOs
 * @nilfs:       nilfs object (provides the block device and block size)
 * @bio:         BIO currently under construction, or NULL if none is open
 * @start:       block index (within the log) where the open region begins
 * @end:         block index one past the last block added to the region
 * @rest_blocks: number of blocks of the log not yet submitted
 * @max_pages:   upper bound on pages per BIO (set to BIO_MAX_VECS)
 * @nr_vecs:     number of bio vecs to request for the next BIO allocation
 * @blocknr:     disk block number of the start of the log (sb_pseg_start)
 */
struct nilfs_write_info {
	struct the_nilfs       *nilfs;
	struct bio         *bio;
	int         start, end; /* The region to be submitted */
	int         rest_blocks;
	int         max_pages;
	int         nr_vecs;
	sector_t        blocknr;
};
0029 
0030 static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
0031                   struct the_nilfs *nilfs);
0032 static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
0033 
0034 struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
0035 {
0036     struct nilfs_segment_buffer *segbuf;
0037 
0038     segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
0039     if (unlikely(!segbuf))
0040         return NULL;
0041 
0042     segbuf->sb_super = sb;
0043     INIT_LIST_HEAD(&segbuf->sb_list);
0044     INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
0045     INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
0046     segbuf->sb_super_root = NULL;
0047 
0048     init_completion(&segbuf->sb_bio_event);
0049     atomic_set(&segbuf->sb_err, 0);
0050     segbuf->sb_nbio = 0;
0051 
0052     return segbuf;
0053 }
0054 
/**
 * nilfs_segbuf_free - free a segment buffer descriptor
 * @segbuf: segment buffer to free
 *
 * Only returns the descriptor to its slab cache; buffer heads still on
 * the segsum/payload lists are not released here (see
 * nilfs_segbuf_clear(), which callers such as nilfs_truncate_logs()
 * invoke first).
 */
void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
	kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}
0059 
/**
 * nilfs_segbuf_map - map a log onto a physical segment
 * @segbuf: segment buffer
 * @segnum: number of the full segment to map onto
 * @offset: block offset of the log within the full segment
 * @nilfs: nilfs object
 *
 * Records the block range of full segment @segnum and places the
 * partial segment (log) @offset blocks past its start.  sb_rest_blocks
 * is the number of blocks available from the log start through the end
 * of the full segment, inclusive.
 */
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
		      unsigned long offset, struct the_nilfs *nilfs)
{
	segbuf->sb_segnum = segnum;
	nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
				&segbuf->sb_fseg_end);

	segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
0071 
/**
 * nilfs_segbuf_map_cont - map a new log behind a given log
 * @segbuf: new segment buffer
 * @prev: segment buffer containing a log to be continued
 *
 * Places @segbuf in the same full segment as @prev, starting
 * immediately after @prev's blocks.  sb_rest_blocks is recomputed as
 * the space remaining to the end of the full segment, inclusive.
 */
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
			   struct nilfs_segment_buffer *prev)
{
	segbuf->sb_segnum = prev->sb_segnum;
	segbuf->sb_fseg_start = prev->sb_fseg_start;
	segbuf->sb_fseg_end = prev->sb_fseg_end;
	/* The new log begins right after the previous log's last block. */
	segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
0087 
/**
 * nilfs_segbuf_set_next_segnum - record the successor segment of a log
 * @segbuf: segment buffer
 * @nextnum: segment number of the next full segment
 * @nilfs: nilfs object
 *
 * Stores both the next segment number and its start block number; the
 * latter is written into the segment summary as ss_next by
 * nilfs_segbuf_fill_in_segsum().
 */
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
				  __u64 nextnum, struct the_nilfs *nilfs)
{
	segbuf->sb_nextnum = nextnum;
	segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}
0094 
0095 int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
0096 {
0097     struct buffer_head *bh;
0098 
0099     bh = sb_getblk(segbuf->sb_super,
0100                segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
0101     if (unlikely(!bh))
0102         return -ENOMEM;
0103 
0104     nilfs_segbuf_add_segsum_buffer(segbuf, bh);
0105     return 0;
0106 }
0107 
0108 int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
0109                 struct buffer_head **bhp)
0110 {
0111     struct buffer_head *bh;
0112 
0113     bh = sb_getblk(segbuf->sb_super,
0114                segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
0115     if (unlikely(!bh))
0116         return -ENOMEM;
0117 
0118     nilfs_segbuf_add_payload_buffer(segbuf, bh);
0119     *bhp = bh;
0120     return 0;
0121 }
0122 
0123 int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags,
0124                time64_t ctime, __u64 cno)
0125 {
0126     int err;
0127 
0128     segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
0129     err = nilfs_segbuf_extend_segsum(segbuf);
0130     if (unlikely(err))
0131         return err;
0132 
0133     segbuf->sb_sum.flags = flags;
0134     segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
0135     segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
0136     segbuf->sb_sum.ctime = ctime;
0137     segbuf->sb_sum.cno = cno;
0138     return 0;
0139 }
0140 
/**
 * nilfs_segbuf_fill_in_segsum - fill in the on-disk segment summary header
 * @segbuf: segment buffer
 *
 * Copies the in-memory summary (segbuf->sb_sum) into the raw
 * nilfs_segment_summary structure at the head of the first summary
 * block, converting each field to little-endian.  The checksum fields
 * (ss_datasum, ss_sumsum) are not set here; they are filled in later
 * by the CRC routines.
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct nilfs_segment_summary *raw_sum;
	struct buffer_head *bh_sum;

	/* The raw summary lives at the start of the first summary block. */
	bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
			    struct buffer_head, b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
	raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
	raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
	raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
	raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
	raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
	raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
	raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
	raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
	raw_sum->ss_pad      = 0;
	raw_sum->ss_cno      = cpu_to_le64(segbuf->sb_sum.cno);
}
0165 
0166 /*
0167  * CRC calculation routines
0168  */
/**
 * nilfs_segbuf_fill_in_segsum_crc - compute and store the summary checksum
 * @segbuf: segment buffer
 * @seed: CRC32 seed value
 *
 * Computes a crc32 over sb_sum.sumbytes bytes of segment summary data,
 * which may span several summary blocks.  The two checksum fields at
 * the head of the raw summary (ss_datasum, ss_sumsum) are excluded
 * from the calculation.  The result is stored in ss_sumsum.
 */
static void
nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	unsigned long size, bytes = segbuf->sb_sum.sumbytes;
	u32 crc;

	/* First summary block: holds the raw summary header. */
	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);

	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	size = min_t(unsigned long, bytes, bh->b_size);
	/* Start the CRC just past the ss_datasum and ss_sumsum fields. */
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum +
		       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
		       size - (sizeof(raw_sum->ss_datasum) +
			       sizeof(raw_sum->ss_sumsum)));

	/*
	 * Fold in the remaining summary blocks; `bytes - size` tracks how
	 * much of sumbytes is still left, so the last block may be partial.
	 */
	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		bytes -= size;
		size = min_t(unsigned long, bytes, bh->b_size);
		crc = crc32_le(crc, bh->b_data, size);
	}
	raw_sum->ss_sumsum = cpu_to_le32(crc);
}
0196 
/**
 * nilfs_segbuf_fill_in_data_crc - compute and store the whole-log checksum
 * @segbuf: segment buffer
 * @seed: CRC32 seed value
 *
 * Computes a crc32 covering the first summary block (excluding only the
 * ss_datasum field itself), every remaining summary block in full, and
 * every payload block in full.  The result is stored in ss_datasum.
 */
static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
					  u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	void *kaddr;
	u32 crc;

	/* First summary block: skip only the ss_datasum field. */
	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
		       bh->b_size - sizeof(raw_sum->ss_datasum));

	/* Remaining summary blocks are covered in full. */
	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		crc = crc32_le(crc, bh->b_data, bh->b_size);
	}
	/* Payload blocks may live in highmem, so map each page briefly. */
	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		kaddr = kmap_atomic(bh->b_page);
		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
		kunmap_atomic(kaddr);
	}
	raw_sum->ss_datasum = cpu_to_le32(crc);
}
0223 
/**
 * nilfs_segbuf_fill_in_super_root_crc - checksum the super root block
 * @segbuf: segment buffer; sb_super_root must point at the super root
 *          buffer head (caller checks this, see
 *          nilfs_add_checksums_on_logs())
 * @seed: CRC32 seed value
 *
 * Computes a crc32 over the super root — whose size is derived from the
 * on-disk inode size — excluding the leading sr_sum field, and stores
 * the result in sr_sum.
 */
static void
nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
				    u32 seed)
{
	struct nilfs_super_root *raw_sr;
	struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info;
	unsigned int srsize;
	u32 crc;

	raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
	srsize = NILFS_SR_BYTES(nilfs->ns_inode_size);
	crc = crc32_le(seed,
		       (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
		       srsize - sizeof(raw_sr->sr_sum));
	raw_sr->sr_sum = cpu_to_le32(crc);
}
0240 
0241 static void nilfs_release_buffers(struct list_head *list)
0242 {
0243     struct buffer_head *bh, *n;
0244 
0245     list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
0246         list_del_init(&bh->b_assoc_buffers);
0247         brelse(bh);
0248     }
0249 }
0250 
0251 static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
0252 {
0253     nilfs_release_buffers(&segbuf->sb_segsum_buffers);
0254     nilfs_release_buffers(&segbuf->sb_payload_buffers);
0255     segbuf->sb_super_root = NULL;
0256 }
0257 
0258 /*
0259  * Iterators for segment buffers
0260  */
/**
 * nilfs_clear_logs - release the buffers of every log in a list
 * @logs: list of segment buffers
 *
 * Clears each segment buffer in place; the buffers stay on @logs and
 * are not freed (contrast with nilfs_truncate_logs()).
 */
void nilfs_clear_logs(struct list_head *logs)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, logs, sb_list)
		nilfs_segbuf_clear(segbuf);
}
0268 
/**
 * nilfs_truncate_logs - remove and free all logs following a given one
 * @logs: list of segment buffers
 * @last: last segment buffer to keep, or NULL to drop the whole list
 *
 * Every segment buffer after @last is unlinked, cleared, and freed.
 * list_prepare_entry() makes the continue-iteration start right after
 * @last, or from the list head when @last is NULL.
 */
void nilfs_truncate_logs(struct list_head *logs,
			 struct nilfs_segment_buffer *last)
{
	struct nilfs_segment_buffer *n, *segbuf;

	segbuf = list_prepare_entry(last, logs, sb_list);
	list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_clear(segbuf);
		nilfs_segbuf_free(segbuf);
	}
}
0281 
0282 int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
0283 {
0284     struct nilfs_segment_buffer *segbuf;
0285     int ret = 0;
0286 
0287     list_for_each_entry(segbuf, logs, sb_list) {
0288         ret = nilfs_segbuf_write(segbuf, nilfs);
0289         if (ret)
0290             break;
0291     }
0292     return ret;
0293 }
0294 
0295 int nilfs_wait_on_logs(struct list_head *logs)
0296 {
0297     struct nilfs_segment_buffer *segbuf;
0298     int err, ret = 0;
0299 
0300     list_for_each_entry(segbuf, logs, sb_list) {
0301         err = nilfs_segbuf_wait(segbuf);
0302         if (err && !ret)
0303             ret = err;
0304     }
0305     return ret;
0306 }
0307 
/**
 * nilfs_add_checksums_on_logs - add checksums on the logs
 * @logs: list of segment buffers storing target logs
 * @seed: checksum seed value
 */
void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, logs, sb_list) {
		/*
		 * NOTE(review): ordering appears intentional — the data
		 * checksum covers ss_sumsum (and, presumably, the super
		 * root block via the payload list), so the other two
		 * checksums must be written before it is computed.
		 */
		if (segbuf->sb_super_root)
			nilfs_segbuf_fill_in_super_root_crc(segbuf, seed);
		nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
		nilfs_segbuf_fill_in_data_crc(segbuf, seed);
	}
}
0324 
0325 /*
0326  * BIO operations
0327  */
/**
 * nilfs_end_bio_write - completion callback for log write BIOs
 * @bio: completed BIO
 *
 * Records an I/O error, if any, in the owning segment buffer's error
 * counter, releases the BIO, and signals the completion that
 * nilfs_segbuf_wait() blocks on (one completion per submitted BIO).
 */
static void nilfs_end_bio_write(struct bio *bio)
{
	struct nilfs_segment_buffer *segbuf = bio->bi_private;

	if (bio->bi_status)
		atomic_inc(&segbuf->sb_err);

	bio_put(bio);
	complete(&segbuf->sb_bio_event);
}
0338 
/**
 * nilfs_segbuf_submit_bio - submit the BIO under construction
 * @segbuf: segment buffer the BIO belongs to
 * @wi: write context holding the BIO and region bookkeeping
 *
 * Hooks up the completion callback, submits the BIO, and counts it in
 * sb_nbio so nilfs_segbuf_wait() knows how many completions to expect.
 * The write window is then advanced: the submitted region is deducted
 * from rest_blocks, nr_vecs is resized for the next BIO, and the next
 * region starts where this one ended.
 *
 * Return: always 0.
 */
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
				   struct nilfs_write_info *wi)
{
	struct bio *bio = wi->bio;

	bio->bi_end_io = nilfs_end_bio_write;
	bio->bi_private = segbuf;
	submit_bio(bio);
	segbuf->sb_nbio++;

	wi->bio = NULL;
	wi->rest_blocks -= wi->end - wi->start;
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end;
	return 0;
}
0355 
0356 static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
0357                        struct nilfs_write_info *wi)
0358 {
0359     wi->bio = NULL;
0360     wi->rest_blocks = segbuf->sb_sum.nblocks;
0361     wi->max_pages = BIO_MAX_VECS;
0362     wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
0363     wi->start = wi->end = 0;
0364     wi->blocknr = segbuf->sb_pseg_start;
0365 }
0366 
/**
 * nilfs_segbuf_submit_bh - add one buffer head to the BIO under construction
 * @segbuf: segment buffer being written
 * @wi: write context
 * @bh: buffer head to add
 *
 * Allocates a write BIO on demand, positioned at the disk sector of the
 * current region end, and appends @bh's page to it.  If the BIO cannot
 * take the full buffer (it is full), the BIO is submitted and the add
 * is retried on a fresh BIO — @bh itself is never dropped.
 *
 * Return: 0 on success, or a negative error code from BIO submission.
 */
static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
				  struct nilfs_write_info *wi,
				  struct buffer_head *bh)
{
	int len, err;

	BUG_ON(wi->nr_vecs <= 0);
 repeat:
	if (!wi->bio) {
		wi->bio = bio_alloc(wi->nilfs->ns_bdev, wi->nr_vecs,
				    REQ_OP_WRITE, GFP_NOIO);
		/* Convert the block number to a 512-byte sector number. */
		wi->bio->bi_iter.bi_sector = (wi->blocknr + wi->end) <<
			(wi->nilfs->ns_blocksize_bits - 9);
	}

	len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (len == bh->b_size) {
		wi->end++;
		return 0;
	}
	/* bio is FULL */
	err = nilfs_segbuf_submit_bio(segbuf, wi);
	/* never submit current bh */
	if (likely(!err))
		goto repeat;
	return err;
}
0394 
/**
 * nilfs_segbuf_write - submit write requests of a log
 * @segbuf: buffer storing a log to be written
 * @nilfs: nilfs object
 *
 * Submits the segment summary blocks followed by the payload blocks,
 * batching them into BIOs via the write context.  The final BIO is
 * flagged REQ_SYNC.  Completion is not waited for here; callers use
 * nilfs_segbuf_wait() for that.
 *
 * Return Value: On Success, 0 is returned. On Error, one of the following
 * negative error code is returned.
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
			      struct the_nilfs *nilfs)
{
	struct nilfs_write_info wi;
	struct buffer_head *bh;
	int res = 0;

	wi.nilfs = nilfs;
	nilfs_segbuf_prepare_write(segbuf, &wi);

	/* Summary blocks come first; they lead the log on disk. */
	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh);
		if (unlikely(res))
			goto failed_bio;
	}

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh);
		if (unlikely(res))
			goto failed_bio;
	}

	if (wi.bio) {
		/*
		 * Last BIO is always sent through the following
		 * submission.
		 */
		wi.bio->bi_opf |= REQ_SYNC;
		res = nilfs_segbuf_submit_bio(segbuf, &wi);
	}

 failed_bio:
	return res;
}
0441 
/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 *
 * Blocks once per BIO submitted by nilfs_segbuf_write() (tracked in
 * sb_nbio), then checks the error counter accumulated by the
 * completion callback.
 *
 * Return Value: On Success, 0 is returned. On Error, one of the following
 * negative error code is returned.
 *
 * %-EIO - I/O error
 */
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
	int err = 0;

	if (!segbuf->sb_nbio)
		return 0;

	/* One completion is signaled per submitted BIO. */
	do {
		wait_for_completion(&segbuf->sb_bio_event);
	} while (--segbuf->sb_nbio > 0);

	/* sb_err counts BIOs that completed with an error status. */
	if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
		nilfs_err(segbuf->sb_super,
			  "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu",
			  (unsigned long long)segbuf->sb_pseg_start,
			  segbuf->sb_sum.nblocks,
			  (unsigned long long)segbuf->sb_segnum);
		err = -EIO;
	}
	return err;
}