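/*
 * NILFS disk address translation (DAT) file.
 *
 * The DAT maps the virtual block numbers used throughout NILFS to the
 * on-disk block numbers where those blocks currently live.
 */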
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

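/**
 * struct nilfs_dat_info - on-memory private data of the DAT file
 * @mi: on-memory private data of the metadata file
 * @palloc_cache: persistent object allocator cache of the DAT file
 * @shadow: shadow map of the DAT file
 */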
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

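/**
 * nilfs_dat_prepare_alloc - prepare to allocate a new DAT entry
 * @dat: DAT file inode
 * @req: allocation request
 *
 * Return: 0 on success, or a negative error code on failure.
 */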
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

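/**
 * nilfs_dat_prepare_start - prepare to assign a disk block to a DAT entry
 * @dat: DAT file inode
 * @req: request with the virtual block number set in pr_entry_nr
 *
 * Return: 0 on success, or a negative error code on failure.
 */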
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

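/**
 * nilfs_dat_prepare_end - prepare to finish the lifetime of a DAT entry
 * @dat: DAT file inode
 * @req: request with the virtual block number set in pr_entry_nr
 *
 * If the entry has no disk block assigned, this also prepares the entry
 * itself for deallocation.
 *
 * Return: 0 on success, or a negative error code on failure.
 */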
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

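/**
 * nilfs_dat_prepare_update - prepare to update a DAT entry
 * @dat: DAT file inode
 * @oldreq: request for the entry being retired
 * @newreq: request for the entry being allocated in its place
 *
 * Return: 0 on success, or a negative error code on failure.
 */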
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}

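/**
 * nilfs_dat_mark_dirty - mark the entry block of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Return: 0 on success, or a negative error code on failure.
 */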
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

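/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers to free
 * @nitems: number of elements in @vblocknrs
 *
 * Return: 0 on success, or a negative error code on failure.
 */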
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

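/**
 * nilfs_dat_move - change the disk block number of a virtual block
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: new disk block number
 *
 * Return: 0 on success, or a negative error code on failure.
 */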
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

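	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */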
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		nilfs_crit(dat->i_sb,
			   "%s: invalid vblocknr = %llu, [%llu, %llu)",
			   __func__, (unsigned long long)vblocknr,
			   (unsigned long long)le64_to_cpu(entry->de_start),
			   (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

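/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: place to store the resulting disk block number
 *
 * Return: 0 on success with the block number stored in @blocknrp, or a
 * negative error code on failure (-ENOENT if no block is assigned to
 * @vblocknr).
 */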
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}

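/**
 * nilfs_dat_get_vinfo - get information on a group of virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures with vi_vblocknr filled in
 * @visz: size of one nilfs_vinfo structure
 * @nvi: number of structures in @buf
 *
 * Return: the number of structures filled in, or a negative error code.
 */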
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
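		/*
		 * Compute the range [first, last] of virtual block
		 * numbers whose entries live in this entry block.
		 */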
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}

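/**
 * nilfs_dat_read - read or get the DAT file inode
 * @sb: super block instance
 * @entry_size: size of a DAT entry
 * @raw_inode: on-disk DAT inode
 * @inodep: buffer to store the resulting inode
 *
 * Return: 0 on success, or a negative error code on failure.
 */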
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		nilfs_err(sb, "too large DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		nilfs_err(sb, "too small DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
	if (err)
		goto failed;

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}