0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0054
0055 #include <linux/slab.h>
0056 #include <linux/spinlock.h>
0057 #include <linux/buffer_head.h>
0058 #include <linux/sort.h>
0059 #include <linux/gfs2_ondisk.h>
0060 #include <linux/crc32.h>
0061 #include <linux/vmalloc.h>
0062 #include <linux/bio.h>
0063
0064 #include "gfs2.h"
0065 #include "incore.h"
0066 #include "dir.h"
0067 #include "glock.h"
0068 #include "inode.h"
0069 #include "meta_io.h"
0070 #include "quota.h"
0071 #include "rgrp.h"
0072 #include "trans.h"
0073 #include "bmap.h"
0074 #include "util.h"
0075
0076 #define MAX_RA_BLOCKS 32
0077
0078 #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
0079 #define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
0080 #define GFS2_HASH_INDEX_MASK 0xffffc000
0081 #define GFS2_USE_HASH_FLAG 0x2000
0082
0083 struct qstr gfs2_qdot __read_mostly;
0084 struct qstr gfs2_qdotdot __read_mostly;
0085
0086 typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
0087 const struct qstr *name, void *opaque);
0088
/**
 * gfs2_dir_get_new_buffer - Get and initialise a brand new dir data block
 * @ip: the directory inode
 * @block: the disk block number to use
 * @bhp: return pointer for the buffer head
 *
 * The block is stamped as journaled-data (JD) metadata and everything
 * past the meta header is zeroed.  The buffer is added to the current
 * transaction.
 *
 * Returns: 0 (this path cannot currently fail)
 */
int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
			    struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD);
	gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	*bhp = bh;
	return 0;
}
0101
0102 static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
0103 struct buffer_head **bhp)
0104 {
0105 struct buffer_head *bh;
0106 int error;
0107
0108 error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, 0, &bh);
0109 if (error)
0110 return error;
0111 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {
0112 brelse(bh);
0113 return -EIO;
0114 }
0115 *bhp = bh;
0116 return 0;
0117 }
0118
/*
 * Write directory data directly into the stuffed dinode block.
 * @offset is relative to the start of the directory data (i.e. just
 * past the on-disk dinode header).  Grows i_size if the write extends
 * the file and updates the timestamps.
 *
 * Returns: the number of bytes written, or a negative errno.
 */
static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
				  unsigned int offset, unsigned int size)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
	if (ip->i_inode.i_size < offset + size)
		i_size_write(&ip->i_inode, offset + size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_dinode_out(ip, dibh->b_data);

	brelse(dibh);

	return size;
}
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
/**
 * gfs2_dir_write_data - Write directory information to the directory
 * @ip: The GFS2 directory inode
 * @buf: The data to write
 * @offset: The file offset to start writing at
 * @size: The amount of data to write
 *
 * Stuffed directories that still fit are written in place; otherwise
 * the directory is unstuffed and the data written block by block into
 * journaled-data blocks, allocating new blocks as needed.
 *
 * Returns: the number of bytes written, or a negative errno.  If some
 * bytes were copied before an error occurred the partial count is
 * returned instead of the error.
 */
static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
			       u64 offset, unsigned int size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	u64 lblock, dblock;
	u32 extlen = 0;
	unsigned int o;
	int copied = 0;
	int error = 0;
	bool new = false;

	if (!size)
		return 0;

	if (gfs2_is_stuffed(ip) && offset + size <= gfs2_max_stuffed_size(ip))
		return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
					      size);

	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
		return -EINVAL;

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip);
		if (error)
			return error;
	}

	/* Split @offset into a logical block and an in-block offset,
	   skipping each block's metadata header */
	lblock = offset;
	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		unsigned int amount;
		struct buffer_head *bh;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		/* Map (allocating if necessary) the next extent */
		if (!extlen) {
			extlen = 1;
			error = gfs2_alloc_extent(&ip->i_inode, lblock, &dblock,
						  &extlen, &new);
			if (error)
				goto fail;
			error = -EIO;
			if (gfs2_assert_withdraw(sdp, dblock))
				goto fail;
		}

		/* A whole-block write or a freshly allocated block needs no
		   read; a partial overwrite must read the old contents */
		if (amount == sdp->sd_jbsize || new)
			error = gfs2_dir_get_new_buffer(ip, dblock, &bh);
		else
			error = gfs2_dir_get_existing_buffer(ip, dblock, &bh);

		if (error)
			goto fail;

		gfs2_trans_add_meta(ip->i_gl, bh);
		memcpy(bh->b_data + o, buf, amount);
		brelse(bh);

		buf += amount;
		copied += amount;
		lblock++;
		dblock++;
		extlen--;

		/* Subsequent blocks are written from just past the header */
		o = sizeof(struct gfs2_meta_header);
	}

out:
	/* Update i_size and the timestamps in the dinode */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (ip->i_inode.i_size < offset + copied)
		i_size_write(&ip->i_inode, offset + copied);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

	return copied;
fail:
	/* Report a partial write if anything got out before the error */
	if (copied)
		goto out;
	return error;
}
0242
0243 static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf,
0244 unsigned int size)
0245 {
0246 struct buffer_head *dibh;
0247 int error;
0248
0249 error = gfs2_meta_inode_buffer(ip, &dibh);
0250 if (!error) {
0251 memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size);
0252 brelse(dibh);
0253 }
0254
0255 return (error) ? error : size;
0256 }
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
/**
 * gfs2_dir_read_data - Read directory data (e.g. the hash table)
 * @ip: The GFS2 directory inode
 * @buf: The destination buffer
 * @size: The number of bytes to read, starting at file offset 0
 *
 * Returns: the number of bytes read, or a negative errno (or a partial
 * count if some data was copied before a failure).
 */
static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf,
			      unsigned int size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 lblock, dblock;
	u32 extlen = 0;
	unsigned int o;
	int copied = 0;
	int error = 0;

	if (gfs2_is_stuffed(ip))
		return gfs2_dir_read_stuffed(ip, buf, size);

	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
		return -EINVAL;

	lblock = 0;
	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		unsigned int amount;
		struct buffer_head *bh;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		if (!extlen) {
			/* Map up to 32 blocks at once and start readahead
			   across the whole extent */
			extlen = 32;
			error = gfs2_get_extent(&ip->i_inode, lblock,
						&dblock, &extlen);
			if (error || !dblock)
				goto fail;
			BUG_ON(extlen < 1);
			bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		} else {
			error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, 0, &bh);
			if (error)
				goto fail;
		}
		error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD);
		if (error) {
			brelse(bh);
			goto fail;
		}
		dblock++;
		extlen--;
		memcpy(buf, bh->b_data + o, amount);
		brelse(bh);
		buf += (amount/sizeof(__be64));
		copied += amount;
		lblock++;
		/* Subsequent blocks are read from just past the header */
		o = sizeof(struct gfs2_meta_header);
	}

	return copied;
fail:
	return (copied) ? copied : error;
}
0326
0327
0328
0329
0330
0331
0332
0333
/**
 * gfs2_dir_get_hash_table - Get pointer to the directory hash table
 * @ip: The directory inode (must have GFS2_DIF_EXHASH set)
 *
 * Returns the cached hash table if one exists, otherwise reads it from
 * the directory data and installs it in ip->i_hash_cache under i_lock,
 * tolerating a concurrent reader having installed its own copy first.
 *
 * Returns: the hash table, or an ERR_PTR on failure.
 */
static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	int ret;
	u32 hsize;
	__be64 *hc;

	BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH));

	hc = ip->i_hash_cache;
	if (hc)
		return hc;

	/* The table must be exactly 2^depth 64-bit leaf pointers */
	hsize = BIT(ip->i_depth);
	hsize *= sizeof(__be64);
	if (hsize != i_size_read(&ip->i_inode)) {
		gfs2_consist_inode(ip);
		return ERR_PTR(-EIO);
	}

	/* Try kmalloc first, quietly fall back to vmalloc */
	hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
	if (hc == NULL)
		hc = __vmalloc(hsize, GFP_NOFS);

	if (hc == NULL)
		return ERR_PTR(-ENOMEM);

	ret = gfs2_dir_read_data(ip, hc, hsize);
	if (ret < 0) {
		kvfree(hc);
		return ERR_PTR(ret);
	}

	/* Install our copy unless someone beat us to it; if they did,
	   free ours and use theirs (kvfree(NULL) is a no-op) */
	spin_lock(&inode->i_lock);
	if (likely(!ip->i_hash_cache)) {
		ip->i_hash_cache = hc;
		hc = NULL;
	}
	spin_unlock(&inode->i_lock);
	kvfree(hc);

	return ip->i_hash_cache;
}
0377
0378
0379
0380
0381
0382
0383
0384 void gfs2_dir_hash_inval(struct gfs2_inode *ip)
0385 {
0386 __be64 *hc;
0387
0388 spin_lock(&ip->i_inode.i_lock);
0389 hc = ip->i_hash_cache;
0390 ip->i_hash_cache = NULL;
0391 spin_unlock(&ip->i_inode.i_lock);
0392
0393 kvfree(hc);
0394 }
0395
0396 static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
0397 {
0398 return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
0399 }
0400
/*
 * Core name comparison shared by the scan callbacks: returns @ret when
 * @dent is a live entry whose hash, length and name bytes all match
 * @name, and 0 otherwise.  The cheap hash/length checks are performed
 * before the memcmp.
 */
static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
				     const struct qstr *name, int ret)
{
	if (!gfs2_dirent_sentinel(dent) &&
	    be32_to_cpu(dent->de_hash) == name->hash &&
	    be16_to_cpu(dent->de_name_len) == name->len &&
	    memcmp(dent+1, name->name, name->len) == 0)
		return ret;
	return 0;
}
0411
/* Scan callback: stop (return 1) on an exact match for @name. */
static int gfs2_dirent_find(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	return __gfs2_dirent_find(dent, name, 1);
}
0418
/*
 * Scan callback: stop (return 2) on an exact match for @name.  The
 * value 2 makes gfs2_dirent_scan() hand back the *previous* dirent,
 * which is what deletion needs.
 */
static int gfs2_dirent_prev(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	return __gfs2_dirent_find(dent, name, 2);
}
0425
0426
0427
0428
0429
/*
 * Scan callback that matches the final dirent in a block.  The caller
 * reuses @name to describe the block itself: name->name points at the
 * start of the buffer and name->len is the usable length, so the entry
 * whose record ends exactly name->len bytes past the start is the last
 * one.
 */
static int gfs2_dirent_last(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	const char *start = name->name;
	const char *end = (const char *)dent + be16_to_cpu(dent->de_rec_len);
	if (name->len == (end - start))
		return 1;
	return 0;
}
0440
0441
0442
/*
 * Scan callback used while splitting a leaf: decide whether a dirent
 * for @name can be placed at the exact address @ptr.  Returns 1 when
 * @ptr lies in the free tail of @dent's record and there is room for
 * the new entry; 0 when @ptr falls outside this record entirely; and
 * -1 (abort the scan with an error) when @ptr is inside the record but
 * unusable (overlaps the live name or lacks space).
 */
static int gfs2_dirent_find_offset(const struct gfs2_dirent *dent,
				   const struct qstr *name,
				   void *ptr)
{
	unsigned required = GFS2_DIRENT_SIZE(name->len);
	unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	unsigned totlen = be16_to_cpu(dent->de_rec_len);

	if (ptr < (void *)dent || ptr >= (void *)dent + totlen)
		return 0;
	/* A sentinel's whole record is free space */
	if (gfs2_dirent_sentinel(dent))
		actual = 0;
	if (ptr < (void *)dent + actual)
		return -1;
	if ((void *)dent + totlen >= ptr + required)
		return 1;
	return -1;
}
0461
0462 static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
0463 const struct qstr *name,
0464 void *opaque)
0465 {
0466 unsigned required = GFS2_DIRENT_SIZE(name->len);
0467 unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
0468 unsigned totlen = be16_to_cpu(dent->de_rec_len);
0469
0470 if (gfs2_dirent_sentinel(dent))
0471 actual = 0;
0472 if (totlen - actual >= required)
0473 return 1;
0474 return 0;
0475 }
0476
/*
 * Context for gfs2_dirent_gather(): pointers to live dirents are
 * collected into @pdent, with @offset counting how many so far.
 */
struct dirent_gather {
	const struct gfs2_dirent **pdent;
	unsigned offset;
};
0481
0482 static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
0483 const struct qstr *name,
0484 void *opaque)
0485 {
0486 struct dirent_gather *g = opaque;
0487 if (!gfs2_dirent_sentinel(dent)) {
0488 g->pdent[g->offset++] = dent;
0489 }
0490 return 0;
0491 }
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
/*
 * Sanity-check one dirent inside a block of length @len.  @offset and
 * @size give the record's position and length within the block; @first
 * is set for the first record, which alone is allowed to be a sentinel.
 * Logs a warning and returns -EIO on any inconsistency, 0 otherwise.
 */
static int gfs2_check_dirent(struct gfs2_sbd *sdp,
			     struct gfs2_dirent *dent, unsigned int offset,
			     unsigned int size, unsigned int len, int first)
{
	const char *msg = "gfs2_dirent too small";
	if (unlikely(size < sizeof(struct gfs2_dirent)))
		goto error;
	msg = "gfs2_dirent misaligned";
	if (unlikely(offset & 0x7))
		goto error;
	msg = "gfs2_dirent points beyond end of block";
	if (unlikely(offset + size > len))
		goto error;
	msg = "zero inode number";
	if (unlikely(!first && gfs2_dirent_sentinel(dent)))
		goto error;
	msg = "name length is greater than space in dirent";
	if (!gfs2_dirent_sentinel(dent) &&
	    unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
		     size))
		goto error;
	return 0;
error:
	fs_warn(sdp, "%s: %s (%s)\n",
		__func__, msg, first ? "first in block" : "not first in block");
	return -EIO;
}
0529
0530 static int gfs2_dirent_offset(struct gfs2_sbd *sdp, const void *buf)
0531 {
0532 const struct gfs2_meta_header *h = buf;
0533 int offset;
0534
0535 BUG_ON(buf == NULL);
0536
0537 switch(be32_to_cpu(h->mh_type)) {
0538 case GFS2_METATYPE_LF:
0539 offset = sizeof(struct gfs2_leaf);
0540 break;
0541 case GFS2_METATYPE_DI:
0542 offset = sizeof(struct gfs2_dinode);
0543 break;
0544 default:
0545 goto wrong_type;
0546 }
0547 return offset;
0548 wrong_type:
0549 fs_warn(sdp, "%s: wrong block type %u\n", __func__,
0550 be32_to_cpu(h->mh_type));
0551 return -1;
0552 }
0553
/*
 * Walk every dirent in a leaf or stuffed-dinode block, validating each
 * record and calling @scan on it.  The walk stops when @scan returns
 * non-zero.
 *
 * Returns: NULL when @scan never fired; the current dirent when @scan
 * returned 1; the previous dirent (or the current one, if it was first)
 * when @scan returned 2; or an ERR_PTR on corruption / scan error.
 */
static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
					    unsigned int len, gfs2_dscan_t scan,
					    const struct qstr *name,
					    void *opaque)
{
	struct gfs2_dirent *dent, *prev;
	unsigned offset;
	unsigned size;
	int ret = 0;

	ret = gfs2_dirent_offset(GFS2_SB(inode), buf);
	if (ret < 0)
		goto consist_inode;

	offset = ret;
	prev = NULL;
	dent = buf + offset;
	size = be16_to_cpu(dent->de_rec_len);
	if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1))
		goto consist_inode;
	do {
		ret = scan(dent, name, opaque);
		if (ret)
			break;
		offset += size;
		if (offset == len)
			break;
		prev = dent;
		dent = buf + offset;
		size = be16_to_cpu(dent->de_rec_len);
		if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size,
				      len, 0))
			goto consist_inode;
	} while(1);

	switch(ret) {
	case 0:
		return NULL;
	case 1:
		return dent;
	case 2:
		return prev ? prev : dent;
	default:
		/* Negative scan results are propagated as errors */
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}

consist_inode:
	gfs2_consist_inode(GFS2_I(inode));
	return ERR_PTR(-EIO);
}
0605
/*
 * Validate a dirent's record length against the end of its block.
 * Returns the record length when another entry follows it, -ENOENT
 * when the record ends exactly at the block boundary (it is the last
 * one), or -EIO (after flagging the inode inconsistent) on corruption.
 */
static int dirent_check_reclen(struct gfs2_inode *dip,
			       const struct gfs2_dirent *d, const void *end_p)
{
	const void *ptr = d;
	u16 rec_len = be16_to_cpu(d->de_rec_len);

	if (unlikely(rec_len < sizeof(struct gfs2_dirent)))
		goto broken;
	ptr += rec_len;
	if (ptr < end_p)
		return rec_len;
	if (ptr == end_p)
		return -ENOENT;
broken:
	gfs2_consist_inode(dip);
	return -EIO;
}
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
/**
 * dirent_next - Advance to the next dirent in a block
 * @dip: the directory inode
 * @bh: the buffer containing the entries
 * @dent: in/out: current entry, replaced by the next one on success
 *
 * Returns: 0 on success, -ENOENT at the end of the block, or -EIO.
 */
static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
		       struct gfs2_dirent **dent)
{
	struct gfs2_dirent *cur = *dent, *tmp;
	char *bh_end = bh->b_data + bh->b_size;
	int ret;

	ret = dirent_check_reclen(dip, cur, bh_end);
	if (ret < 0)
		return ret;

	tmp = (void *)cur + ret;
	ret = dirent_check_reclen(dip, tmp, bh_end);
	if (ret == -EIO)
		return ret;

	/* Only the first dirent in a block may be a sentinel */
	if (gfs2_dirent_sentinel(tmp)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	*dent = tmp;
	return 0;
}
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
/**
 * dirent_del - Delete a dirent from a block
 * @dip: the directory inode
 * @bh: the buffer containing the entry
 * @prev: the previous entry in the block, or NULL if @cur is first
 * @cur: the entry to delete
 *
 * The first entry in a block has nothing before it to merge into, so
 * it is turned into a sentinel by zeroing its inode numbers; any other
 * entry's record is absorbed into @prev's record length.
 */
static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
		       struct gfs2_dirent *prev, struct gfs2_dirent *cur)
{
	u16 cur_rec_len, prev_rec_len;

	if (gfs2_dirent_sentinel(cur)) {
		gfs2_consist_inode(dip);
		return;
	}

	gfs2_trans_add_meta(dip->i_gl, bh);

	/* First entry in the block: make it a sentinel in place; its
	   record length is already as big as it needs to be */
	if (!prev) {
		cur->de_inum.no_addr = 0;
		cur->de_inum.no_formal_ino = 0;
		return;
	}

	/* Combine this record with the previous one */
	prev_rec_len = be16_to_cpu(prev->de_rec_len);
	cur_rec_len = be16_to_cpu(cur->de_rec_len);

	if ((char *)prev + prev_rec_len != (char *)cur)
		gfs2_consist_inode(dip);
	if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)
		gfs2_consist_inode(dip);

	prev_rec_len += cur_rec_len;
	prev->de_rec_len = cpu_to_be16(prev_rec_len);
}
0703
0704
/*
 * Carve a new dirent for @name out of @dent's record, @offset bytes in:
 * @dent's record length is truncated to @offset and the remainder of
 * the space becomes the new entry.  Returns the new dirent.
 */
static struct gfs2_dirent *do_init_dirent(struct inode *inode,
					  struct gfs2_dirent *dent,
					  const struct qstr *name,
					  struct buffer_head *bh,
					  unsigned offset)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_dirent *ndent;
	unsigned totlen;

	totlen = be16_to_cpu(dent->de_rec_len);
	BUG_ON(offset + name->len > totlen);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ndent = (struct gfs2_dirent *)((char *)dent + offset);
	dent->de_rec_len = cpu_to_be16(offset);
	gfs2_qstr2dirent(name, totlen - offset, ndent);
	return ndent;
}
0723
0724
0725
0726
0727
0728
0729 static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
0730 struct gfs2_dirent *dent,
0731 const struct qstr *name,
0732 struct buffer_head *bh)
0733 {
0734 unsigned offset = 0;
0735
0736 if (!gfs2_dirent_sentinel(dent))
0737 offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
0738 return do_init_dirent(inode, dent, name, bh, offset);
0739 }
0740
/*
 * During a leaf split, allocate a dirent for @name at the specific
 * address @ptr inside @bh (mirroring the entry's position in the old
 * leaf).  Returns the new dirent, NULL if no record contains @ptr, or
 * an ERR_PTR on error.
 */
static struct gfs2_dirent *gfs2_dirent_split_alloc(struct inode *inode,
						   struct buffer_head *bh,
						   const struct qstr *name,
						   void *ptr)
{
	struct gfs2_dirent *dent;
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
				gfs2_dirent_find_offset, name, ptr);
	if (IS_ERR_OR_NULL(dent))
		return dent;
	return do_init_dirent(inode, dent, name, bh,
			      (unsigned)(ptr - (void *)dent));
}
0754
0755 static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
0756 struct buffer_head **bhp)
0757 {
0758 int error;
0759
0760 error = gfs2_meta_read(dip->i_gl, leaf_no, DIO_WAIT, 0, bhp);
0761 if (!error && gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bhp, GFS2_METATYPE_LF)) {
0762
0763 error = -EIO;
0764 }
0765
0766 return error;
0767 }
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778 static int get_leaf_nr(struct gfs2_inode *dip, u32 index, u64 *leaf_out)
0779 {
0780 __be64 *hash;
0781 int error;
0782
0783 hash = gfs2_dir_get_hash_table(dip);
0784 error = PTR_ERR_OR_ZERO(hash);
0785
0786 if (!error)
0787 *leaf_out = be64_to_cpu(*(hash + index));
0788
0789 return error;
0790 }
0791
0792 static int get_first_leaf(struct gfs2_inode *dip, u32 index,
0793 struct buffer_head **bh_out)
0794 {
0795 u64 leaf_no;
0796 int error;
0797
0798 error = get_leaf_nr(dip, index, &leaf_no);
0799 if (!error)
0800 error = get_leaf(dip, leaf_no, bh_out);
0801
0802 return error;
0803 }
0804
/**
 * gfs2_dirent_search - Search a directory with a custom scan callback
 * @inode: the directory
 * @name: the name to look for
 * @scan: callback deciding what counts as a match
 * @pbh: returns the buffer the dirent lives in (caller must brelse)
 *
 * For exhash directories the name hash selects a leaf chain which is
 * walked leaf by leaf; otherwise the stuffed dinode block is scanned
 * directly.
 *
 * Returns: the dirent (with *pbh held), NULL if not found, or ERR_PTR.
 */
static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
					      const struct qstr *name,
					      gfs2_dscan_t scan,
					      struct buffer_head **pbh)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	if (ip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf;
		unsigned int hsize = BIT(ip->i_depth);
		unsigned int index;
		u64 ln;
		/* The hash table must be exactly 2^depth pointers long */
		if (hsize * sizeof(u64) != i_size_read(inode)) {
			gfs2_consist_inode(ip);
			return ERR_PTR(-EIO);
		}

		index = name->hash >> (32 - ip->i_depth);
		error = get_first_leaf(ip, index, &bh);
		if (error)
			return ERR_PTR(error);
		/* Walk the chain of leaves sharing this hash bucket */
		do {
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						scan, name, NULL);
			if (dent)
				goto got_dent;
			leaf = (struct gfs2_leaf *)bh->b_data;
			ln = be64_to_cpu(leaf->lf_next);
			brelse(bh);
			if (!ln)
				break;

			error = get_leaf(ip, ln, &bh);
		} while(!error);

		return error ? ERR_PTR(error) : NULL;
	}

	/* Stuffed directory: scan the dinode block itself */
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return ERR_PTR(error);
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL);
got_dent:
	if (IS_ERR_OR_NULL(dent)) {
		brelse(bh);
		bh = NULL;
	}
	*pbh = bh;
	return dent;
}
0859
/**
 * new_leaf - Allocate and initialise a new directory leaf block
 * @inode: the directory inode
 * @pbh: returns the buffer head of the new leaf
 * @depth: the hash depth to record in the leaf header
 *
 * The new leaf starts out containing a single sentinel dirent covering
 * all the space after the leaf header.
 *
 * Returns: the leaf (with *pbh held), or NULL on failure.
 */
static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int n = 1;
	u64 bn;
	int error;
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	struct gfs2_dirent *dent;
	struct timespec64 tv = current_time(inode);

	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
	if (error)
		return NULL;
	bh = gfs2_meta_new(ip->i_gl, bn);
	if (!bh)
		return NULL;

	gfs2_trans_remove_revoke(GFS2_SB(inode), bn, 1);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
	leaf = (struct gfs2_leaf *)bh->b_data;
	leaf->lf_depth = cpu_to_be16(depth);
	leaf->lf_entries = 0;
	leaf->lf_dirent_format = cpu_to_be32(GFS2_FORMAT_DE);
	leaf->lf_next = 0;
	leaf->lf_inode = cpu_to_be64(ip->i_no_addr);
	leaf->lf_dist = cpu_to_be32(1);
	leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
	leaf->lf_sec = cpu_to_be64(tv.tv_sec);
	memset(leaf->lf_reserved2, 0, sizeof(leaf->lf_reserved2));
	/* A single sentinel dirent spans the rest of the block */
	dent = (struct gfs2_dirent *)(leaf+1);
	gfs2_qstr2dirent(&empty_name, bh->b_size - sizeof(struct gfs2_leaf), dent);
	*pbh = bh;
	return leaf;
}
0896
0897
0898
0899
0900
0901
0902
0903
0904 static int dir_make_exhash(struct inode *inode)
0905 {
0906 struct gfs2_inode *dip = GFS2_I(inode);
0907 struct gfs2_sbd *sdp = GFS2_SB(inode);
0908 struct gfs2_dirent *dent;
0909 struct qstr args;
0910 struct buffer_head *bh, *dibh;
0911 struct gfs2_leaf *leaf;
0912 int y;
0913 u32 x;
0914 __be64 *lp;
0915 u64 bn;
0916 int error;
0917
0918 error = gfs2_meta_inode_buffer(dip, &dibh);
0919 if (error)
0920 return error;
0921
0922
0923
0924 leaf = new_leaf(inode, &bh, 0);
0925 if (!leaf)
0926 return -ENOSPC;
0927 bn = bh->b_blocknr;
0928
0929 gfs2_assert(sdp, dip->i_entries < BIT(16));
0930 leaf->lf_entries = cpu_to_be16(dip->i_entries);
0931
0932
0933
0934 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh,
0935 sizeof(struct gfs2_dinode));
0936
0937
0938
0939 x = 0;
0940 args.len = bh->b_size - sizeof(struct gfs2_dinode) +
0941 sizeof(struct gfs2_leaf);
0942 args.name = bh->b_data;
0943 dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size,
0944 gfs2_dirent_last, &args, NULL);
0945 if (!dent) {
0946 brelse(bh);
0947 brelse(dibh);
0948 return -EIO;
0949 }
0950 if (IS_ERR(dent)) {
0951 brelse(bh);
0952 brelse(dibh);
0953 return PTR_ERR(dent);
0954 }
0955
0956
0957
0958
0959 dent->de_rec_len = cpu_to_be16(be16_to_cpu(dent->de_rec_len) +
0960 sizeof(struct gfs2_dinode) -
0961 sizeof(struct gfs2_leaf));
0962
0963 brelse(bh);
0964
0965
0966
0967
0968 gfs2_trans_add_meta(dip->i_gl, dibh);
0969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
0970
0971 lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
0972
0973 for (x = sdp->sd_hash_ptrs; x--; lp++)
0974 *lp = cpu_to_be64(bn);
0975
0976 i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
0977 gfs2_add_inode_blocks(&dip->i_inode, 1);
0978 dip->i_diskflags |= GFS2_DIF_EXHASH;
0979
0980 for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
0981 dip->i_depth = y;
0982
0983 gfs2_dinode_out(dip, dibh->b_data);
0984
0985 brelse(dibh);
0986
0987 return 0;
0988 }
0989
0990
0991
0992
0993
0994
0995
0996
0997
/**
 * dir_split_leaf - Split a directory leaf whose hash range is too full
 * @inode: the directory inode
 * @name: the name being inserted (its hash selects the leaf to split)
 *
 * Half of the hash-table pointers that referenced the old leaf are
 * redirected to a new leaf one level deeper, and every entry whose
 * hash falls below the dividing line is moved into the new leaf at the
 * same relative offset.
 *
 * Returns: 0 on success, 1 if the leaf is already at full depth (the
 * hash table itself must be doubled first), or a negative errno.
 */
static int dir_split_leaf(struct inode *inode, const struct qstr *name)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct buffer_head *nbh, *obh, *dibh;
	struct gfs2_leaf *nleaf, *oleaf;
	struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
	u32 start, len, half_len, divider;
	u64 bn, leaf_no;
	__be64 *lp;
	u32 index;
	int x;
	int error;

	index = name->hash >> (32 - dip->i_depth);
	error = get_leaf_nr(dip, index, &leaf_no);
	if (error)
		return error;

	/*  Get the old leaf block  */
	error = get_leaf(dip, leaf_no, &obh);
	if (error)
		return error;

	oleaf = (struct gfs2_leaf *)obh->b_data;
	/*  A leaf at full depth cannot be split further  */
	if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
		brelse(obh);
		return 1;
	}

	gfs2_trans_add_meta(dip->i_gl, obh);

	nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1);
	if (!nleaf) {
		brelse(obh);
		return -ENOSPC;
	}
	bn = nbh->b_blocknr;

	/*  Compute the start and length of the run of hash-table
	    pointers that currently reference the old leaf  */
	len = BIT(dip->i_depth - be16_to_cpu(oleaf->lf_depth));
	half_len = len >> 1;
	if (!half_len) {
		fs_warn(GFS2_SB(inode), "i_depth %u lf_depth %u index %u\n",
			dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
		gfs2_consist_inode(dip);
		error = -EIO;
		goto fail_brelse;
	}

	start = (index & ~(len - 1));

	/*  Redirect the first half of those pointers to the new leaf  */
	lp = kmalloc_array(half_len, sizeof(__be64), GFP_NOFS);
	if (!lp) {
		error = -ENOMEM;
		goto fail_brelse;
	}

	for (x = 0; x < half_len; x++)
		lp[x] = cpu_to_be64(bn);

	/*  The cached table is now stale  */
	gfs2_dir_hash_inval(dip);

	error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
				    half_len * sizeof(u64));
	if (error != half_len * sizeof(u64)) {
		if (error >= 0)
			error = -EIO;
		goto fail_lpfree;
	}

	kfree(lp);

	/*  Hashes below the divider belong in the new leaf  */
	divider = (start + half_len) << (32 - dip->i_depth);

	/*  Move the matching entries across  */
	dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));

	do {
		/*  Fetch the successor before @dent is modified  */
		next = dent;
		if (dirent_next(dip, obh, &next))
			next = NULL;

		if (!gfs2_dirent_sentinel(dent) &&
		    be32_to_cpu(dent->de_hash) < divider) {
			struct qstr str;
			/*  Same offset in the new leaf as in the old  */
			void *ptr = ((char *)dent - obh->b_data) + nbh->b_data;
			str.name = (char*)(dent+1);
			str.len = be16_to_cpu(dent->de_name_len);
			str.hash = be32_to_cpu(dent->de_hash);
			new = gfs2_dirent_split_alloc(inode, nbh, &str, ptr);
			if (IS_ERR(new)) {
				error = PTR_ERR(new);
				break;
			}

			new->de_inum = dent->de_inum; /* No endian worries */
			new->de_type = dent->de_type; /* No endian worries */
			be16_add_cpu(&nleaf->lf_entries, 1);

			dirent_del(dip, obh, prev, dent);

			if (!oleaf->lf_entries)
				gfs2_consist_inode(dip);
			be16_add_cpu(&oleaf->lf_entries, -1);

			/*  A deleted first entry becomes a sentinel and
			    stays the merge target for what follows  */
			if (!prev)
				prev = dent;
		} else {
			prev = dent;
		}
		dent = next;
	} while (dent);

	oleaf->lf_depth = nleaf->lf_depth;

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
		gfs2_trans_add_meta(dip->i_gl, dibh);
		gfs2_add_inode_blocks(&dip->i_inode, 1);
		gfs2_dinode_out(dip, dibh->b_data);
		brelse(dibh);
	}

	brelse(obh);
	brelse(nbh);

	return error;

fail_lpfree:
	kfree(lp);

fail_brelse:
	brelse(obh);
	brelse(nbh);
	return error;
}
1139
1140
1141
1142
1143
1144
1145
1146
1147 static int dir_double_exhash(struct gfs2_inode *dip)
1148 {
1149 struct buffer_head *dibh;
1150 u32 hsize;
1151 u32 hsize_bytes;
1152 __be64 *hc;
1153 __be64 *hc2, *h;
1154 int x;
1155 int error = 0;
1156
1157 hsize = BIT(dip->i_depth);
1158 hsize_bytes = hsize * sizeof(__be64);
1159
1160 hc = gfs2_dir_get_hash_table(dip);
1161 if (IS_ERR(hc))
1162 return PTR_ERR(hc);
1163
1164 hc2 = kmalloc_array(hsize_bytes, 2, GFP_NOFS | __GFP_NOWARN);
1165 if (hc2 == NULL)
1166 hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS);
1167
1168 if (!hc2)
1169 return -ENOMEM;
1170
1171 h = hc2;
1172 error = gfs2_meta_inode_buffer(dip, &dibh);
1173 if (error)
1174 goto out_kfree;
1175
1176 for (x = 0; x < hsize; x++) {
1177 *h++ = *hc;
1178 *h++ = *hc;
1179 hc++;
1180 }
1181
1182 error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2);
1183 if (error != (hsize_bytes * 2))
1184 goto fail;
1185
1186 gfs2_dir_hash_inval(dip);
1187 dip->i_hash_cache = hc2;
1188 dip->i_depth++;
1189 gfs2_dinode_out(dip, dibh->b_data);
1190 brelse(dibh);
1191 return 0;
1192
1193 fail:
1194
1195 gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes);
1196 i_size_write(&dip->i_inode, hsize_bytes);
1197 gfs2_dinode_out(dip, dibh->b_data);
1198 brelse(dibh);
1199 out_kfree:
1200 kvfree(hc2);
1201 return error;
1202 }
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215 static int compare_dents(const void *a, const void *b)
1216 {
1217 const struct gfs2_dirent *dent_a, *dent_b;
1218 u32 hash_a, hash_b;
1219 int ret = 0;
1220
1221 dent_a = *(const struct gfs2_dirent **)a;
1222 hash_a = dent_a->de_cookie;
1223
1224 dent_b = *(const struct gfs2_dirent **)b;
1225 hash_b = dent_b->de_cookie;
1226
1227 if (hash_a > hash_b)
1228 ret = 1;
1229 else if (hash_a < hash_b)
1230 ret = -1;
1231 else {
1232 unsigned int len_a = be16_to_cpu(dent_a->de_name_len);
1233 unsigned int len_b = be16_to_cpu(dent_b->de_name_len);
1234
1235 if (len_a > len_b)
1236 ret = 1;
1237 else if (len_a < len_b)
1238 ret = -1;
1239 else
1240 ret = memcmp(dent_a + 1, dent_b + 1, len_a);
1241 }
1242
1243 return ret;
1244 }
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
/**
 * do_filldir_main - Emit gathered dirents to the readdir context
 * @dip: the directory inode
 * @ctx: the readdir context
 * @darr: array of dirent pointers, cookies already assigned
 * @entries: number of entries in @darr
 * @sort_start: index from which the array still needs sorting
 * @copied: in/out flag, set once at least one entry has been emitted
 *
 * Entries below ctx->pos are skipped.  Consecutive entries sharing one
 * cookie must all go out in a single call (the cookie cannot address
 * them individually), so a run that would straddle two calls stops
 * early before the run begins.
 *
 * Returns: 0 when all entries were emitted, 1 to stop for now.
 */
static int do_filldir_main(struct gfs2_inode *dip, struct dir_context *ctx,
			   struct gfs2_dirent **darr, u32 entries,
			   u32 sort_start, int *copied)
{
	const struct gfs2_dirent *dent, *dent_next;
	u64 off, off_next;
	unsigned int x, y;
	int run = 0;

	if (sort_start < entries)
		sort(&darr[sort_start], entries - sort_start,
		     sizeof(struct gfs2_dirent *), compare_dents, NULL);

	dent_next = darr[0];
	off_next = dent_next->de_cookie;

	for (x = 0, y = 1; x < entries; x++, y++) {
		dent = dent_next;
		off = off_next;

		if (y < entries) {
			/* Peek at the next cookie to detect runs */
			dent_next = darr[y];
			off_next = dent_next->de_cookie;

			if (off < ctx->pos)
				continue;
			ctx->pos = off;

			if (off_next == off) {
				/* Don't start a run of equal cookies if
				   we've already emitted entries; stop and
				   emit the whole run next call */
				if (*copied && !run)
					return 1;
				run = 1;
			} else
				run = 0;
		} else {
			if (off < ctx->pos)
				continue;
			ctx->pos = off;
		}

		if (!dir_emit(ctx, (const char *)(dent + 1),
			      be16_to_cpu(dent->de_name_len),
			      be64_to_cpu(dent->de_inum.no_addr),
			      be16_to_cpu(dent->de_type)))
			return 1;

		*copied = 1;
	}

	/* Move past the last cookie so a subsequent call starts fresh */
	ctx->pos++;

	return 0;
}
1320
1321 static void *gfs2_alloc_sort_buffer(unsigned size)
1322 {
1323 void *ptr = NULL;
1324
1325 if (size < KMALLOC_MAX_SIZE)
1326 ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
1327 if (!ptr)
1328 ptr = __vmalloc(size, GFP_NOFS);
1329 return ptr;
1330 }
1331
1332
/*
 * Assign a readdir cookie to each gathered dirent.  By default the
 * cookie is derived from the name hash.  With the "loccookie" mount
 * option a smaller location-based cookie (leaf number and slot) is
 * used where it fits; entries that overflow that space fall back to
 * the hash cookie and are flagged with GFS2_USE_HASH_FLAG.
 *
 * Returns: the index of the first entry that fell back to a hash
 * cookie, or -1 if none did (callers use this to decide where sorting
 * must begin).
 */
static int gfs2_set_cookies(struct gfs2_sbd *sdp, struct buffer_head *bh,
			    unsigned leaf_nr, struct gfs2_dirent **darr,
			    unsigned entries)
{
	int sort_id = -1;
	int i;

	for (i = 0; i < entries; i++) {
		unsigned offset;

		darr[i]->de_cookie = be32_to_cpu(darr[i]->de_hash);
		darr[i]->de_cookie = gfs2_disk_hash2offset(darr[i]->de_cookie);

		if (!sdp->sd_args.ar_loccookie)
			continue;
		/* Slot index of this entry within its leaf */
		offset = (char *)(darr[i]) -
			 (bh->b_data + gfs2_dirent_offset(sdp, bh->b_data));
		offset /= GFS2_MIN_DIRENT_SIZE;
		offset += leaf_nr * sdp->sd_max_dents_per_leaf;
		if (offset >= GFS2_USE_HASH_FLAG ||
		    leaf_nr >= GFS2_USE_HASH_FLAG) {
			darr[i]->de_cookie |= GFS2_USE_HASH_FLAG;
			if (sort_id < 0)
				sort_id = i;
			continue;
		}
		darr[i]->de_cookie &= GFS2_HASH_INDEX_MASK;
		darr[i]->de_cookie |= offset;
	}
	return sort_id;
}
1364
1365
/**
 * gfs2_dir_read_leaf - Read entries from a chain of leaf blocks
 * @inode: the directory inode
 * @ctx: the readdir context to fill
 * @copied: in/out flag, set once at least one entry has been emitted
 * @depth: returns the depth of the first leaf in the chain
 * @leaf_no: block number of the first leaf
 *
 * All entries in the chain are gathered into a single array, given
 * readdir cookies, sorted where needed, and fed to do_filldir_main().
 *
 * Returns: 0 on success, or a negative errno
 */
static int gfs2_dir_read_leaf(struct inode *inode, struct dir_context *ctx,
			      int *copied, unsigned *depth,
			      u64 leaf_no)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_leaf *lf;
	unsigned entries = 0, entries2 = 0;
	unsigned leaves = 0, leaf = 0, offset, sort_offset;
	struct gfs2_dirent **darr, *dent;
	struct dirent_gather g;
	struct buffer_head **larr;
	int error, i, need_sort = 0, sort_id;
	u64 lfn = leaf_no;

	/* First pass: count the leaves and entries in the chain */
	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out;
		lf = (struct gfs2_leaf *)bh->b_data;
		if (leaves == 0)
			*depth = be16_to_cpu(lf->lf_depth);
		entries += be16_to_cpu(lf->lf_entries);
		leaves++;
		lfn = be64_to_cpu(lf->lf_next);
		brelse(bh);
	} while(lfn);

	if (*depth < GFS2_DIR_MAX_DEPTH || !sdp->sd_args.ar_loccookie) {
		need_sort = 1;
		sort_offset = 0;
	}

	if (!entries)
		return 0;

	error = -ENOMEM;
	/*
	 * One slot per leaf (to keep the buffers pinned while their
	 * dirents are referenced) plus one per entry; the extra 99
	 * slots are a safety margin in case the per-leaf entry counts
	 * are corrupt.
	 */
	larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
	if (!larr)
		goto out;
	darr = (struct gfs2_dirent **)(larr + leaves);
	g.pdent = (const struct gfs2_dirent **)darr;
	g.offset = 0;
	lfn = leaf_no;

	/* Second pass: gather pointers to every live entry */
	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out_free;
		lf = (struct gfs2_leaf *)bh->b_data;
		lfn = be64_to_cpu(lf->lf_next);
		if (lf->lf_entries) {
			offset = g.offset;
			entries2 += be16_to_cpu(lf->lf_entries);
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						gfs2_dirent_gather, NULL, &g);
			error = PTR_ERR(dent);
			if (IS_ERR(dent))
				goto out_free;
			if (entries2 != g.offset) {
				fs_warn(sdp, "Number of entries corrupt in dir "
					"leaf %llu, entries2 (%u) != "
					"g.offset (%u)\n",
					(unsigned long long)bh->b_blocknr,
					entries2, g.offset);
				gfs2_consist_inode(ip);
				error = -EIO;
				goto out_free;
			}
			error = 0;
			sort_id = gfs2_set_cookies(sdp, bh, leaf, &darr[offset],
						   be16_to_cpu(lf->lf_entries));
			if (!need_sort && sort_id >= 0) {
				need_sort = 1;
				sort_offset = offset + sort_id;
			}
			larr[leaf++] = bh;
		} else {
			larr[leaf++] = NULL;
			brelse(bh);
		}
	} while(lfn);

	BUG_ON(entries2 != entries);
	error = do_filldir_main(ip, ctx, darr, entries, need_sort ?
				sort_offset : entries, copied);
out_free:
	for(i = 0; i < leaf; i++)
		brelse(larr[i]);
	kvfree(larr);
out:
	return error;
}
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
/**
 * gfs2_dir_readahead - Issue read-ahead requests for directory leaf blocks
 * @inode: the directory being read
 * @hsize: hash table size (number of slots)
 * @index: starting hash table slot
 * @f_ra: file readahead state, tracks how far we have already read ahead
 *
 * Issues up to MAX_RA_BLOCKS asynchronous metadata reads for the leaf
 * blocks referenced by consecutive hash table slots starting at @index.
 */
static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
			       struct file_ra_state *f_ra)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	u64 blocknr = 0, last;
	unsigned count;

	/* First check if we've already read-ahead for this section */
	if (index + MAX_RA_BLOCKS < f_ra->start)
		return;

	f_ra->start = max((pgoff_t)index, f_ra->start);
	for (count = 0; count < MAX_RA_BLOCKS; count++) {
		if (f_ra->start >= hsize) /* ran off the end of the hash table */
			break;

		last = blocknr;
		blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]);
		f_ra->start++;
		/* Adjacent slots may point at the same leaf; skip repeats */
		if (blocknr == last)
			continue;

		bh = gfs2_getbuf(gl, blocknr, 1);
		if (trylock_buffer(bh)) {
			if (buffer_uptodate(bh)) {
				/* Already cached, nothing to read */
				unlock_buffer(bh);
				brelse(bh);
				continue;
			}
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
				  REQ_PRIO, bh);
			continue;
		}
		/* Buffer locked by someone else; don't wait, just skip it */
		brelse(bh);
	}
}
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
/**
 * dir_e_read - Read the entries from an exhash directory into the dir_context
 * @inode: the directory to read from
 * @ctx: the directory context
 * @f_ra: file readahead state
 *
 * Returns: errno
 */
static int dir_e_read(struct inode *inode, struct dir_context *ctx,
		      struct file_ra_state *f_ra)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	u32 hsize, len = 0;
	u32 hash, index;
	__be64 *lp;
	int copied = 0;
	int error = 0;
	unsigned depth = 0;

	hsize = BIT(dip->i_depth);
	/* Convert the f_pos cookie back into a hash and hash table slot */
	hash = gfs2_dir_offset2hash(ctx->pos);
	index = hash >> (32 - dip->i_depth);

	/* No cached hash table yet, so no readahead has happened either */
	if (dip->i_hash_cache == NULL)
		f_ra->start = 0;
	lp = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	gfs2_dir_readahead(inode, hsize, index, f_ra);

	while (index < hsize) {
		error = gfs2_dir_read_leaf(inode, ctx,
					   &copied, &depth,
					   be64_to_cpu(lp[index]));
		if (error)
			break;

		/* Skip all hash table slots pointing at the chain just read */
		len = BIT(dip->i_depth - depth);
		index = (index & ~(len - 1)) + len;
	}

	/* A positive return from the leaf reader means "stopped early", not failure */
	if (error > 0)
		error = 0;
	return error;
}
1566
/**
 * gfs2_dir_read - Read the entries of a directory into the dir_context
 * @inode: the directory to read from
 * @ctx: the directory context to fill
 * @f_ra: file readahead state
 *
 * Dispatches to dir_e_read() for exhash directories; otherwise reads the
 * entries stuffed directly in the dinode block.
 *
 * Returns: errno
 */
int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
		  struct file_ra_state *f_ra)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct dirent_gather g;
	struct gfs2_dirent **darr, *dent;
	struct buffer_head *dibh;
	int copied = 0;
	int error;

	if (!dip->i_entries)
		return 0;

	if (dip->i_diskflags & GFS2_DIF_EXHASH)
		return dir_e_read(inode, ctx, f_ra);

	/* Not exhash, so the entries must be stuffed into the dinode itself */
	if (!gfs2_is_stuffed(dip)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		return error;

	error = -ENOMEM;
	/* 96 is presumably the max number of dirents which can be stuffed
	   into an inode block -- TODO confirm against the on-disk format */
	darr = kmalloc_array(96, sizeof(struct gfs2_dirent *), GFP_NOFS);
	if (darr) {
		g.pdent = (const struct gfs2_dirent **)darr;
		g.offset = 0;
		dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size,
					gfs2_dirent_gather, NULL, &g);
		if (IS_ERR(dent)) {
			error = PTR_ERR(dent);
			goto out;
		}
		/* Cross-check the gathered count against the dinode's count */
		if (dip->i_entries != g.offset) {
			fs_warn(sdp, "Number of entries corrupt in dir %llu, "
				"ip->i_entries (%u) != g.offset (%u)\n",
				(unsigned long long)dip->i_no_addr,
				dip->i_entries,
				g.offset);
			gfs2_consist_inode(dip);
			error = -EIO;
			goto out;
		}
		gfs2_set_cookies(sdp, dibh, 0, darr, dip->i_entries);
		error = do_filldir_main(dip, ctx, darr,
					dip->i_entries, 0, &copied);
out:
		kfree(darr);
	}

	if (error > 0)
		error = 0;

	brelse(dibh);

	return error;
}
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642 struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
1643 bool fail_on_exist)
1644 {
1645 struct buffer_head *bh;
1646 struct gfs2_dirent *dent;
1647 u64 addr, formal_ino;
1648 u16 dtype;
1649
1650 dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
1651 if (dent) {
1652 struct inode *inode;
1653 u16 rahead;
1654
1655 if (IS_ERR(dent))
1656 return ERR_CAST(dent);
1657 dtype = be16_to_cpu(dent->de_type);
1658 rahead = be16_to_cpu(dent->de_rahead);
1659 addr = be64_to_cpu(dent->de_inum.no_addr);
1660 formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino);
1661 brelse(bh);
1662 if (fail_on_exist)
1663 return ERR_PTR(-EEXIST);
1664 inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino,
1665 GFS2_BLKST_FREE );
1666 if (!IS_ERR(inode))
1667 GFS2_I(inode)->i_rahead = rahead;
1668 return inode;
1669 }
1670 return ERR_PTR(-ENOENT);
1671 }
1672
1673 int gfs2_dir_check(struct inode *dir, const struct qstr *name,
1674 const struct gfs2_inode *ip)
1675 {
1676 struct buffer_head *bh;
1677 struct gfs2_dirent *dent;
1678 int ret = -ENOENT;
1679
1680 dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
1681 if (dent) {
1682 if (IS_ERR(dent))
1683 return PTR_ERR(dent);
1684 if (ip) {
1685 if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr)
1686 goto out;
1687 if (be64_to_cpu(dent->de_inum.no_formal_ino) !=
1688 ip->i_no_formal_ino)
1689 goto out;
1690 if (unlikely(IF2DT(ip->i_inode.i_mode) !=
1691 be16_to_cpu(dent->de_type))) {
1692 gfs2_consist_inode(GFS2_I(dir));
1693 ret = -EIO;
1694 goto out;
1695 }
1696 }
1697 ret = 0;
1698 out:
1699 brelse(bh);
1700 }
1701 return ret;
1702 }
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
/**
 * dir_new_leaf - Add a new leaf onto an existing leaf chain
 * @inode: The directory
 * @name: The name we are adding (used to pick the hash chain)
 *
 * Chains a brand new leaf block onto the end of the chain for @name's
 * hash slot, recording the chain distance in lf_dist.
 *
 * Returns: 0 on success, or -ve on error
 */
static int dir_new_leaf(struct inode *inode, const struct qstr *name)
{
	struct buffer_head *bh, *obh;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_leaf *leaf, *oleaf;
	u32 dist = 1;
	int error;
	u32 index;
	u64 bn;

	index = name->hash >> (32 - ip->i_depth);
	error = get_first_leaf(ip, index, &obh);
	if (error)
		return error;
	/* Walk to the last leaf in the chain, counting the distance */
	do {
		dist++;
		oleaf = (struct gfs2_leaf *)obh->b_data;
		bn = be64_to_cpu(oleaf->lf_next);
		if (!bn)
			break;
		brelse(obh);
		error = get_leaf(ip, bn, &obh);
		if (error)
			return error;
	} while(1);

	gfs2_trans_add_meta(ip->i_gl, obh);

	leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth));
	if (!leaf) {
		brelse(obh);
		return -ENOSPC;
	}
	leaf->lf_dist = cpu_to_be32(dist);
	/* Link the new leaf onto the end of the chain */
	oleaf->lf_next = cpu_to_be64(bh->b_blocknr);
	brelse(bh);
	brelse(obh);

	/* Account for the newly allocated block in the dinode */
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return error;
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_add_inode_blocks(&ip->i_inode, 1);
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	return 0;
}
1770
1771 static u16 gfs2_inode_ra_len(const struct gfs2_inode *ip)
1772 {
1773 u64 where = ip->i_no_addr + 1;
1774 if (ip->i_eattr == where)
1775 return 1;
1776 return 0;
1777 }
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
/**
 * gfs2_dir_add - Add a new filename into a directory
 * @inode: The directory inode
 * @name: The new name
 * @nip: The GFS2 inode to be linked into the directory
 * @da: The directory addition info
 *
 * If gfs2_diradd_alloc_required() already found free space, @da carries
 * the dirent and its buffer so the search need not be repeated. If there
 * is no free space, the directory is grown (exhash conversion, leaf split,
 * hash table doubling, or chaining a new leaf) and the insert retried.
 *
 * Returns: 0 on success, error code on failure
 */
int gfs2_dir_add(struct inode *inode, const struct qstr *name,
		 const struct gfs2_inode *nip, struct gfs2_diradd *da)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh = da->bh;
	struct gfs2_dirent *dent = da->dent;
	struct timespec64 tv;
	struct gfs2_leaf *leaf;
	int error;

	while(1) {
		/* No saved location, so search for free space now */
		if (da->bh == NULL) {
			dent = gfs2_dirent_search(inode, name,
						  gfs2_dirent_find_space, &bh);
		}
		if (dent) {
			if (IS_ERR(dent))
				return PTR_ERR(dent);
			/* Found space: fill in the new entry */
			dent = gfs2_init_dirent(inode, dent, name, bh);
			gfs2_inum_out(nip, dent);
			dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
			dent->de_rahead = cpu_to_be16(gfs2_inode_ra_len(nip));
			tv = current_time(&ip->i_inode);
			if (ip->i_diskflags & GFS2_DIF_EXHASH) {
				leaf = (struct gfs2_leaf *)bh->b_data;
				be16_add_cpu(&leaf->lf_entries, 1);
				leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
				leaf->lf_sec = cpu_to_be64(tv.tv_sec);
			}
			da->dent = NULL;
			da->bh = NULL;
			brelse(bh);
			ip->i_entries++;
			ip->i_inode.i_mtime = ip->i_inode.i_ctime = tv;
			if (S_ISDIR(nip->i_inode.i_mode))
				inc_nlink(&ip->i_inode);
			mark_inode_dirty(inode);
			error = 0;
			break;
		}
		/* No free space: convert a stuffed dir to exhash and retry */
		if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) {
			error = dir_make_exhash(inode);
			if (error)
				break;
			continue;
		}
		/* Try splitting the target leaf */
		error = dir_split_leaf(inode, name);
		if (error == 0)
			continue;
		if (error < 0)
			break;
		/* Split failed for lack of depth: double the hash table first */
		if (ip->i_depth < GFS2_DIR_MAX_DEPTH) {
			error = dir_double_exhash(ip);
			if (error)
				break;
			error = dir_split_leaf(inode, name);
			if (error < 0)
				break;
			if (error == 0)
				continue;
		}
		/* Last resort: chain an extra leaf block onto the hash chain */
		error = dir_new_leaf(inode, name);
		if (!error)
			continue;
		error = -ENOSPC;
		break;
	}
	return error;
}
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
/**
 * gfs2_dir_del - Delete a directory entry
 * @dip: The GFS2 directory inode
 * @dentry: The directory entry we want to delete
 *
 * Returns: 0 on success, error code on failure
 */
int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
{
	const struct qstr *name = &dentry->d_name;
	struct gfs2_dirent *dent, *prev = NULL;
	struct buffer_head *bh;
	struct timespec64 tv = current_time(&dip->i_inode);

	/* Returns _either_ the entry (if it's first in the block) or the
	   previous entry otherwise */
	dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh);
	if (!dent) {
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent)) {
		gfs2_consist_inode(dip);
		return PTR_ERR(dent);
	}

	/* If not the first entry, dent is the predecessor; step to the victim */
	if (gfs2_dirent_find(dent, name, NULL) == 0) {
		prev = dent;
		dent = (struct gfs2_dirent *)((char *)dent + be16_to_cpu(prev->de_rec_len));
	}

	dirent_del(dip, bh, prev, dent);
	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
		/* Keep the leaf's own entry count in step */
		struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
		u16 entries = be16_to_cpu(leaf->lf_entries);
		if (!entries)
			gfs2_consist_inode(dip);
		leaf->lf_entries = cpu_to_be16(--entries);
		leaf->lf_nsec = cpu_to_be32(tv.tv_nsec);
		leaf->lf_sec = cpu_to_be64(tv.tv_sec);
	}
	brelse(bh);

	if (!dip->i_entries)
		gfs2_consist_inode(dip);
	dip->i_entries--;
	dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
	if (d_is_dir(dentry))
		drop_nlink(&dip->i_inode);
	mark_inode_dirty(&dip->i_inode);

	return 0;
}
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935 int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
1936 const struct gfs2_inode *nip, unsigned int new_type)
1937 {
1938 struct buffer_head *bh;
1939 struct gfs2_dirent *dent;
1940
1941 dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh);
1942 if (!dent) {
1943 gfs2_consist_inode(dip);
1944 return -EIO;
1945 }
1946 if (IS_ERR(dent))
1947 return PTR_ERR(dent);
1948
1949 gfs2_trans_add_meta(dip->i_gl, bh);
1950 gfs2_inum_out(nip, dent);
1951 dent->de_type = cpu_to_be16(new_type);
1952 brelse(bh);
1953
1954 dip->i_inode.i_mtime = dip->i_inode.i_ctime = current_time(&dip->i_inode);
1955 mark_inode_dirty_sync(&dip->i_inode);
1956 return 0;
1957 }
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
/**
 * leaf_dealloc - Deallocate a directory leaf chain
 * @dip: the directory
 * @index: the hash table offset in the directory
 * @len: the number of hash table pointers to this leaf
 * @leaf_no: the starting leaf block number
 * @leaf_bh: buffer_head for the starting leaf
 * @last_dealloc: 1 if this is the final dealloc for the directory, else 0
 *
 * Frees every block in the leaf chain beginning at @leaf_no and zeroes
 * the corresponding stretch of the hash table.
 *
 * Returns: errno
 */
static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
			u64 leaf_no, struct buffer_head *leaf_bh,
			int last_dealloc)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_leaf *tmp_leaf;
	struct gfs2_rgrp_list rlist;
	struct buffer_head *bh, *dibh;
	u64 blk, nblk;
	unsigned int rg_blocks = 0, l_blocks = 0;
	char *ht;
	unsigned int x, size = len * sizeof(u64);
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	/* Zero-filled buffer used to overwrite the hash table slots */
	ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
	if (ht == NULL)
		ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO);
	if (!ht)
		return -ENOMEM;

	error = gfs2_quota_hold(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		goto out;

	/* First pass: count the leaves and collect their resource groups */
	bh = leaf_bh;

	for (blk = leaf_no; blk; blk = nblk) {
		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);
			if (error)
				goto out_rlist;
		}
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		/* Only release buffers we obtained ourselves, not @leaf_bh */
		if (blk != leaf_no)
			brelse(bh);

		gfs2_rlist_add(dip, &rlist, blk);
		l_blocks++;
	}

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);

		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	error = gfs2_trans_begin(sdp,
			rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
			RES_DINODE + RES_STATFS + RES_QUOTA, RES_DINODE +
			l_blocks);
	if (error)
		goto out_rg_gunlock;

	/* Second pass: actually free the leaf blocks */
	bh = leaf_bh;

	for (blk = leaf_no; blk; blk = nblk) {
		struct gfs2_rgrpd *rgd;

		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);
			if (error)
				goto out_end_trans;
		}
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		if (blk != leaf_no)
			brelse(bh);

		rgd = gfs2_blk2rgrpd(sdp, blk, true);
		gfs2_free_meta(dip, rgd, blk, 1);
		gfs2_add_inode_blocks(&dip->i_inode, -1);
	}

	/* Clear the hash table slots that pointed at this chain */
	error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
	if (error != size) {
		if (error >= 0)
			error = -EIO;
		goto out_end_trans;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		goto out_end_trans;

	gfs2_trans_add_meta(dip->i_gl, dibh);
	/* On the last dealloc, make this a regular file in case we crash.
	   (We don't want to free these blocks a second time.) */
	if (last_dealloc)
		dip->i_inode.i_mode = S_IFREG;
	gfs2_dinode_out(dip, dibh->b_data);
	brelse(dibh);

out_end_trans:
	gfs2_trans_end(sdp);
out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
	gfs2_quota_unhold(dip);
out:
	kvfree(ht);
	return error;
}
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
/**
 * gfs2_dir_exhash_dealloc - Free all the leaf blocks of an exhash directory
 * @dip: the directory
 *
 * Walks the hash table, deallocating each leaf chain in turn; the final
 * call to leaf_dealloc() also changes the on-disk inode type to regular.
 *
 * Returns: errno
 */
int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
{
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	u32 hsize, len;
	u32 index = 0, next_index;
	__be64 *lp;
	u64 leaf_no;
	int error = 0, last;

	hsize = BIT(dip->i_depth);

	lp = gfs2_dir_get_hash_table(dip);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	while (index < hsize) {
		leaf_no = be64_to_cpu(lp[index]);
		if (leaf_no) {
			error = get_leaf(dip, leaf_no, &bh);
			if (error)
				goto out;
			leaf = (struct gfs2_leaf *)bh->b_data;
			/* Number of hash table slots covered by this leaf */
			len = BIT(dip->i_depth - be16_to_cpu(leaf->lf_depth));

			next_index = (index & ~(len - 1)) + len;
			last = ((next_index >= hsize) ? 1 : 0);
			error = leaf_dealloc(dip, index, len, leaf_no, bh,
					     last);
			brelse(bh);
			if (error)
				goto out;
			index = next_index;
		} else
			index++;
	}

	/* index overshooting hsize means a corrupt leaf depth was seen */
	if (index != hsize) {
		gfs2_consist_inode(dip);
		error = -EIO;
	}

out:

	return error;
}
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155 int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name,
2156 struct gfs2_diradd *da)
2157 {
2158 struct gfs2_inode *ip = GFS2_I(inode);
2159 struct gfs2_sbd *sdp = GFS2_SB(inode);
2160 const unsigned int extra = sizeof(struct gfs2_dinode) - sizeof(struct gfs2_leaf);
2161 struct gfs2_dirent *dent;
2162 struct buffer_head *bh;
2163
2164 da->nr_blocks = 0;
2165 da->bh = NULL;
2166 da->dent = NULL;
2167
2168 dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);
2169 if (!dent) {
2170 da->nr_blocks = sdp->sd_max_dirres;
2171 if (!(ip->i_diskflags & GFS2_DIF_EXHASH) &&
2172 (GFS2_DIRENT_SIZE(name->len) < extra))
2173 da->nr_blocks = 1;
2174 return 0;
2175 }
2176 if (IS_ERR(dent))
2177 return PTR_ERR(dent);
2178
2179 if (da->save_loc) {
2180 da->bh = bh;
2181 da->dent = dent;
2182 } else {
2183 brelse(bh);
2184 }
2185 return 0;
2186 }
2187