0001
0002
0003
0004
0005
0006 #include <linux/errno.h>
0007 #include <linux/fs.h>
0008 #include <linux/mount.h>
0009 #include <linux/dqblk_v2.h>
0010 #include <linux/kernel.h>
0011 #include <linux/init.h>
0012 #include <linux/module.h>
0013 #include <linux/slab.h>
0014 #include <linux/quotaops.h>
0015
0016 #include <asm/byteorder.h>
0017
0018 #include "quota_tree.h"
0019
0020 MODULE_AUTHOR("Jan Kara");
0021 MODULE_DESCRIPTION("Quota trie support");
0022 MODULE_LICENSE("GPL");
0023
0024 #define __QUOTA_QT_PARANOIA
0025
0026 static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
0027 {
0028 unsigned int epb = info->dqi_usable_bs >> 2;
0029
0030 depth = info->dqi_qtree_depth - depth - 1;
0031 while (depth--)
0032 id /= epb;
0033 return id % epb;
0034 }
0035
0036 static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
0037 {
0038 qid_t id = from_kqid(&init_user_ns, qid);
0039
0040 return __get_index(info, id, depth);
0041 }
0042
0043
0044 static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
0045 {
0046 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
0047 / info->dqi_entry_size;
0048 }
0049
0050 static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
0051 {
0052 struct super_block *sb = info->dqi_sb;
0053
0054 memset(buf, 0, info->dqi_usable_bs);
0055 return sb->s_op->quota_read(sb, info->dqi_type, buf,
0056 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
0057 }
0058
0059 static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
0060 {
0061 struct super_block *sb = info->dqi_sb;
0062 ssize_t ret;
0063
0064 ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
0065 info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
0066 if (ret != info->dqi_usable_bs) {
0067 quota_error(sb, "dquota write failed");
0068 if (ret >= 0)
0069 ret = -EIO;
0070 }
0071 return ret;
0072 }
0073
0074
0075 static int get_free_dqblk(struct qtree_mem_dqinfo *info)
0076 {
0077 char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
0078 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
0079 int ret, blk;
0080
0081 if (!buf)
0082 return -ENOMEM;
0083 if (info->dqi_free_blk) {
0084 blk = info->dqi_free_blk;
0085 ret = read_blk(info, blk, buf);
0086 if (ret < 0)
0087 goto out_buf;
0088 info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
0089 }
0090 else {
0091 memset(buf, 0, info->dqi_usable_bs);
0092
0093 ret = write_blk(info, info->dqi_blocks, buf);
0094 if (ret < 0)
0095 goto out_buf;
0096 blk = info->dqi_blocks++;
0097 }
0098 mark_info_dirty(info->dqi_sb, info->dqi_type);
0099 ret = blk;
0100 out_buf:
0101 kfree(buf);
0102 return ret;
0103 }
0104
0105
0106 static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
0107 {
0108 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
0109 int err;
0110
0111 dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
0112 dh->dqdh_prev_free = cpu_to_le32(0);
0113 dh->dqdh_entries = cpu_to_le16(0);
0114 err = write_blk(info, blk, buf);
0115 if (err < 0)
0116 return err;
0117 info->dqi_free_blk = blk;
0118 mark_info_dirty(info->dqi_sb, info->dqi_type);
0119 return 0;
0120 }
0121
0122
0123 static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
0124 uint blk)
0125 {
0126 char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
0127 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
0128 uint nextblk = le32_to_cpu(dh->dqdh_next_free);
0129 uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
0130 int err;
0131
0132 if (!tmpbuf)
0133 return -ENOMEM;
0134 if (nextblk) {
0135 err = read_blk(info, nextblk, tmpbuf);
0136 if (err < 0)
0137 goto out_buf;
0138 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
0139 dh->dqdh_prev_free;
0140 err = write_blk(info, nextblk, tmpbuf);
0141 if (err < 0)
0142 goto out_buf;
0143 }
0144 if (prevblk) {
0145 err = read_blk(info, prevblk, tmpbuf);
0146 if (err < 0)
0147 goto out_buf;
0148 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
0149 dh->dqdh_next_free;
0150 err = write_blk(info, prevblk, tmpbuf);
0151 if (err < 0)
0152 goto out_buf;
0153 } else {
0154 info->dqi_free_entry = nextblk;
0155 mark_info_dirty(info->dqi_sb, info->dqi_type);
0156 }
0157 kfree(tmpbuf);
0158 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
0159
0160 if (write_blk(info, blk, buf) < 0)
0161 quota_error(info->dqi_sb, "Can't write block (%u) "
0162 "with free entries", blk);
0163 return 0;
0164 out_buf:
0165 kfree(tmpbuf);
0166 return err;
0167 }
0168
0169
0170 static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
0171 uint blk)
0172 {
0173 char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
0174 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
0175 int err;
0176
0177 if (!tmpbuf)
0178 return -ENOMEM;
0179 dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
0180 dh->dqdh_prev_free = cpu_to_le32(0);
0181 err = write_blk(info, blk, buf);
0182 if (err < 0)
0183 goto out_buf;
0184 if (info->dqi_free_entry) {
0185 err = read_blk(info, info->dqi_free_entry, tmpbuf);
0186 if (err < 0)
0187 goto out_buf;
0188 ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
0189 cpu_to_le32(blk);
0190 err = write_blk(info, info->dqi_free_entry, tmpbuf);
0191 if (err < 0)
0192 goto out_buf;
0193 }
0194 kfree(tmpbuf);
0195 info->dqi_free_entry = blk;
0196 mark_info_dirty(info->dqi_sb, info->dqi_type);
0197 return 0;
0198 out_buf:
0199 kfree(tmpbuf);
0200 return err;
0201 }
0202
0203
0204 int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
0205 {
0206 int i;
0207
0208 for (i = 0; i < info->dqi_entry_size; i++)
0209 if (disk[i])
0210 return 0;
0211 return 1;
0212 }
0213 EXPORT_SYMBOL(qtree_entry_unused);
0214
0215
/*
 * Find a free entry for @dquot in a data block with spare room, taking a
 * fresh block via get_free_dqblk() when the free-entry list is empty.
 * On success returns the block number and sets dquot->dq_off to the
 * entry's file offset; on failure returns 0 with *err set negative.
 */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		/* Reuse the first block that still has a free entry. */
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		/* get_free_dqblk() returns negative errors as int */
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* The freshly allocated block becomes the sole member of the
		 * free-entry list; its zeroed header needs no on-disk edit. */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Will this insertion fill the block? Unlink it from the list now. */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Scan the block for the first all-zero (unused) entry slot. */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	/* The header said there was room, so a full scan means corruption. */
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	/* Record where this dquot now lives inside the quota file. */
	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}
0287
0288
/*
 * Insert a reference to @dquot into the tree block *treeblk at @depth,
 * recursing towards the leaves and allocating tree blocks on the way
 * down as needed. At the leaf level a free data-block entry is claimed
 * for the dquot. Returns >= 0 on success, negative error otherwise.
 */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		/* This tree block does not exist yet - allocate it. */
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;	/* remember allocation for error rollback */
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;	/* child reference must be written below */
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		/* A leaf-level reference must not already exist on insert. */
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth+1);
	}
	if (newson && ret >= 0) {
		/* Hook the new child block into this tree block on disk. */
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		/* Insertion failed - give back the block we allocated. */
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}
0344
0345
0346 static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
0347 struct dquot *dquot)
0348 {
0349 int tmp = QT_TREEOFF;
0350
0351 #ifdef __QUOTA_QT_PARANOIA
0352 if (info->dqi_blocks <= QT_TREEOFF) {
0353 quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
0354 return -EIO;
0355 }
0356 #endif
0357 return do_insert_tree(info, dquot, &tmp, 0);
0358 }
0359
0360
0361
0362
0363
/*
 * Write @dquot's usage and limits to the quota file, allocating space in
 * the quota tree first if the structure has no on-disk location yet
 * (dq_off == 0). Returns 0 on success or a negative error.
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);

	if (!ddquot)
		return -ENOMEM;

	/* No on-disk entry yet - create one in the tree. */
	if (!dquot->dq_off) {
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	/* Snapshot the in-memory dqblk into disk format under its lock. */
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;	/* short write */
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);

	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
0402
0403
/*
 * Free @dquot's entry in data block @blk: clear the entry and either
 * release the whole block (it held the last entry) or re-list the block
 * as having free entries (it just gained its first free slot).
 */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	/* Tree reference and the dquot's stored offset must agree. */
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			    "other block (%u) than it should (%u)", blk,
			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		ret = -EIO;
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {
		/* Block is now empty - move it to the free-block list. */
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		/* Zero only this dquot's entry within the block. */
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* First free entry in this block - put it on the
			 * free-entry list (which also writes it back). */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
				    "data block (%u) to free entry list", blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	/* The dquot no longer has an on-disk home. */
	dquot->dq_off = 0;
out_buf:
	kfree(buf);
	return ret;
}
0464
0465
/*
 * Remove @dquot's reference from tree block *blk at @depth, recursing to
 * the leaf. Tree blocks left with no references are freed on the way
 * back up, with *blk set to 0 so the parent clears its own reference.
 */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	/* Reject references pointing outside the quota file. */
	if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) {
		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
			    newblk, info->dqi_blocks);
		ret = -EUCLEAN;
		goto out_buf;
	}

	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;	/* leaf entry is gone - clear our reference */
	} else {
		ret = remove_tree(info, dquot, &newblk, depth+1);
	}
	if (ret >= 0 && !newblk) {
		int i;
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Is the block now completely unreferenced? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Free the empty block, but never the tree root. */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}
0519
0520
0521 int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
0522 {
0523 uint tmp = QT_TREEOFF;
0524
0525 if (!dquot->dq_off)
0526 return 0;
0527 return remove_tree(info, dquot, &tmp, 0);
0528 }
0529 EXPORT_SYMBOL(qtree_delete_dquot);
0530
0531
/*
 * Find the file offset of @dquot's entry inside data block @blk by
 * scanning for a matching on-disk id. Returns the offset, or a negative
 * error (-EIO when the tree references the block but no entry matches).
 */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	/* Linear scan over the entries following the block header. */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		/* File offset = block start + header + entry index. */
		ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
		  qt_disk_dqdbheader) + i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}
0568
0569
/*
 * Walk the quota tree from block @blk at @depth towards the leaf and
 * return the file offset of @dquot's entry. Returns 0 when no entry
 * exists, or a negative error.
 */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blk);
		goto out_buf;
	}
	ret = 0;	/* "no entry" until proven otherwise */
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* no reference - the entry does not exist */
		goto out_buf;
	/* Reject references pointing outside the quota file. */
	if (blk < QT_TREEOFF || blk >= info->dqi_blocks) {
		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
			    blk, info->dqi_blocks);
		ret = -EUCLEAN;
		goto out_buf;
	}

	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth+1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}
0604
0605
/*
 * Find the file offset of @dquot's entry, walking the quota tree from
 * its root block. Returns 0 when no entry exists, or a negative error.
 */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
				  struct dquot *dquot)
{
	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}
0611
/*
 * Read @dquot's on-disk structure into memory. A dquot without an
 * on-disk entry is marked fake (DQ_FAKE_B) with a zeroed dqblk rather
 * than treated as an error. Returns 0 on success or a negative error.
 */
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Quota file invalidated while this read was in flight? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Look up the entry's file offset unless it is already cached. */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {
			/* 0 = no entry, negative = lookup error. */
			if (offset < 0)
				quota_error(sb,"Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;	/* short read */
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	/* Convert under dq_dqb_lock; an entry with no limits at all is
	 * flagged as fake. */
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
0673
0674
0675
0676 int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
0677 {
0678 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
0679 !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
0680 return qtree_delete_dquot(info, dquot);
0681 return 0;
0682 }
0683 EXPORT_SYMBOL(qtree_release_dquot);
0684
/*
 * Scan tree block @blk at @depth for the first existing id >= *id,
 * advancing *id past every empty subtree the scan skips. Returns 0 when
 * *id now refers to an existing entry, -ENOENT when this subtree holds
 * none, or another negative error.
 */
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;	/* refs per block */
	unsigned int level_inc = 1;	/* ids spanned by one ref at @depth */
	int i;

	if (!buf)
		return -ENOMEM;

	/* One reference at this depth covers epb^(levels below) ids. */
	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;

	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	/* Start at the reference that covers *id and walk forward. */
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		if (ref[i] == cpu_to_le32(0)) {
			/* Empty subtree - skip all ids it would cover. */
			*id += level_inc;
			continue;
		}
		if (depth == info->dqi_qtree_depth - 1) {
			/* Leaf level: a non-zero ref means *id exists. */
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
		if (ret != -ENOENT)
			break;
	}
	/* Ran off the end of the block without finding anything. */
	if (i == epb) {
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}
0728
0729 int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
0730 {
0731 qid_t id = from_kqid(&init_user_ns, *qid);
0732 int ret;
0733
0734 ret = find_next_id(info, &id, QT_TREEOFF, 0);
0735 if (ret < 0)
0736 return ret;
0737 *qid = make_kqid(&init_user_ns, qid->type, id);
0738 return 0;
0739 }
0740 EXPORT_SYMBOL(qtree_get_next_id);