0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/slab.h>
0013 #include <linux/pagemap.h>
0014 #include <linux/log2.h>
0015
0016 #include "hfsplus_fs.h"
0017 #include "hfsplus_raw.h"
0018
0019
0020
0021
0022
#define CLUMP_ENTRIES 15

/*
 * Default clump sizes, in megabytes, for the three special B-tree files,
 * indexed by volume-size bracket.  Each row holds three values:
 *   column 0 - attributes tree, column 1 - catalog tree,
 *   column 2 - extents (and any other) tree,
 * matching the file_id switch in hfsplus_calc_btree_clump_size().
 * The row is chosen there by shifting the volume's sector count right
 * by 22 and counting remaining bits, so each successive row covers a
 * volume twice as large as the previous one (rows presumably span
 * roughly 16GB .. 16TB -- TODO confirm against the sector size used).
 * The value is later multiplied by 1024 * 1024 to get bytes.
 */
static short clumptbl[CLUMP_ENTRIES * 3] = {
	/* Attributes  Catalog  Extents */
	4, 4, 4,
	6, 6, 4,
	8, 8, 4,
	11, 11, 5,
	64, 32, 5,
	84, 49, 6,
	111, 74, 7,
	147, 111, 8,
	194, 169, 9,
	256, 256, 11,
	294, 294, 14,
	338, 338, 16,
	388, 388, 20,
	446, 446, 25,
	512, 512, 32
};
0074
0075 u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
0076 u64 sectors, int file_id)
0077 {
0078 u32 mod = max(node_size, block_size);
0079 u32 clump_size;
0080 int column;
0081 int i;
0082
0083
0084 switch (file_id) {
0085 case HFSPLUS_ATTR_CNID:
0086 column = 0;
0087 break;
0088 case HFSPLUS_CAT_CNID:
0089 column = 1;
0090 break;
0091 default:
0092 column = 2;
0093 break;
0094 }
0095
0096
0097
0098
0099
0100 if (sectors < 0x200000) {
0101 clump_size = sectors << 2;
0102 if (clump_size < (8 * node_size))
0103 clump_size = 8 * node_size;
0104 } else {
0105
0106 for (i = 0, sectors = sectors >> 22;
0107 sectors && (i < CLUMP_ENTRIES - 1);
0108 ++i, sectors = sectors >> 1) {
0109
0110 }
0111
0112 clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
0113 }
0114
0115
0116
0117
0118
0119 clump_size /= mod;
0120 clump_size *= mod;
0121
0122
0123
0124
0125
0126 if (clump_size == 0)
0127 clump_size = mod;
0128
0129 return clump_size;
0130 }
0131
0132
0133 struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
0134 {
0135 struct hfs_btree *tree;
0136 struct hfs_btree_header_rec *head;
0137 struct address_space *mapping;
0138 struct inode *inode;
0139 struct page *page;
0140 unsigned int size;
0141
0142 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
0143 if (!tree)
0144 return NULL;
0145
0146 mutex_init(&tree->tree_lock);
0147 spin_lock_init(&tree->hash_lock);
0148 tree->sb = sb;
0149 tree->cnid = id;
0150 inode = hfsplus_iget(sb, id);
0151 if (IS_ERR(inode))
0152 goto free_tree;
0153 tree->inode = inode;
0154
0155 if (!HFSPLUS_I(tree->inode)->first_blocks) {
0156 pr_err("invalid btree extent records (0 size)\n");
0157 goto free_inode;
0158 }
0159
0160 mapping = tree->inode->i_mapping;
0161 page = read_mapping_page(mapping, 0, NULL);
0162 if (IS_ERR(page))
0163 goto free_inode;
0164
0165
0166 head = (struct hfs_btree_header_rec *)(kmap(page) +
0167 sizeof(struct hfs_bnode_desc));
0168 tree->root = be32_to_cpu(head->root);
0169 tree->leaf_count = be32_to_cpu(head->leaf_count);
0170 tree->leaf_head = be32_to_cpu(head->leaf_head);
0171 tree->leaf_tail = be32_to_cpu(head->leaf_tail);
0172 tree->node_count = be32_to_cpu(head->node_count);
0173 tree->free_nodes = be32_to_cpu(head->free_nodes);
0174 tree->attributes = be32_to_cpu(head->attributes);
0175 tree->node_size = be16_to_cpu(head->node_size);
0176 tree->max_key_len = be16_to_cpu(head->max_key_len);
0177 tree->depth = be16_to_cpu(head->depth);
0178
0179
0180 switch (id) {
0181 case HFSPLUS_EXT_CNID:
0182 if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
0183 pr_err("invalid extent max_key_len %d\n",
0184 tree->max_key_len);
0185 goto fail_page;
0186 }
0187 if (tree->attributes & HFS_TREE_VARIDXKEYS) {
0188 pr_err("invalid extent btree flag\n");
0189 goto fail_page;
0190 }
0191
0192 tree->keycmp = hfsplus_ext_cmp_key;
0193 break;
0194 case HFSPLUS_CAT_CNID:
0195 if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
0196 pr_err("invalid catalog max_key_len %d\n",
0197 tree->max_key_len);
0198 goto fail_page;
0199 }
0200 if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
0201 pr_err("invalid catalog btree flag\n");
0202 goto fail_page;
0203 }
0204
0205 if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
0206 (head->key_type == HFSPLUS_KEY_BINARY))
0207 tree->keycmp = hfsplus_cat_bin_cmp_key;
0208 else {
0209 tree->keycmp = hfsplus_cat_case_cmp_key;
0210 set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
0211 }
0212 break;
0213 case HFSPLUS_ATTR_CNID:
0214 if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
0215 pr_err("invalid attributes max_key_len %d\n",
0216 tree->max_key_len);
0217 goto fail_page;
0218 }
0219 tree->keycmp = hfsplus_attr_bin_cmp_key;
0220 break;
0221 default:
0222 pr_err("unknown B*Tree requested\n");
0223 goto fail_page;
0224 }
0225
0226 if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
0227 pr_err("invalid btree flag\n");
0228 goto fail_page;
0229 }
0230
0231 size = tree->node_size;
0232 if (!is_power_of_2(size))
0233 goto fail_page;
0234 if (!tree->node_count)
0235 goto fail_page;
0236
0237 tree->node_size_shift = ffs(size) - 1;
0238
0239 tree->pages_per_bnode =
0240 (tree->node_size + PAGE_SIZE - 1) >>
0241 PAGE_SHIFT;
0242
0243 kunmap(page);
0244 put_page(page);
0245 return tree;
0246
0247 fail_page:
0248 put_page(page);
0249 free_inode:
0250 tree->inode->i_mapping->a_ops = &hfsplus_aops;
0251 iput(tree->inode);
0252 free_tree:
0253 kfree(tree);
0254 return NULL;
0255 }
0256
0257
0258 void hfs_btree_close(struct hfs_btree *tree)
0259 {
0260 struct hfs_bnode *node;
0261 int i;
0262
0263 if (!tree)
0264 return;
0265
0266 for (i = 0; i < NODE_HASH_SIZE; i++) {
0267 while ((node = tree->node_hash[i])) {
0268 tree->node_hash[i] = node->next_hash;
0269 if (atomic_read(&node->refcnt))
0270 pr_crit("node %d:%d "
0271 "still has %d user(s)!\n",
0272 node->tree->cnid, node->this,
0273 atomic_read(&node->refcnt));
0274 hfs_bnode_free(node);
0275 tree->node_hash_cnt--;
0276 }
0277 }
0278 iput(tree->inode);
0279 kfree(tree);
0280 }
0281
0282 int hfs_btree_write(struct hfs_btree *tree)
0283 {
0284 struct hfs_btree_header_rec *head;
0285 struct hfs_bnode *node;
0286 struct page *page;
0287
0288 node = hfs_bnode_find(tree, 0);
0289 if (IS_ERR(node))
0290
0291 return -EIO;
0292
0293 page = node->page[0];
0294 head = (struct hfs_btree_header_rec *)(kmap(page) +
0295 sizeof(struct hfs_bnode_desc));
0296
0297 head->root = cpu_to_be32(tree->root);
0298 head->leaf_count = cpu_to_be32(tree->leaf_count);
0299 head->leaf_head = cpu_to_be32(tree->leaf_head);
0300 head->leaf_tail = cpu_to_be32(tree->leaf_tail);
0301 head->node_count = cpu_to_be32(tree->node_count);
0302 head->free_nodes = cpu_to_be32(tree->free_nodes);
0303 head->attributes = cpu_to_be32(tree->attributes);
0304 head->depth = cpu_to_be16(tree->depth);
0305
0306 kunmap(page);
0307 set_page_dirty(page);
0308 hfs_bnode_put(node);
0309 return 0;
0310 }
0311
/*
 * Extend the B-tree's allocation bitmap by creating a new map node at
 * index @idx and chaining it after @prev.  Returns the new node, or an
 * ERR_PTR from hfs_bnode_create() on failure.
 */
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	/* The new map node itself consumes one free node. */
	tree->free_nodes--;
	/* Link it into the on-disk map-node chain after @prev. */
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	/* Initialize as an empty map node holding a single record. */
	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	/*
	 * Set the high bit of the first bitmap word (the map record starts
	 * at byte 14) -- presumably marking this map node itself, the first
	 * node the new map covers, as allocated.
	 */
	hfs_bnode_write_u16(node, 14, 0x8000);
	/*
	 * Record-offset table at the end of the node: the map record starts
	 * at byte 14 and ends at node_size - 6 (free space begins there).
	 */
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
0344
0345
0346 int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
0347 {
0348 struct inode *inode = tree->inode;
0349 struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
0350 u32 count;
0351 int res;
0352
0353 if (rsvd_nodes <= 0)
0354 return 0;
0355
0356 while (tree->free_nodes < rsvd_nodes) {
0357 res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
0358 if (res)
0359 return res;
0360 hip->phys_size = inode->i_size =
0361 (loff_t)hip->alloc_blocks <<
0362 HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
0363 hip->fs_blocks =
0364 hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
0365 inode_set_bytes(inode, inode->i_size);
0366 count = inode->i_size >> tree->node_size_shift;
0367 tree->free_nodes += count - tree->node_count;
0368 tree->node_count = count;
0369 }
0370 return 0;
0371 }
0372
/*
 * Allocate one node from the B-tree's allocation bitmap and return the
 * freshly created bnode for it, or an ERR_PTR on failure.
 *
 * The bitmap starts in record 2 of the header node (node 0) and, when
 * that fills, continues in a chain of HFS_NODE_MAP nodes; a new map
 * node is appended via hfs_bmap_new_bmap() when the chain runs out.
 */
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	/* Make sure there is at least one free node to hand out. */
	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	/* Record 2 of the header node holds the first bitmap chunk. */
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	/* Translate the record offset to a page + in-page offset. */
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;	/* running node index of the bit being examined */

	for (;;) {
		/* Scan the current bitmap record one byte at a time. */
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				/* A clear bit exists; find and set it. */
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			/* Advance a byte, remapping when we cross a page. */
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		/* This record is full; follow the chain to the next map. */
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		/* Map nodes keep their bitmap in record 0. */
		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}
0447
/*
 * Return @node to the free pool by clearing its bit in the B-tree's
 * allocation bitmap.  Logs and bails out (without crashing) if the
 * bitmap chain is inconsistent or the node was already free.
 */
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	/* Start at the header node; record 2 holds the first bitmap chunk. */
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	/* Walk the map-node chain until we reach the record covering nidx. */
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. "
					"bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! "
					"(%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		/* Map nodes keep their bitmap in record 0. */
		len = hfs_brec_lenoff(node, 0, &off);
	}
	/* Locate the byte holding this node's bit. */
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_MASK;
	/* Bit 7-(nidx%8): the highest bit of each byte is the lowest index. */
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode "
				"%u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	/* Clear the bit and account for the newly freed node. */
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}