0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/pagemap.h>
0013 #include <linux/slab.h>
0014 #include <linux/log2.h>
0015
0016 #include "btree.h"
0017
0018
0019 struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
0020 {
0021 struct hfs_btree *tree;
0022 struct hfs_btree_header_rec *head;
0023 struct address_space *mapping;
0024 struct page *page;
0025 unsigned int size;
0026
0027 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
0028 if (!tree)
0029 return NULL;
0030
0031 mutex_init(&tree->tree_lock);
0032 spin_lock_init(&tree->hash_lock);
0033
0034 tree->sb = sb;
0035 tree->cnid = id;
0036 tree->keycmp = keycmp;
0037
0038 tree->inode = iget_locked(sb, id);
0039 if (!tree->inode)
0040 goto free_tree;
0041 BUG_ON(!(tree->inode->i_state & I_NEW));
0042 {
0043 struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
0044 HFS_I(tree->inode)->flags = 0;
0045 mutex_init(&HFS_I(tree->inode)->extents_lock);
0046 switch (id) {
0047 case HFS_EXT_CNID:
0048 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
0049 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
0050 if (HFS_I(tree->inode)->alloc_blocks >
0051 HFS_I(tree->inode)->first_blocks) {
0052 pr_err("invalid btree extent records\n");
0053 unlock_new_inode(tree->inode);
0054 goto free_inode;
0055 }
0056
0057 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
0058 break;
0059 case HFS_CAT_CNID:
0060 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
0061 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
0062
0063 if (!HFS_I(tree->inode)->first_blocks) {
0064 pr_err("invalid btree extent records (0 size)\n");
0065 unlock_new_inode(tree->inode);
0066 goto free_inode;
0067 }
0068
0069 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
0070 break;
0071 default:
0072 BUG();
0073 }
0074 }
0075 unlock_new_inode(tree->inode);
0076
0077 mapping = tree->inode->i_mapping;
0078 page = read_mapping_page(mapping, 0, NULL);
0079 if (IS_ERR(page))
0080 goto free_inode;
0081
0082
0083 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
0084 tree->root = be32_to_cpu(head->root);
0085 tree->leaf_count = be32_to_cpu(head->leaf_count);
0086 tree->leaf_head = be32_to_cpu(head->leaf_head);
0087 tree->leaf_tail = be32_to_cpu(head->leaf_tail);
0088 tree->node_count = be32_to_cpu(head->node_count);
0089 tree->free_nodes = be32_to_cpu(head->free_nodes);
0090 tree->attributes = be32_to_cpu(head->attributes);
0091 tree->node_size = be16_to_cpu(head->node_size);
0092 tree->max_key_len = be16_to_cpu(head->max_key_len);
0093 tree->depth = be16_to_cpu(head->depth);
0094
0095 size = tree->node_size;
0096 if (!is_power_of_2(size))
0097 goto fail_page;
0098 if (!tree->node_count)
0099 goto fail_page;
0100 switch (id) {
0101 case HFS_EXT_CNID:
0102 if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
0103 pr_err("invalid extent max_key_len %d\n",
0104 tree->max_key_len);
0105 goto fail_page;
0106 }
0107 break;
0108 case HFS_CAT_CNID:
0109 if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
0110 pr_err("invalid catalog max_key_len %d\n",
0111 tree->max_key_len);
0112 goto fail_page;
0113 }
0114 break;
0115 default:
0116 BUG();
0117 }
0118
0119 tree->node_size_shift = ffs(size) - 1;
0120 tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
0121
0122 kunmap(page);
0123 put_page(page);
0124 return tree;
0125
0126 fail_page:
0127 put_page(page);
0128 free_inode:
0129 tree->inode->i_mapping->a_ops = &hfs_aops;
0130 iput(tree->inode);
0131 free_tree:
0132 kfree(tree);
0133 return NULL;
0134 }
0135
0136
0137 void hfs_btree_close(struct hfs_btree *tree)
0138 {
0139 struct hfs_bnode *node;
0140 int i;
0141
0142 if (!tree)
0143 return;
0144
0145 for (i = 0; i < NODE_HASH_SIZE; i++) {
0146 while ((node = tree->node_hash[i])) {
0147 tree->node_hash[i] = node->next_hash;
0148 if (atomic_read(&node->refcnt))
0149 pr_err("node %d:%d still has %d user(s)!\n",
0150 node->tree->cnid, node->this,
0151 atomic_read(&node->refcnt));
0152 hfs_bnode_free(node);
0153 tree->node_hash_cnt--;
0154 }
0155 }
0156 iput(tree->inode);
0157 kfree(tree);
0158 }
0159
0160 void hfs_btree_write(struct hfs_btree *tree)
0161 {
0162 struct hfs_btree_header_rec *head;
0163 struct hfs_bnode *node;
0164 struct page *page;
0165
0166 node = hfs_bnode_find(tree, 0);
0167 if (IS_ERR(node))
0168
0169 return;
0170
0171 page = node->page[0];
0172 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
0173
0174 head->root = cpu_to_be32(tree->root);
0175 head->leaf_count = cpu_to_be32(tree->leaf_count);
0176 head->leaf_head = cpu_to_be32(tree->leaf_head);
0177 head->leaf_tail = cpu_to_be32(tree->leaf_tail);
0178 head->node_count = cpu_to_be32(tree->node_count);
0179 head->free_nodes = cpu_to_be32(tree->free_nodes);
0180 head->attributes = cpu_to_be32(tree->attributes);
0181 head->depth = cpu_to_be16(tree->depth);
0182
0183 kunmap(page);
0184 set_page_dirty(page);
0185 hfs_bnode_put(node);
0186 }
0187
/*
 * Create a new allocation-map node at index @idx and chain it after @prev
 * by patching @prev's on-disk "next" link.
 *
 * Consumes one entry from tree->free_nodes; the caller must have reserved
 * space beforehand (see hfs_bmap_reserve), otherwise this panics.
 *
 * Returns the new map node, or an ERR_PTR from hfs_bnode_create().
 */
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	/* caller is expected to have reserved nodes; no recovery path here */
	if (!tree->free_nodes)
		panic("FIXME!!!");
	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	/* link the new map node into the on-disk chain after @prev */
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	/* write a fresh node descriptor: a map node with one record */
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	/*
	 * First bitmap byte at offset 14 (right after the descriptor):
	 * 0x8000 presumably marks this map node's own slot allocated —
	 * matches the header node's map layout; verify against the format.
	 */
	hfs_bnode_write_u16(node, 14, 0x8000);
	/* record-offset table at the end of the node: record 0 starts at 14,
	 * free space starts at node_size - 6 */
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
0222
0223
0224 int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
0225 {
0226 struct inode *inode = tree->inode;
0227 u32 count;
0228 int res;
0229
0230 while (tree->free_nodes < rsvd_nodes) {
0231 res = hfs_extend_file(inode);
0232 if (res)
0233 return res;
0234 HFS_I(inode)->phys_size = inode->i_size =
0235 (loff_t)HFS_I(inode)->alloc_blocks *
0236 HFS_SB(tree->sb)->alloc_blksz;
0237 HFS_I(inode)->fs_blocks = inode->i_size >>
0238 tree->sb->s_blocksize_bits;
0239 inode_set_bytes(inode, inode->i_size);
0240 count = inode->i_size >> tree->node_size_shift;
0241 tree->free_nodes += count - tree->node_count;
0242 tree->node_count = count;
0243 }
0244 return 0;
0245 }
0246
/*
 * hfs_bmap_alloc - allocate a node from the tree's allocation bitmap
 * @tree: tree to allocate from
 *
 * Scans the bitmap starting with record 2 of the header node, then each
 * chained map node (record 0), for the first clear bit. Sets that bit,
 * decrements free_nodes, and returns a freshly created bnode at the found
 * index. Grows the bitmap chain with a new map node when it runs out.
 *
 * Returns the new bnode or an ERR_PTR on failure.
 */
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	/* make sure a free node actually exists before scanning */
	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	/* start at the header node (0); its record 2 is the first bitmap */
	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	/* position a byte cursor (page + offset) at the bitmap data */
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;	/* node index corresponding to the current bit */

	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				/* find the first clear bit, MSB first */
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;	/* claim it */
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree, idx);
					}
				}
			}
			/* advance the cursor, remapping at page boundaries */
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		/* bitmap in this node exhausted; follow the map-node chain */
		nidx = node->next;
		if (!nidx) {
			printk(KERN_DEBUG "create new bmap node...\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		/* map nodes keep their bitmap in record 0 */
		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}
0320
0321 void hfs_bmap_free(struct hfs_bnode *node)
0322 {
0323 struct hfs_btree *tree;
0324 struct page *page;
0325 u16 off, len;
0326 u32 nidx;
0327 u8 *data, byte, m;
0328
0329 hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
0330 tree = node->tree;
0331 nidx = node->this;
0332 node = hfs_bnode_find(tree, 0);
0333 if (IS_ERR(node))
0334 return;
0335 len = hfs_brec_lenoff(node, 2, &off);
0336 while (nidx >= len * 8) {
0337 u32 i;
0338
0339 nidx -= len * 8;
0340 i = node->next;
0341 if (!i) {
0342 ;
0343 pr_crit("unable to free bnode %u. bmap not found!\n",
0344 node->this);
0345 hfs_bnode_put(node);
0346 return;
0347 }
0348 hfs_bnode_put(node);
0349 node = hfs_bnode_find(tree, i);
0350 if (IS_ERR(node))
0351 return;
0352 if (node->type != HFS_NODE_MAP) {
0353 ;
0354 pr_crit("invalid bmap found! (%u,%d)\n",
0355 node->this, node->type);
0356 hfs_bnode_put(node);
0357 return;
0358 }
0359 len = hfs_brec_lenoff(node, 0, &off);
0360 }
0361 off += node->page_offset + nidx / 8;
0362 page = node->page[off >> PAGE_SHIFT];
0363 data = kmap(page);
0364 off &= ~PAGE_MASK;
0365 m = 1 << (~nidx & 7);
0366 byte = data[off];
0367 if (!(byte & m)) {
0368 pr_crit("trying to free free bnode %u(%d)\n",
0369 node->this, node->type);
0370 kunmap(page);
0371 hfs_bnode_put(node);
0372 return;
0373 }
0374 data[off] = byte & ~m;
0375 set_page_dirty(page);
0376 kunmap(page);
0377 hfs_bnode_put(node);
0378 tree->free_nodes++;
0379 mark_inode_dirty(tree->inode);
0380 }