// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
    struct page **pagep;
    int l;

    off += node->page_offset;
    pagep = node->page + (off >> PAGE_SHIFT);
    off &= ~PAGE_MASK;

    l = min_t(int, len, PAGE_SIZE - off);
    memcpy(buf, kmap(*pagep) + off, l);
    kunmap(*pagep);

    while ((len -= l) != 0) {
        buf += l;
        l = min_t(int, len, PAGE_SIZE);
        memcpy(buf, kmap(*++pagep), l);
        kunmap(*pagep);
    }
}

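/*
 * Read a big-endian 16-bit value from a node, converting it to CPU
 * byte order. Callers in this file use it e.g. to fetch the record
 * offsets stored as __be16 values at the end of a node (a sketch,
 * with rec as a hypothetical record index):
 *
 *     u16 off = hfs_bnode_read_u16(node, tree->node_size - (rec + 1) * 2);
 */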
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
    __be16 data;
    /* TODO: optimize later... */
    hfs_bnode_read(node, &data, off, 2);
    return be16_to_cpu(data);
}

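/* Read a single byte from the raw data of a node */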
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
    u8 data;
    /* TODO: optimize later... */
    hfs_bnode_read(node, &data, off, 1);
    return data;
}

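/*
 * Read a key from a node. For leaf nodes, trees with variable-size
 * index keys and the attributes tree, the length is taken from the
 * key's own 16-bit length field; otherwise the tree-wide max_key_len
 * is used.
 */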
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
    struct hfs_btree *tree;
    int key_len;

    tree = node->tree;
    if (node->type == HFS_NODE_LEAF ||
        tree->attributes & HFS_TREE_VARIDXKEYS ||
        node->tree->cnid == HFSPLUS_ATTR_CNID)
        key_len = hfs_bnode_read_u16(node, off) + 2;
    else
        key_len = tree->max_key_len + 2;

    hfs_bnode_read(node, key, off, key_len);
}

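/* Copy a specified range of bytes into the raw data of a node */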
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
    struct page **pagep;
    int l;

    off += node->page_offset;
    pagep = node->page + (off >> PAGE_SHIFT);
    off &= ~PAGE_MASK;

    l = min_t(int, len, PAGE_SIZE - off);
    memcpy(kmap(*pagep) + off, buf, l);
    set_page_dirty(*pagep);
    kunmap(*pagep);

    while ((len -= l) != 0) {
        buf += l;
        l = min_t(int, len, PAGE_SIZE);
        memcpy(kmap(*++pagep), buf, l);
        set_page_dirty(*pagep);
        kunmap(*pagep);
    }
}

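/* Write a 16-bit value into a node, converting it to big-endian */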
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
    __be16 v = cpu_to_be16(data);
    /* TODO: optimize later... */
    hfs_bnode_write(node, &v, off, 2);
}

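/* Zero a specified range of bytes in the raw data of a node */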
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
    struct page **pagep;
    int l;

    off += node->page_offset;
    pagep = node->page + (off >> PAGE_SHIFT);
    off &= ~PAGE_MASK;

    l = min_t(int, len, PAGE_SIZE - off);
    memset(kmap(*pagep) + off, 0, l);
    set_page_dirty(*pagep);
    kunmap(*pagep);

    while ((len -= l) != 0) {
        l = min_t(int, len, PAGE_SIZE);
        memset(kmap(*++pagep), 0, l);
        set_page_dirty(*pagep);
        kunmap(*pagep);
    }
}

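/*
 * Copy a range of bytes between two nodes, walking both page arrays.
 * The first branch handles the case where source and destination start
 * at the same offset within their pages.
 */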
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
            struct hfs_bnode *src_node, int src, int len)
{
    struct page **src_page, **dst_page;
    int l;

    hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
    if (!len)
        return;
    src += src_node->page_offset;
    dst += dst_node->page_offset;
    src_page = src_node->page + (src >> PAGE_SHIFT);
    src &= ~PAGE_MASK;
    dst_page = dst_node->page + (dst >> PAGE_SHIFT);
    dst &= ~PAGE_MASK;

    if (src == dst) {
        l = min_t(int, len, PAGE_SIZE - src);
        memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
        kunmap(*src_page);
        set_page_dirty(*dst_page);
        kunmap(*dst_page);

        while ((len -= l) != 0) {
            l = min_t(int, len, PAGE_SIZE);
            memcpy(kmap(*++dst_page), kmap(*++src_page), l);
            kunmap(*src_page);
            set_page_dirty(*dst_page);
            kunmap(*dst_page);
        }
    } else {
        void *src_ptr, *dst_ptr;

        do {
            src_ptr = kmap(*src_page) + src;
            dst_ptr = kmap(*dst_page) + dst;
            if (PAGE_SIZE - src < PAGE_SIZE - dst) {
                l = PAGE_SIZE - src;
                src = 0;
                dst += l;
            } else {
                l = PAGE_SIZE - dst;
                src += l;
                dst = 0;
            }
            l = min(len, l);
            memcpy(dst_ptr, src_ptr, l);
            kunmap(*src_page);
            set_page_dirty(*dst_page);
            kunmap(*dst_page);
            if (!dst)
                dst_page++;
            else
                src_page++;
        } while ((len -= l));
    }
}

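/*
 * Move a range of bytes within a node, handling overlapping ranges
 * like memmove(): when the destination lies above the source, the
 * copy proceeds backwards from the end of the range.
 */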
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
    struct page **src_page, **dst_page;
    int l;

    hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
    if (!len)
        return;
    src += node->page_offset;
    dst += node->page_offset;
    if (dst > src) {
        src += len - 1;
        src_page = node->page + (src >> PAGE_SHIFT);
        src = (src & ~PAGE_MASK) + 1;
        dst += len - 1;
        dst_page = node->page + (dst >> PAGE_SHIFT);
        dst = (dst & ~PAGE_MASK) + 1;

        if (src == dst) {
            while (src < len) {
                memmove(kmap(*dst_page), kmap(*src_page), src);
                kunmap(*src_page);
                set_page_dirty(*dst_page);
                kunmap(*dst_page);
                len -= src;
                src = PAGE_SIZE;
                src_page--;
                dst_page--;
            }
            src -= len;
            memmove(kmap(*dst_page) + src,
                kmap(*src_page) + src, len);
            kunmap(*src_page);
            set_page_dirty(*dst_page);
            kunmap(*dst_page);
        } else {
            void *src_ptr, *dst_ptr;

            do {
                src_ptr = kmap(*src_page) + src;
                dst_ptr = kmap(*dst_page) + dst;
                if (src < dst) {
                    l = src;
                    src = PAGE_SIZE;
                    dst -= l;
                } else {
                    l = dst;
                    src -= l;
                    dst = PAGE_SIZE;
                }
                l = min(len, l);
                memmove(dst_ptr - l, src_ptr - l, l);
                kunmap(*src_page);
                set_page_dirty(*dst_page);
                kunmap(*dst_page);
                if (dst == PAGE_SIZE)
                    dst_page--;
                else
                    src_page--;
            } while ((len -= l));
        }
    } else {
        src_page = node->page + (src >> PAGE_SHIFT);
        src &= ~PAGE_MASK;
        dst_page = node->page + (dst >> PAGE_SHIFT);
        dst &= ~PAGE_MASK;

        if (src == dst) {
            l = min_t(int, len, PAGE_SIZE - src);
            memmove(kmap(*dst_page) + src,
                kmap(*src_page) + src, l);
            kunmap(*src_page);
            set_page_dirty(*dst_page);
            kunmap(*dst_page);

            while ((len -= l) != 0) {
                l = min_t(int, len, PAGE_SIZE);
                memmove(kmap(*++dst_page),
                    kmap(*++src_page), l);
                kunmap(*src_page);
                set_page_dirty(*dst_page);
                kunmap(*dst_page);
            }
        } else {
            void *src_ptr, *dst_ptr;

            do {
                src_ptr = kmap(*src_page) + src;
                dst_ptr = kmap(*dst_page) + dst;
                if (PAGE_SIZE - src <
                        PAGE_SIZE - dst) {
                    l = PAGE_SIZE - src;
                    src = 0;
                    dst += l;
                } else {
                    l = PAGE_SIZE - dst;
                    src += l;
                    dst = 0;
                }
                l = min(len, l);
                memmove(dst_ptr, src_ptr, l);
                kunmap(*src_page);
                set_page_dirty(*dst_page);
                kunmap(*dst_page);
                if (!dst)
                    dst_page++;
                else
                    src_page++;
            } while ((len -= l));
        }
    }
}

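/* Dump a node's descriptor and record offset table for debugging */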
void hfs_bnode_dump(struct hfs_bnode *node)
{
    struct hfs_bnode_desc desc;
    __be32 cnid;
    int i, off, key_off;

    hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
    hfs_bnode_read(node, &desc, 0, sizeof(desc));
    hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
        be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
        desc.type, desc.height, be16_to_cpu(desc.num_recs));

    off = node->tree->node_size - 2;
    for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
        key_off = hfs_bnode_read_u16(node, off);
        hfs_dbg(BNODE_MOD, " %d", key_off);
        if (i && node->type == HFS_NODE_INDEX) {
            int tmp;

            if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
                    node->tree->cnid == HFSPLUS_ATTR_CNID)
                tmp = hfs_bnode_read_u16(node, key_off) + 2;
            else
                tmp = node->tree->max_key_len + 2;
            hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
            hfs_bnode_read(node, &cnid, key_off + tmp, 4);
            hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
        } else if (i && node->type == HFS_NODE_LEAF) {
            int tmp;

            tmp = hfs_bnode_read_u16(node, key_off);
            hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
        }
    }
    hfs_dbg_cont(BNODE_MOD, "\n");
}

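/*
 * Unlink a node from its siblings' next/prev chain, updating the
 * tree's leaf_head and leaf_tail if a leaf chain end moves, and mark
 * the node deleted.
 */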
void hfs_bnode_unlink(struct hfs_bnode *node)
{
    struct hfs_btree *tree;
    struct hfs_bnode *tmp;
    __be32 cnid;

    tree = node->tree;
    if (node->prev) {
        tmp = hfs_bnode_find(tree, node->prev);
        if (IS_ERR(tmp))
            return;
        tmp->next = node->next;
        cnid = cpu_to_be32(tmp->next);
        hfs_bnode_write(tmp, &cnid,
            offsetof(struct hfs_bnode_desc, next), 4);
        hfs_bnode_put(tmp);
    } else if (node->type == HFS_NODE_LEAF)
        tree->leaf_head = node->next;

    if (node->next) {
        tmp = hfs_bnode_find(tree, node->next);
        if (IS_ERR(tmp))
            return;
        tmp->prev = node->prev;
        cnid = cpu_to_be32(tmp->prev);
        hfs_bnode_write(tmp, &cnid,
            offsetof(struct hfs_bnode_desc, prev), 4);
        hfs_bnode_put(tmp);
    } else if (node->type == HFS_NODE_LEAF)
        tree->leaf_tail = node->prev;

    /* move down? */
    if (!node->prev && !node->next)
        hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
    if (!node->parent) {
        tree->root = 0;
        tree->depth = 0;
    }
    set_bit(HFS_BNODE_DELETED, &node->flags);
}

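/* Fold a node number into an index into the tree's node hash table */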
static inline int hfs_bnode_hash(u32 num)
{
    num = (num >> 16) + num;
    num += num >> 8;
    return num & (NODE_HASH_SIZE - 1);
}

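/*
 * Look up a cached node in the tree's hash table; returns NULL if the
 * node is not cached. All callers in this file hold tree->hash_lock.
 */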
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
    struct hfs_bnode *node;

    if (cnid >= tree->node_count) {
        pr_err("request for non-existent node %d in B*Tree\n",
               cnid);
        return NULL;
    }

    for (node = tree->node_hash[hfs_bnode_hash(cnid)];
            node; node = node->next_hash)
        if (node->this == cnid)
            return node;
    return NULL;
}

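/*
 * Allocate a node, insert it into the hash table (waiting for and
 * reusing an existing entry if one appears first) and read its
 * backing pages from the tree inode's page cache.
 */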
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
    struct hfs_bnode *node, *node2;
    struct address_space *mapping;
    struct page *page;
    int size, block, i, hash;
    loff_t off;

    if (cnid >= tree->node_count) {
        pr_err("request for non-existent node %d in B*Tree\n",
               cnid);
        return NULL;
    }

    size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
        sizeof(struct page *);
    node = kzalloc(size, GFP_KERNEL);
    if (!node)
        return NULL;
    node->tree = tree;
    node->this = cnid;
    set_bit(HFS_BNODE_NEW, &node->flags);
    atomic_set(&node->refcnt, 1);
    hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
        node->tree->cnid, node->this);
    init_waitqueue_head(&node->lock_wq);
    spin_lock(&tree->hash_lock);
    node2 = hfs_bnode_findhash(tree, cnid);
    if (!node2) {
        hash = hfs_bnode_hash(cnid);
        node->next_hash = tree->node_hash[hash];
        tree->node_hash[hash] = node;
        tree->node_hash_cnt++;
    } else {
        spin_unlock(&tree->hash_lock);
        kfree(node);
        wait_event(node2->lock_wq,
            !test_bit(HFS_BNODE_NEW, &node2->flags));
        return node2;
    }
    spin_unlock(&tree->hash_lock);

    mapping = tree->inode->i_mapping;
    off = (loff_t)cnid << tree->node_size_shift;
    block = off >> PAGE_SHIFT;
    node->page_offset = off & ~PAGE_MASK;
    for (i = 0; i < tree->pages_per_bnode; block++, i++) {
        page = read_mapping_page(mapping, block, NULL);
        if (IS_ERR(page))
            goto fail;
        node->page[i] = page;
    }

    return node;
fail:
    set_bit(HFS_BNODE_ERROR, &node->flags);
    return node;
}

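/* Remove a node from the tree's hash table */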
void hfs_bnode_unhash(struct hfs_bnode *node)
{
    struct hfs_bnode **p;

    hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
        node->tree->cnid, node->this, atomic_read(&node->refcnt));
    for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
         *p && *p != node; p = &(*p)->next_hash)
        ;
    BUG_ON(!*p);
    *p = node->next_hash;
    node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
    struct hfs_bnode *node;
    struct hfs_bnode_desc *desc;
    int i, rec_off, off, next_off;
    int entry_size, key_size;

    spin_lock(&tree->hash_lock);
    node = hfs_bnode_findhash(tree, num);
    if (node) {
        hfs_bnode_get(node);
        spin_unlock(&tree->hash_lock);
        wait_event(node->lock_wq,
            !test_bit(HFS_BNODE_NEW, &node->flags));
        if (test_bit(HFS_BNODE_ERROR, &node->flags))
            goto node_error;
        return node;
    }
    spin_unlock(&tree->hash_lock);
    node = __hfs_bnode_create(tree, num);
    if (!node)
        return ERR_PTR(-ENOMEM);
    if (test_bit(HFS_BNODE_ERROR, &node->flags))
        goto node_error;
    if (!test_bit(HFS_BNODE_NEW, &node->flags))
        return node;

    desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
            node->page_offset);
    node->prev = be32_to_cpu(desc->prev);
    node->next = be32_to_cpu(desc->next);
    node->num_recs = be16_to_cpu(desc->num_recs);
    node->type = desc->type;
    node->height = desc->height;
    kunmap(node->page[0]);

    switch (node->type) {
    case HFS_NODE_HEADER:
    case HFS_NODE_MAP:
        if (node->height != 0)
            goto node_error;
        break;
    case HFS_NODE_LEAF:
        if (node->height != 1)
            goto node_error;
        break;
    case HFS_NODE_INDEX:
        if (node->height <= 1 || node->height > tree->depth)
            goto node_error;
        break;
    default:
        goto node_error;
    }

    rec_off = tree->node_size - 2;
    off = hfs_bnode_read_u16(node, rec_off);
    if (off != sizeof(struct hfs_bnode_desc))
        goto node_error;
    for (i = 1; i <= node->num_recs; off = next_off, i++) {
        rec_off -= 2;
        next_off = hfs_bnode_read_u16(node, rec_off);
        if (next_off <= off ||
            next_off > tree->node_size ||
            next_off & 1)
            goto node_error;
        entry_size = next_off - off;
        if (node->type != HFS_NODE_INDEX &&
            node->type != HFS_NODE_LEAF)
            continue;
        key_size = hfs_bnode_read_u16(node, off) + 2;
        if (key_size >= entry_size || key_size & 1)
            goto node_error;
    }
    clear_bit(HFS_BNODE_NEW, &node->flags);
    wake_up(&node->lock_wq);
    return node;

node_error:
    set_bit(HFS_BNODE_ERROR, &node->flags);
    clear_bit(HFS_BNODE_NEW, &node->flags);
    wake_up(&node->lock_wq);
    hfs_bnode_put(node);
    return ERR_PTR(-EIO);
}

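/* Drop the page references held by a node and free the node itself */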
void hfs_bnode_free(struct hfs_bnode *node)
{
    int i;

    for (i = 0; i < node->tree->pages_per_bnode; i++)
        if (node->page[i])
            put_page(node->page[i]);
    kfree(node);
}

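/*
 * Create a new node and zero its contents; the node number must not
 * already be present in the hash table.
 */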
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
    struct hfs_bnode *node;
    struct page **pagep;
    int i;

    spin_lock(&tree->hash_lock);
    node = hfs_bnode_findhash(tree, num);
    spin_unlock(&tree->hash_lock);
    if (node) {
        pr_crit("new node %u already hashed?\n", num);
        WARN_ON(1);
        return node;
    }
    node = __hfs_bnode_create(tree, num);
    if (!node)
        return ERR_PTR(-ENOMEM);
    if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
        hfs_bnode_put(node);
        return ERR_PTR(-EIO);
    }

    pagep = node->page;
    memset(kmap(*pagep) + node->page_offset, 0,
           min_t(int, PAGE_SIZE, tree->node_size));
    set_page_dirty(*pagep);
    kunmap(*pagep);
    for (i = 1; i < tree->pages_per_bnode; i++) {
        memset(kmap(*++pagep), 0, PAGE_SIZE);
        set_page_dirty(*pagep);
        kunmap(*pagep);
    }
    clear_bit(HFS_BNODE_NEW, &node->flags);
    wake_up(&node->lock_wq);

    return node;
}

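/* Take an additional reference on a node */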
void hfs_bnode_get(struct hfs_bnode *node)
{
    if (node) {
        atomic_inc(&node->refcnt);
        hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
            node->tree->cnid, node->this,
            atomic_read(&node->refcnt));
    }
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
    if (node) {
        struct hfs_btree *tree = node->tree;
        int i;

        hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
            node->tree->cnid, node->this,
            atomic_read(&node->refcnt));
        BUG_ON(!atomic_read(&node->refcnt));
        if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
            return;
        for (i = 0; i < tree->pages_per_bnode; i++) {
            if (!node->page[i])
                continue;
            mark_page_accessed(node->page[i]);
        }

        if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
            hfs_bnode_unhash(node);
            spin_unlock(&tree->hash_lock);
            if (hfs_bnode_need_zeroout(tree))
                hfs_bnode_clear(node, 0, tree->node_size);
            hfs_bmap_free(node);
            hfs_bnode_free(node);
            return;
        }
        spin_unlock(&tree->hash_lock);
    }
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
    struct super_block *sb = tree->inode->i_sb;
    struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
    const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

    return tree->cnid == HFSPLUS_CAT_CNID &&
        volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}