// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * This code builds two trees of free cluster extents.
 * The trees are sorted by start of extent and by length of extent.
 * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in the trees.
 * In the extreme case, the code reads the on-disk bitmap to find free clusters.
 *
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * Maximum number of extents in each tree.
 */
#define NTFS_MAX_WND_EXTENTS (32u * 1024u)

struct rb_node_key {
    struct rb_node node;
    size_t key;
};

struct e_node {
    struct rb_node_key start; /* Tree sorted by start. */
    struct rb_node_key count; /* Tree sorted by len. */
};
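
/*
 * Each free extent is a single allocation linked into both trees at once:
 * rb_entry() on 'start.node' or on 'count.node' recovers the same e_node.
 * On a typical 64-bit build an e_node is 64 bytes (two 32-byte
 * rb_node_key members), so NTFS_MAX_WND_EXTENTS caps the cache at about
 * 2 MiB of e_nodes per bitmap.
 */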

static int wnd_rescan(struct wnd_bitmap *wnd);
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);

static struct kmem_cache *ntfs_enode_cachep;

int __init ntfs3_init_bitmap(void)
{
    ntfs_enode_cachep =
        kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
                  SLAB_RECLAIM_ACCOUNT, NULL);
    return ntfs_enode_cachep ? 0 : -ENOMEM;
}

void ntfs3_exit_bitmap(void)
{
    kmem_cache_destroy(ntfs_enode_cachep);
}

/*
 * wnd_scan - Scan the range [wpos, wend) of window @buf for a free run
 * of @to_alloc bits.
 *
 * @b_pos + @b_len track the biggest free fragment seen so far.
 *
 * Return: Start of the found run, or -1 (MINUS_ONE_T) if not found.
 */
static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
               size_t to_alloc, size_t *prev_tail, size_t *b_pos,
               size_t *b_len)
{
    while (wpos < wend) {
        size_t free_len;
        u32 free_bits, end;
        u32 used = find_next_zero_bit(buf, wend, wpos);

        if (used >= wend) {
            if (*b_len < *prev_tail) {
                *b_pos = wbit - *prev_tail;
                *b_len = *prev_tail;
            }

            *prev_tail = 0;
            return -1;
        }

        if (used > wpos) {
            wpos = used;
            if (*b_len < *prev_tail) {
                *b_pos = wbit - *prev_tail;
                *b_len = *prev_tail;
            }

            *prev_tail = 0;
        }

        /*
         * Now we have a fragment [wpos, wend) starting with 0.
         */
        end = wpos + to_alloc - *prev_tail;
        free_bits = find_next_bit(buf, min(end, wend), wpos);

        free_len = *prev_tail + free_bits - wpos;

        if (*b_len < free_len) {
            *b_pos = wbit + wpos - *prev_tail;
            *b_len = free_len;
        }

        if (free_len >= to_alloc)
            return wbit + wpos - *prev_tail;

        if (free_bits >= wend) {
            *prev_tail += free_bits - wpos;
            return -1;
        }

        /* Skip the free run and the first '1' after it. */
        wpos = free_bits + 1;

        *prev_tail = 0;
    }

    return -1;
}
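
/*
 * Note that @prev_tail carries a run of free bits that ended the previous
 * window, so an extent spanning a window boundary is still found as one
 * fragment; the returned start (wbit + wpos - *prev_tail) may then point
 * into the previous window.
 */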

/*
 * wnd_close - Frees all resources.
 */
void wnd_close(struct wnd_bitmap *wnd)
{
    struct rb_node *node, *next;

    kfree(wnd->free_bits);
    run_close(&wnd->run);

    node = rb_first(&wnd->start_tree);

    while (node) {
        next = rb_next(node);
        rb_erase(node, &wnd->start_tree);
        kmem_cache_free(ntfs_enode_cachep,
                rb_entry(node, struct e_node, start.node));
        node = next;
    }
}

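/*
 * rb_lookup - Find the node with the greatest key that is <= @v.
 *
 * Return: NULL if every key in @root is greater than @v.
 */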
static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
{
    struct rb_node **p = &root->rb_node;
    struct rb_node *r = NULL;

    while (*p) {
        struct rb_node_key *k;

        k = rb_entry(*p, struct rb_node_key, node);
        if (v < k->key) {
            p = &(*p)->rb_left;
        } else if (v > k->key) {
            r = &k->node;
            p = &(*p)->rb_right;
        } else {
            return &k->node;
        }
    }

    return r;
}

/*
 * rb_insert_count - Helper function to insert a node into the special
 * 'count' tree.
 */
static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
{
    struct rb_node **p = &root->rb_node;
    struct rb_node *parent = NULL;
    size_t e_ckey = e->count.key;
    size_t e_skey = e->start.key;

    while (*p) {
        struct e_node *k =
            rb_entry(parent = *p, struct e_node, count.node);

        if (e_ckey > k->count.key) {
            p = &(*p)->rb_left;
        } else if (e_ckey < k->count.key) {
            p = &(*p)->rb_right;
        } else if (e_skey < k->start.key) {
            p = &(*p)->rb_left;
        } else if (e_skey > k->start.key) {
            p = &(*p)->rb_right;
        } else {
            WARN_ON(1);
            return false;
        }
    }

    rb_link_node(&e->count.node, parent, p);
    rb_insert_color(&e->count.node, root);
    return true;
}
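
/*
 * The 'count' tree is kept in descending order of length (ties broken by
 * ascending start), so rb_first() always yields the biggest free extent
 * and rb_last() the smallest.
 */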

/*
 * rb_insert_start - Helper function to insert a node into the 'start' tree.
 */
static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
{
    struct rb_node **p = &root->rb_node;
    struct rb_node *parent = NULL;
    size_t e_skey = e->start.key;

    while (*p) {
        struct e_node *k;

        parent = *p;

        k = rb_entry(parent, struct e_node, start.node);
        if (e_skey < k->start.key) {
            p = &(*p)->rb_left;
        } else if (e_skey > k->start.key) {
            p = &(*p)->rb_right;
        } else {
            WARN_ON(1);
            return false;
        }
    }

    rb_link_node(&e->start.node, parent, p);
    rb_insert_color(&e->start.node, root);
    return true;
}

/*
 * wnd_add_free_ext - Add a new extent of free space.
 * @build:  True when building the tree from the on-disk bitmap.
 */
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
                 bool build)
{
    struct e_node *e, *e0 = NULL;
    size_t ib, end_in = bit + len;
    struct rb_node *n;

    if (build) {
        /* Use extent_min to filter out too short extents. */
        if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
            len <= wnd->extent_min) {
            wnd->uptodated = -1;
            return;
        }
    } else {
        /* Try to find an extent before 'bit'. */
        n = rb_lookup(&wnd->start_tree, bit);

        if (!n) {
            n = rb_first(&wnd->start_tree);
        } else {
            e = rb_entry(n, struct e_node, start.node);
            n = rb_next(n);
            if (e->start.key + e->count.key == bit) {
                /* Remove the left neighbour and merge it in. */
                bit = e->start.key;
                len += e->count.key;
                rb_erase(&e->start.node, &wnd->start_tree);
                rb_erase(&e->count.node, &wnd->count_tree);
                wnd->count -= 1;
                e0 = e;
            }
        }

        while (n) {
            size_t next_end;

            e = rb_entry(n, struct e_node, start.node);
            next_end = e->start.key + e->count.key;
            if (e->start.key > end_in)
                break;

            /* Remove overlapping extents on the right. */
            n = rb_next(n);
            len += next_end - end_in;
            end_in = next_end;
            rb_erase(&e->start.node, &wnd->start_tree);
            rb_erase(&e->count.node, &wnd->count_tree);
            wnd->count -= 1;

            if (!e0)
                e0 = e;
            else
                kmem_cache_free(ntfs_enode_cachep, e);
        }

        if (wnd->uptodated != 1) {
            /* Check bits before 'bit'. */
            ib = wnd->zone_bit == wnd->zone_end ||
                         bit < wnd->zone_end
                     ? 0
                     : wnd->zone_end;

            while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
                bit -= 1;
                len += 1;
            }

            /* Check bits after 'end_in'. */
            ib = wnd->zone_bit == wnd->zone_end ||
                         end_in > wnd->zone_bit
                     ? wnd->nbits
                     : wnd->zone_bit;

            while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
                end_in += 1;
                len += 1;
            }
        }
    }
    /* Insert the new fragment. */
    if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
        if (e0)
            kmem_cache_free(ntfs_enode_cachep, e0);

        wnd->uptodated = -1;

        /* Compare with the smallest fragment. */
        n = rb_last(&wnd->count_tree);
        e = rb_entry(n, struct e_node, count.node);
        if (len <= e->count.key)
            goto out; /* Do not insert small fragments. */

        if (build) {
            struct e_node *e2;

            n = rb_prev(n);
            e2 = rb_entry(n, struct e_node, count.node);
            /* The smallest fragment will be 'e2->count.key'. */
            wnd->extent_min = e2->count.key;
        }

        /* Replace the smallest fragment with the new one. */
        rb_erase(&e->start.node, &wnd->start_tree);
        rb_erase(&e->count.node, &wnd->count_tree);
        wnd->count -= 1;
    } else {
        e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
        if (!e) {
            wnd->uptodated = -1;
            goto out;
        }

        if (build && len <= wnd->extent_min)
            wnd->extent_min = len;
    }
    e->start.key = bit;
    e->count.key = len;
    if (len > wnd->extent_max)
        wnd->extent_max = len;

    rb_insert_start(&wnd->start_tree, e);
    rb_insert_count(&wnd->count_tree, e);
    wnd->count += 1;

out:;
}
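
/*
 * For example, freeing bits [100, 120) while extents [80, 100) and
 * [120, 130) are cached merges all three pieces into the single cached
 * extent [80, 130).
 */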

/*
 * wnd_remove_free_ext - Remove a run from the cached free space.
 */
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
{
    struct rb_node *n, *n3;
    struct e_node *e, *e3;
    size_t end_in = bit + len;
    size_t end3, end, new_key, new_len, max_new_len;

    /* Try to find extent before 'bit'. */
    n = rb_lookup(&wnd->start_tree, bit);

    if (!n)
        return;

    e = rb_entry(n, struct e_node, start.node);
    end = e->start.key + e->count.key;

    new_key = new_len = 0;
    len = e->count.key;

    /* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n'. */
    if (e->start.key > bit)
        ;
    else if (end_in <= end) {
        /* Range [bit,end_in) inside 'e'. */
        new_key = end_in;
        new_len = end - end_in;
        len = bit - e->start.key;
    } else if (bit > end) {
        bool bmax = false;

        n3 = rb_next(n);

        while (n3) {
            e3 = rb_entry(n3, struct e_node, start.node);
            if (e3->start.key >= end_in)
                break;

            if (e3->count.key == wnd->extent_max)
                bmax = true;

            end3 = e3->start.key + e3->count.key;
            if (end3 > end_in) {
                e3->start.key = end_in;
                rb_erase(&e3->count.node, &wnd->count_tree);
                e3->count.key = end3 - end_in;
                rb_insert_count(&wnd->count_tree, e3);
                break;
            }

            n3 = rb_next(n3);
            rb_erase(&e3->start.node, &wnd->start_tree);
            rb_erase(&e3->count.node, &wnd->count_tree);
            wnd->count -= 1;
            kmem_cache_free(ntfs_enode_cachep, e3);
        }
        if (!bmax)
            return;
        n3 = rb_first(&wnd->count_tree);
        wnd->extent_max =
            n3 ? rb_entry(n3, struct e_node, count.node)->count.key
               : 0;
        return;
    }

    if (e->count.key != wnd->extent_max) {
        ;
    } else if (rb_prev(&e->count.node)) {
        ;
    } else {
        n3 = rb_next(&e->count.node);
        max_new_len = max(len, new_len);
        if (!n3) {
            wnd->extent_max = max_new_len;
        } else {
            e3 = rb_entry(n3, struct e_node, count.node);
            wnd->extent_max = max(e3->count.key, max_new_len);
        }
    }

    if (!len) {
        if (new_len) {
            e->start.key = new_key;
            rb_erase(&e->count.node, &wnd->count_tree);
            e->count.key = new_len;
            rb_insert_count(&wnd->count_tree, e);
        } else {
            rb_erase(&e->start.node, &wnd->start_tree);
            rb_erase(&e->count.node, &wnd->count_tree);
            wnd->count -= 1;
            kmem_cache_free(ntfs_enode_cachep, e);
        }
        goto out;
    }
    rb_erase(&e->count.node, &wnd->count_tree);
    e->count.key = len;
    rb_insert_count(&wnd->count_tree, e);

    if (!new_len)
        goto out;

    if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
        wnd->uptodated = -1;

        /* Get minimal extent. */
        e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
                 count.node);
        if (e->count.key > new_len)
            goto out;

        /* Replace minimum. */
        rb_erase(&e->start.node, &wnd->start_tree);
        rb_erase(&e->count.node, &wnd->count_tree);
        wnd->count -= 1;
    } else {
        e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
        if (!e)
            wnd->uptodated = -1;
    }

    if (e) {
        e->start.key = new_key;
        e->count.key = new_len;
        rb_insert_start(&wnd->start_tree, e);
        rb_insert_count(&wnd->count_tree, e);
        wnd->count += 1;
    }

out:
    if (!wnd->count && 1 != wnd->uptodated)
        wnd_rescan(wnd);
}
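
/*
 * For example, removing [90, 95) from a cached extent [80, 100) splits it:
 * 'e' is trimmed to [80, 90) and a new e_node [95, 100) is inserted
 * (new_key == 95, new_len == 5 in the code above).
 */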

/*
 * wnd_rescan - Scan the whole bitmap. Used during initialization.
 */
static int wnd_rescan(struct wnd_bitmap *wnd)
{
    int err = 0;
    size_t prev_tail = 0;
    struct super_block *sb = wnd->sb;
    struct ntfs_sb_info *sbi = sb->s_fs_info;
    u64 lbo, len = 0;
    u32 blocksize = sb->s_blocksize;
    u8 cluster_bits = sbi->cluster_bits;
    u32 wbits = 8 * sb->s_blocksize;
    u32 used, frb;
    const ulong *buf;
    size_t wpos, wbit, iw, vbo;
    struct buffer_head *bh = NULL;
    CLST lcn, clen;

    wnd->uptodated = 0;
    wnd->extent_max = 0;
    wnd->extent_min = MINUS_ONE_T;
    wnd->total_zeroes = 0;

    vbo = 0;

    for (iw = 0; iw < wnd->nwnd; iw++) {
        if (iw + 1 == wnd->nwnd)
            wbits = wnd->bits_last;

        if (wnd->inited) {
            if (!wnd->free_bits[iw]) {
                /* All ones. */
                if (prev_tail) {
                    wnd_add_free_ext(wnd,
                             vbo * 8 - prev_tail,
                             prev_tail, true);
                    prev_tail = 0;
                }
                goto next_wnd;
            }
            if (wbits == wnd->free_bits[iw]) {
                /* All zeroes. */
                prev_tail += wbits;
                wnd->total_zeroes += wbits;
                goto next_wnd;
            }
        }

        if (!len) {
            u32 off = vbo & sbi->cluster_mask;

            if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
                          &lcn, &clen, NULL)) {
                err = -ENOENT;
                goto out;
            }

            lbo = ((u64)lcn << cluster_bits) + off;
            len = ((u64)clen << cluster_bits) - off;
        }

        bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
        if (!bh) {
            err = -EIO;
            goto out;
        }

        buf = (ulong *)bh->b_data;

        used = __bitmap_weight(buf, wbits);
        if (used < wbits) {
            frb = wbits - used;
            wnd->free_bits[iw] = frb;
            wnd->total_zeroes += frb;
        }

        wpos = 0;
        wbit = vbo * 8;

        if (wbit + wbits > wnd->nbits)
            wbits = wnd->nbits - wbit;

        do {
            used = find_next_zero_bit(buf, wbits, wpos);

            if (used > wpos && prev_tail) {
                wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
                         prev_tail, true);
                prev_tail = 0;
            }

            wpos = used;

            if (wpos >= wbits) {
                /* No free blocks. */
                prev_tail = 0;
                break;
            }

            frb = find_next_bit(buf, wbits, wpos);
            if (frb >= wbits) {
                /* Keep last free block. */
                prev_tail += frb - wpos;
                break;
            }

            wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
                     frb + prev_tail - wpos, true);

            /* Skip free block and first '1'. */
            wpos = frb + 1;
            /* Reset previous tail. */
            prev_tail = 0;
        } while (wpos < wbits);

next_wnd:

        if (bh)
            put_bh(bh);
        bh = NULL;

        vbo += blocksize;
        if (len) {
            len -= blocksize;
            lbo += blocksize;
        }
    }

    /* Add the last block. */
    if (prev_tail)
        wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);

    /*
     * Before the init cycle 'uptodated' was 0.
     * If any errors or limits occurred during initialization,
     * 'uptodated' will be -1.
     * If 'uptodated' is still 0, the tree is fully up to date.
     */
    if (!wnd->uptodated)
        wnd->uptodated = 1;

    if (wnd->zone_bit != wnd->zone_end) {
        size_t zlen = wnd->zone_end - wnd->zone_bit;

        wnd->zone_end = wnd->zone_bit;
        wnd_zone_set(wnd, wnd->zone_bit, zlen);
    }

out:
    return err;
}

int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
{
    int err;
    u32 blocksize = sb->s_blocksize;
    u32 wbits = blocksize * 8;

    init_rwsem(&wnd->rw_lock);

    wnd->sb = sb;
    wnd->nbits = nbits;
    wnd->total_zeroes = nbits;
    wnd->extent_max = MINUS_ONE_T;
    wnd->zone_bit = wnd->zone_end = 0;
    wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
    wnd->bits_last = nbits & (wbits - 1);
    if (!wnd->bits_last)
        wnd->bits_last = wbits;

    wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS);
    if (!wnd->free_bits)
        return -ENOMEM;

    err = wnd_rescan(wnd);
    if (err)
        return err;

    wnd->inited = true;

    return 0;
}
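
/*
 * Illustrative lifecycle (hypothetical caller, error handling elided):
 * wnd_init() builds the trees from the on-disk bitmap, wnd_set_used() /
 * wnd_set_free() allocate and release runs while keeping the bitmap and
 * the trees in sync, and wnd_close() tears everything down:
 *
 *	wnd_init(wnd, sb, nbits);
 *	wnd_set_used(wnd, bit, len);
 *	wnd_set_free(wnd, bit, len);
 *	wnd_close(wnd);
 */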

/*
 * wnd_map - Call sb_bread for requested window.
 */
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
{
    size_t vbo;
    CLST lcn, clen;
    struct super_block *sb = wnd->sb;
    struct ntfs_sb_info *sbi;
    struct buffer_head *bh;
    u64 lbo;

    sbi = sb->s_fs_info;
    vbo = (u64)iw << sb->s_blocksize_bits;

    if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
                  NULL)) {
        return ERR_PTR(-ENOENT);
    }

    lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);

    bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
    if (!bh)
        return ERR_PTR(-EIO);

    return bh;
}
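
/*
 * wnd_map() translates a window index to a disk block (VBO -> LBO via the
 * bitmap's run list) and returns the buffer_head with an elevated
 * reference; callers must check IS_ERR() and drop it with put_bh().
 */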

/*
 * wnd_set_free - Mark the range [bit, bit + bits) as free.
 */
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
    int err = 0;
    struct super_block *sb = wnd->sb;
    size_t bits0 = bits;
    u32 wbits = 8 * sb->s_blocksize;
    size_t iw = bit >> (sb->s_blocksize_bits + 3);
    u32 wbit = bit & (wbits - 1);
    struct buffer_head *bh;

    while (iw < wnd->nwnd && bits) {
        u32 tail, op;
        ulong *buf;

        if (iw + 1 == wnd->nwnd)
            wbits = wnd->bits_last;

        tail = wbits - wbit;
        op = min_t(u32, tail, bits);

        bh = wnd_map(wnd, iw);
        if (IS_ERR(bh)) {
            err = PTR_ERR(bh);
            break;
        }

        buf = (ulong *)bh->b_data;

        lock_buffer(bh);

        __bitmap_clear(buf, wbit, op);

        wnd->free_bits[iw] += op;

        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        put_bh(bh);

        wnd->total_zeroes += op;
        bits -= op;
        wbit = 0;
        iw += 1;
    }

    wnd_add_free_ext(wnd, bit, bits0, false);

    return err;
}

/*
 * wnd_set_used - Mark the range [bit, bit + bits) as used.
 */
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
    int err = 0;
    struct super_block *sb = wnd->sb;
    size_t bits0 = bits;
    size_t iw = bit >> (sb->s_blocksize_bits + 3);
    u32 wbits = 8 * sb->s_blocksize;
    u32 wbit = bit & (wbits - 1);
    struct buffer_head *bh;

    while (iw < wnd->nwnd && bits) {
        u32 tail, op;
        ulong *buf;

        if (unlikely(iw + 1 == wnd->nwnd))
            wbits = wnd->bits_last;

        tail = wbits - wbit;
        op = min_t(u32, tail, bits);

        bh = wnd_map(wnd, iw);
        if (IS_ERR(bh)) {
            err = PTR_ERR(bh);
            break;
        }
        buf = (ulong *)bh->b_data;

        lock_buffer(bh);

        __bitmap_set(buf, wbit, op);
        wnd->free_bits[iw] -= op;

        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        put_bh(bh);

        wnd->total_zeroes -= op;
        bits -= op;
        wbit = 0;
        iw += 1;
    }

    if (!RB_EMPTY_ROOT(&wnd->start_tree))
        wnd_remove_free_ext(wnd, bit, bits0);

    return err;
}

/*
 * wnd_is_free_hlp
 *
 * Return: True if all clusters [bit, bit+bits) are free (bitmap only).
 */
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
    struct super_block *sb = wnd->sb;
    size_t iw = bit >> (sb->s_blocksize_bits + 3);
    u32 wbits = 8 * sb->s_blocksize;
    u32 wbit = bit & (wbits - 1);

    while (iw < wnd->nwnd && bits) {
        u32 tail, op;

        if (unlikely(iw + 1 == wnd->nwnd))
            wbits = wnd->bits_last;

        tail = wbits - wbit;
        op = min_t(u32, tail, bits);

        if (wbits != wnd->free_bits[iw]) {
            bool ret;
            struct buffer_head *bh = wnd_map(wnd, iw);

            if (IS_ERR(bh))
                return false;

            ret = are_bits_clear((ulong *)bh->b_data, wbit, op);

            put_bh(bh);
            if (!ret)
                return false;
        }

        bits -= op;
        wbit = 0;
        iw += 1;
    }

    return true;
}
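
/*
 * Note that wnd_is_free_hlp() only reads the on-disk window when the
 * per-window counter cannot answer: a window with free_bits[iw] == wbits
 * is entirely free and is skipped without I/O.
 */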

/*
 * wnd_is_free
 *
 * Return: True if all clusters [bit, bit+bits) are free.
 */
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
    bool ret;
    struct rb_node *n;
    size_t end;
    struct e_node *e;

    if (RB_EMPTY_ROOT(&wnd->start_tree))
        goto use_wnd;

    n = rb_lookup(&wnd->start_tree, bit);
    if (!n)
        goto use_wnd;

    e = rb_entry(n, struct e_node, start.node);

    end = e->start.key + e->count.key;

    if (bit < end && bit + bits <= end)
        return true;

use_wnd:
    ret = wnd_is_free_hlp(wnd, bit, bits);

    return ret;
}

/*
 * wnd_is_used
 *
 * Return: True if all clusters [bit, bit+bits) are used.
 */
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
    bool ret = false;
    struct super_block *sb = wnd->sb;
    size_t iw = bit >> (sb->s_blocksize_bits + 3);
    u32 wbits = 8 * sb->s_blocksize;
    u32 wbit = bit & (wbits - 1);
    size_t end;
    struct rb_node *n;
    struct e_node *e;

    if (RB_EMPTY_ROOT(&wnd->start_tree))
        goto use_wnd;

    end = bit + bits;
    n = rb_lookup(&wnd->start_tree, end - 1);
    if (!n)
        goto use_wnd;

    e = rb_entry(n, struct e_node, start.node);
    if (e->start.key + e->count.key > bit)
        return false;

use_wnd:
    while (iw < wnd->nwnd && bits) {
        u32 tail, op;

        if (unlikely(iw + 1 == wnd->nwnd))
            wbits = wnd->bits_last;

        tail = wbits - wbit;
        op = min_t(u32, tail, bits);

        if (wnd->free_bits[iw]) {
            bool ret;
            struct buffer_head *bh = wnd_map(wnd, iw);

            if (IS_ERR(bh))
                goto out;

            ret = are_bits_set((ulong *)bh->b_data, wbit, op);
            put_bh(bh);
            if (!ret)
                goto out;
        }

        bits -= op;
        wbit = 0;
        iw += 1;
    }
    ret = true;

out:
    return ret;
}

/*
 * wnd_find - Look for free space.
 *
 * - flags - BITMAP_FIND_XXX flags
 *
 * Return: 0 if not found.
 */
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
        size_t flags, size_t *allocated)
{
    struct super_block *sb;
    u32 wbits, wpos, wzbit, wzend;
    size_t fnd, max_alloc, b_len, b_pos;
    size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
    size_t to_alloc0 = to_alloc;
    const ulong *buf;
    const struct e_node *e;
    const struct rb_node *pr, *cr;
    u8 log2_bits;
    bool fbits_valid;
    struct buffer_head *bh;

    /* Fast check for available free space. */
    if (flags & BITMAP_FIND_FULL) {
        size_t zeroes = wnd_zeroes(wnd);

        zeroes -= wnd->zone_end - wnd->zone_bit;
        if (zeroes < to_alloc0)
            goto no_space;

        if (to_alloc0 > wnd->extent_max)
            goto no_space;
    } else {
        if (to_alloc > wnd->extent_max)
            to_alloc = wnd->extent_max;
    }

    if (wnd->zone_bit <= hint && hint < wnd->zone_end)
        hint = wnd->zone_end;

    max_alloc = wnd->nbits;
    b_len = b_pos = 0;

    if (hint >= max_alloc)
        hint = 0;

    if (RB_EMPTY_ROOT(&wnd->start_tree)) {
        if (wnd->uptodated == 1) {
            /* Extents tree is up to date -> No free space. */
            goto no_space;
        }
        goto scan_bitmap;
    }

    e = NULL;
    if (!hint)
        goto allocate_biggest;

    /* Use hint: Enumerate extents by start >= hint. */
    pr = NULL;
    cr = wnd->start_tree.rb_node;

    for (;;) {
        e = rb_entry(cr, struct e_node, start.node);

        if (e->start.key == hint)
            break;

        if (e->start.key < hint) {
            pr = cr;
            cr = cr->rb_right;
            if (!cr)
                break;
            continue;
        }

        cr = cr->rb_left;
        if (!cr) {
            e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
            break;
        }
    }

    if (!e)
        goto allocate_biggest;

    if (e->start.key + e->count.key > hint) {
        /* We have found an extent with 'hint' inside. */
        size_t len = e->start.key + e->count.key - hint;

        if (len >= to_alloc && hint + to_alloc <= max_alloc) {
            fnd = hint;
            goto found;
        }

        if (!(flags & BITMAP_FIND_FULL)) {
            if (len > to_alloc)
                len = to_alloc;

            if (hint + len <= max_alloc) {
                fnd = hint;
                to_alloc = len;
                goto found;
            }
        }
    }

allocate_biggest:
    /* Allocate from the biggest free extent. */
    e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
    if (e->count.key != wnd->extent_max)
        wnd->extent_max = e->count.key;

    if (e->count.key < max_alloc) {
        if (e->count.key >= to_alloc) {
            ;
        } else if (flags & BITMAP_FIND_FULL) {
            if (e->count.key < to_alloc0) {
                /* The biggest free block is less than requested. */
                goto no_space;
            }
            to_alloc = e->count.key;
        } else if (-1 != wnd->uptodated) {
            to_alloc = e->count.key;
        } else {
            /* Check if we can use more bits. */
            size_t op, max_check;
            struct rb_root start_tree;

            memcpy(&start_tree, &wnd->start_tree,
                   sizeof(struct rb_root));
            memset(&wnd->start_tree, 0, sizeof(struct rb_root));

            max_check = e->start.key + to_alloc;
            if (max_check > max_alloc)
                max_check = max_alloc;
            for (op = e->start.key + e->count.key; op < max_check;
                 op++) {
                if (!wnd_is_free(wnd, op, 1))
                    break;
            }
            memcpy(&wnd->start_tree, &start_tree,
                   sizeof(struct rb_root));
            to_alloc = op - e->start.key;
        }

        /* Prepare to return. */
        fnd = e->start.key;
        if (e->start.key + to_alloc > max_alloc)
            to_alloc = max_alloc - e->start.key;
        goto found;
    }

    if (wnd->uptodated == 1) {
        /* Extents tree is up to date -> no free space. */
        goto no_space;
    }

    b_len = e->count.key;
    b_pos = e->start.key;

scan_bitmap:
    sb = wnd->sb;
    log2_bits = sb->s_blocksize_bits + 3;

    /* At most two ranges: [hint, max_alloc) + [0, hint). */
Again:

    /* TODO: Optimize request for case nbits > wbits. */
    iw = hint >> log2_bits;
    wbits = sb->s_blocksize * 8;
    wpos = hint & (wbits - 1);
    prev_tail = 0;
    fbits_valid = true;

    if (max_alloc == wnd->nbits) {
        nwnd = wnd->nwnd;
    } else {
        size_t t = max_alloc + wbits - 1;

        nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
    }

    /* Enumerate all windows. */
    for (; iw < nwnd; iw++) {
        wbit = iw << log2_bits;

        if (!wnd->free_bits[iw]) {
            if (prev_tail > b_len) {
                b_pos = wbit - prev_tail;
                b_len = prev_tail;
            }

            /* Skip fully used window. */
            prev_tail = 0;
            wpos = 0;
            continue;
        }

        if (unlikely(iw + 1 == nwnd)) {
            if (max_alloc == wnd->nbits) {
                wbits = wnd->bits_last;
            } else {
                size_t t = max_alloc & (wbits - 1);

                if (t) {
                    wbits = t;
                    fbits_valid = false;
                }
            }
        }

        if (wnd->zone_end > wnd->zone_bit) {
            ebit = wbit + wbits;
            zbit = max(wnd->zone_bit, wbit);
            zend = min(wnd->zone_end, ebit);

            /* Here we have a window [wbit, ebit) and zone [zbit, zend). */
            if (zend <= zbit) {
                /* Zone does not overlap window. */
            } else {
                wzbit = zbit - wbit;
                wzend = zend - wbit;

                /* Zone overlaps window. */
                if (wnd->free_bits[iw] == wzend - wzbit) {
                    prev_tail = 0;
                    wpos = 0;
                    continue;
                }

                /* Scan two ranges of the window: [wbit, zbit) and [zend, ebit). */
                bh = wnd_map(wnd, iw);

                if (IS_ERR(bh)) {
                    /* TODO: Error */
                    prev_tail = 0;
                    wpos = 0;
                    continue;
                }

                buf = (ulong *)bh->b_data;

                /* Scan range [wbit, zbit). */
                if (wpos < wzbit) {
                    /* Scan range [wpos, zbit). */
                    fnd = wnd_scan(buf, wbit, wpos, wzbit,
                               to_alloc, &prev_tail,
                               &b_pos, &b_len);
                    if (fnd != MINUS_ONE_T) {
                        put_bh(bh);
                        goto found;
                    }
                }

                prev_tail = 0;

                /* Scan range [zend, ebit). */
                if (wzend < wbits) {
                    fnd = wnd_scan(buf, wbit,
                               max(wzend, wpos), wbits,
                               to_alloc, &prev_tail,
                               &b_pos, &b_len);
                    if (fnd != MINUS_ONE_T) {
                        put_bh(bh);
                        goto found;
                    }
                }

                wpos = 0;
                put_bh(bh);
                continue;
            }
        }

        /* Current window does not overlap zone. */
        if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
            /* Window is empty. */
            if (prev_tail + wbits >= to_alloc) {
                fnd = wbit + wpos - prev_tail;
                goto found;
            }

            /* Increase 'prev_tail' and process next window. */
            prev_tail += wbits;
            wpos = 0;
            continue;
        }

        /* Read window. */
        bh = wnd_map(wnd, iw);
        if (IS_ERR(bh)) {
            // TODO: Error.
            prev_tail = 0;
            wpos = 0;
            continue;
        }

        buf = (ulong *)bh->b_data;

        /* Scan range [wpos, wbits). */
        fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
                   &b_pos, &b_len);
        put_bh(bh);
        if (fnd != MINUS_ONE_T)
            goto found;
    }

    if (b_len < prev_tail) {
        /* The last fragment. */
        b_len = prev_tail;
        b_pos = max_alloc - prev_tail;
    }

    if (hint) {
        /*
         * We have scanned the range [hint, max_alloc).
         * Prepare to scan the range [0, hint + to_alloc).
         */
        size_t nextmax = hint + to_alloc;

        if (likely(nextmax >= hint) && nextmax < max_alloc)
            max_alloc = nextmax;
        hint = 0;
        goto Again;
    }

    if (!b_len)
        goto no_space;

    wnd->extent_max = b_len;

    if (flags & BITMAP_FIND_FULL)
        goto no_space;

    fnd = b_pos;
    to_alloc = b_len;

found:
    if (flags & BITMAP_FIND_MARK_AS_USED) {
        /* TODO: Optimize remove extent (pass 'e'?). */
        if (wnd_set_used(wnd, fnd, to_alloc))
            goto no_space;
    } else if (wnd->extent_max != MINUS_ONE_T &&
           to_alloc > wnd->extent_max) {
        wnd->extent_max = to_alloc;
    }

    *allocated = fnd;
    return to_alloc;

no_space:
    return 0;
}
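
/*
 * Illustrative use (hypothetical caller): allocate up to 16 clusters near
 * 'hint' and mark them used in one call:
 *
 *	size_t lcn;
 *	size_t got = wnd_find(wnd, 16, hint,
 *			      BITMAP_FIND_MARK_AS_USED, &lcn);
 *	if (!got)
 *		return -ENOSPC;
 *
 * Without BITMAP_FIND_FULL the call may return fewer than 16 clusters;
 * with it, anything short of the full request is treated as no space.
 */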

/*
 * wnd_extend - Extend bitmap ($MFT bitmap).
 */
int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
{
    int err;
    struct super_block *sb = wnd->sb;
    struct ntfs_sb_info *sbi = sb->s_fs_info;
    u32 blocksize = sb->s_blocksize;
    u32 wbits = blocksize * 8;
    u32 b0, new_last;
    size_t bits, iw, new_wnd;
    size_t old_bits = wnd->nbits;
    u16 *new_free;

    if (new_bits <= old_bits)
        return -EINVAL;

    /* Align to 8 byte boundary. */
    new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
    new_last = new_bits & (wbits - 1);
    if (!new_last)
        new_last = wbits;

    if (new_wnd != wnd->nwnd) {
        new_free = kmalloc(new_wnd * sizeof(u16), GFP_NOFS);
        if (!new_free)
            return -ENOMEM;

        memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
        memset(new_free + wnd->nwnd, 0,
               (new_wnd - wnd->nwnd) * sizeof(short));
        kfree(wnd->free_bits);
        wnd->free_bits = new_free;
    }

    /* Zero bits [old_bits, new_bits). */
    bits = new_bits - old_bits;
    b0 = old_bits & (wbits - 1);

    for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
        u32 op;
        size_t frb;
        u64 vbo, lbo, bytes;
        struct buffer_head *bh;
        ulong *buf;

        if (iw + 1 == new_wnd)
            wbits = new_last;

        op = b0 + bits > wbits ? wbits - b0 : bits;
        vbo = (u64)iw * blocksize;

        err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
        if (err)
            break;

        bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
        if (!bh)
            return -EIO;

        lock_buffer(bh);
        buf = (ulong *)bh->b_data;

        __bitmap_clear(buf, b0, blocksize * 8 - b0);
        frb = wbits - __bitmap_weight(buf, wbits);
        wnd->total_zeroes += frb - wnd->free_bits[iw];
        wnd->free_bits[iw] = frb;

        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        /* err = sync_dirty_buffer(bh); */

        b0 = 0;
        bits -= op;
    }

    wnd->nbits = new_bits;
    wnd->nwnd = new_wnd;
    wnd->bits_last = new_last;

    wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);

    return 0;
}

void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
    size_t zlen = wnd->zone_end - wnd->zone_bit;

    if (zlen)
        wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);

    if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
        wnd_remove_free_ext(wnd, lcn, len);

    wnd->zone_bit = lcn;
    wnd->zone_end = lcn + len;
}
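
/*
 * wnd_zone_set() reserves [lcn, lcn + len) (e.g. the MFT zone): the old
 * zone is returned to the free-extent trees, the new one is removed from
 * them, and wnd_find() refuses hints that fall inside it.
 */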

int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
{
    int err = 0;
    struct super_block *sb = sbi->sb;
    struct wnd_bitmap *wnd = &sbi->used.bitmap;
    u32 wbits = 8 * sb->s_blocksize;
    CLST len = 0, lcn = 0, done = 0;
    CLST minlen = bytes_to_cluster(sbi, range->minlen);
    CLST lcn_from = bytes_to_cluster(sbi, range->start);
    size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
    u32 wbit = lcn_from & (wbits - 1);
    const ulong *buf;
    CLST lcn_to;

    if (!minlen)
        minlen = 1;

    if (range->len == (u64)-1)
        lcn_to = wnd->nbits;
    else
        lcn_to = bytes_to_cluster(sbi, range->start + range->len);

    down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);

    /*
     * Iterate over windows, not bits: 'iw' indexes free_bits[], which has
     * only 'nwnd' entries, so comparing against 'nbits' here would read
     * past the end of the array.
     */
    for (; iw < wnd->nwnd; iw++, wbit = 0) {
        CLST lcn_wnd = iw * wbits;
        struct buffer_head *bh;

        if (lcn_wnd > lcn_to)
            break;

        if (!wnd->free_bits[iw])
            continue;

        if (iw + 1 == wnd->nwnd)
            wbits = wnd->bits_last;

        if (lcn_wnd + wbits > lcn_to)
            wbits = lcn_to - lcn_wnd;

        bh = wnd_map(wnd, iw);
        if (IS_ERR(bh)) {
            err = PTR_ERR(bh);
            break;
        }

        buf = (ulong *)bh->b_data;

        for (; wbit < wbits; wbit++) {
            if (!test_bit(wbit, buf)) {
                if (!len)
                    lcn = lcn_wnd + wbit;
                len += 1;
                continue;
            }
            if (len >= minlen) {
                err = ntfs_discard(sbi, lcn, len);
                if (err) {
                    /* Drop the buffer reference before bailing out. */
                    put_bh(bh);
                    goto out;
                }
                done += len;
            }
            len = 0;
        }
        put_bh(bh);
    }

    /* Process the last fragment. */
    if (len >= minlen) {
        err = ntfs_discard(sbi, lcn, len);
        if (err)
            goto out;
        done += len;
    }

out:
    range->len = (u64)done << sbi->cluster_bits;

    up_read(&wnd->rw_lock);

    return err;
}