/*
 * balloc.c
 *
 * PURPOSE
 *  Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *      ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit   __test_and_clear_bit_le
#define udf_set_bit __test_and_set_bit_le
#define udf_test_bit    test_bit_le
#define udf_find_next_one_bit   find_next_bit_le

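/*
 * Read bitmap block 'block' of the partition's space bitmap from disk and
 * cache the buffer head in slot 'bitmap_nr'. Returns 0 on success or -EIO
 * if the read fails (the slot is then left pointing at NULL).
 */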
static int read_block_bitmap(struct super_block *sb,
                 struct udf_bitmap *bitmap, unsigned int block,
                 unsigned long bitmap_nr)
{
    struct buffer_head *bh = NULL;
    int retval = 0;
    struct kernel_lb_addr loc;

    loc.logicalBlockNum = bitmap->s_extPosition;
    loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

    bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
    if (!bh)
        retval = -EIO;

    bitmap->s_block_bitmap[bitmap_nr] = bh;
    return retval;
}

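/*
 * Return the slot of the cached bitmap block for 'block_group', reading it
 * in from disk if it is not cached yet. An out-of-range group is only
 * reported via udf_debug(). Returns the slot number (the group number
 * itself) or a negative error code.
 */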
static int __load_block_bitmap(struct super_block *sb,
                   struct udf_bitmap *bitmap,
                   unsigned int block_group)
{
    int retval = 0;
    int nr_groups = bitmap->s_nr_groups;

    if (block_group >= nr_groups) {
        udf_debug("block_group (%u) > nr_groups (%d)\n",
              block_group, nr_groups);
    }

    if (bitmap->s_block_bitmap[block_group])
        return block_group;

    retval = read_block_bitmap(sb, bitmap, block_group, block_group);
    if (retval < 0)
        return retval;

    return block_group;
}

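/*
 * Wrapper around __load_block_bitmap() that additionally fails with -EIO
 * when the cached buffer head for the slot is missing.
 */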
static inline int load_block_bitmap(struct super_block *sb,
                    struct udf_bitmap *bitmap,
                    unsigned int block_group)
{
    int slot;

    slot = __load_block_bitmap(sb, bitmap, block_group);

    if (slot < 0)
        return slot;

    if (!bitmap->s_block_bitmap[slot])
        return -EIO;

    return slot;
}

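/*
 * Adjust the free space counter of 'partition' in the Logical Volume
 * Integrity Descriptor by 'cnt' blocks and mark the LVID as updated.
 * Callers also pass negative deltas through the u32 argument, relying on
 * 32-bit wraparound.
 */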
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
    struct udf_sb_info *sbi = UDF_SB(sb);
    struct logicalVolIntegrityDesc *lvid;

    if (!sbi->s_lvid_bh)
        return;

    lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
    le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
    udf_updated_lvid(sb);
}

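/*
 * Free 'count' blocks starting at 'bloc' + 'offset' in a bitmap-managed
 * partition: set the corresponding bits in the space bitmap, handling runs
 * that cross a bitmap block boundary, and credit the freed blocks to the
 * partition's free space counter.
 */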
static void udf_bitmap_free_blocks(struct super_block *sb,
                   struct udf_bitmap *bitmap,
                   struct kernel_lb_addr *bloc,
                   uint32_t offset,
                   uint32_t count)
{
    struct udf_sb_info *sbi = UDF_SB(sb);
    struct buffer_head *bh = NULL;
    struct udf_part_map *partmap;
    unsigned long block;
    unsigned long block_group;
    unsigned long bit;
    unsigned long i;
    int bitmap_nr;
    unsigned long overflow;

    mutex_lock(&sbi->s_alloc_mutex);
    partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
    if (bloc->logicalBlockNum + count < count ||
        (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
        udf_debug("%u < %d || %u + %u > %u\n",
              bloc->logicalBlockNum, 0,
              bloc->logicalBlockNum, count,
              partmap->s_partition_len);
        goto error_return;
    }

    block = bloc->logicalBlockNum + offset +
        (sizeof(struct spaceBitmapDesc) << 3);

    do {
        overflow = 0;
        block_group = block >> (sb->s_blocksize_bits + 3);
        bit = block % (sb->s_blocksize << 3);

        /*
         * Check to see if we are freeing blocks across a group boundary.
         */
        if (bit + count > (sb->s_blocksize << 3)) {
            overflow = bit + count - (sb->s_blocksize << 3);
            count -= overflow;
        }
        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
            goto error_return;

        bh = bitmap->s_block_bitmap[bitmap_nr];
        for (i = 0; i < count; i++) {
            if (udf_set_bit(bit + i, bh->b_data)) {
                udf_debug("bit %lu already set\n", bit + i);
                udf_debug("byte=%2x\n",
                      ((__u8 *)bh->b_data)[(bit + i) >> 3]);
            }
        }
        udf_add_free_space(sb, sbi->s_partition, count);
        mark_buffer_dirty(bh);
        if (overflow) {
            block += count;
            count = overflow;
        }
    } while (overflow);

error_return:
    mutex_unlock(&sbi->s_alloc_mutex);
}

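/*
 * Try to preallocate up to 'block_count' contiguous blocks starting at
 * 'first_block' by clearing the corresponding bits in the space bitmap.
 * Stops at the first block that is already in use and returns the number
 * of blocks actually allocated.
 */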
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                      struct udf_bitmap *bitmap,
                      uint16_t partition, uint32_t first_block,
                      uint32_t block_count)
{
    struct udf_sb_info *sbi = UDF_SB(sb);
    int alloc_count = 0;
    int bit, block, block_group;
    int bitmap_nr;
    struct buffer_head *bh;
    __u32 part_len;

    mutex_lock(&sbi->s_alloc_mutex);
    part_len = sbi->s_partmaps[partition].s_partition_len;
    if (first_block >= part_len)
        goto out;

    if (first_block + block_count > part_len)
        block_count = part_len - first_block;

    do {
        block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
            goto out;
        bh = bitmap->s_block_bitmap[bitmap_nr];

        bit = block % (sb->s_blocksize << 3);

        while (bit < (sb->s_blocksize << 3) && block_count > 0) {
            if (!udf_clear_bit(bit, bh->b_data))
                goto out;
            block_count--;
            alloc_count++;
            bit++;
            block++;
        }
        mark_buffer_dirty(bh);
    } while (block_count > 0);

out:
    udf_add_free_space(sb, partition, -alloc_count);
    mutex_unlock(&sbi->s_alloc_mutex);
    return alloc_count;
}

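/*
 * Allocate a single block from a bitmap-managed partition, preferring a
 * block at or near 'goal'. Scans the bitmap for a set (free) bit, walks
 * backwards up to 7 bits to favour the start of a free run, clears the
 * chosen bit and returns the new block number, or 0 with *err set on
 * failure.
 */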
static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
                struct udf_bitmap *bitmap, uint16_t partition,
                uint32_t goal, int *err)
{
    struct udf_sb_info *sbi = UDF_SB(sb);
    int newbit, bit = 0;
    udf_pblk_t block;
    int block_group, group_start;
    int end_goal, nr_groups, bitmap_nr, i;
    struct buffer_head *bh = NULL;
    char *ptr;
    udf_pblk_t newblock = 0;

    *err = -ENOSPC;
    mutex_lock(&sbi->s_alloc_mutex);

repeat:
    if (goal >= sbi->s_partmaps[partition].s_partition_len)
        goal = 0;

    nr_groups = bitmap->s_nr_groups;
    block = goal + (sizeof(struct spaceBitmapDesc) << 3);
    block_group = block >> (sb->s_blocksize_bits + 3);
    group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

    bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
    if (bitmap_nr < 0)
        goto error_return;
    bh = bitmap->s_block_bitmap[bitmap_nr];
    ptr = memscan((char *)bh->b_data + group_start, 0xFF,
              sb->s_blocksize - group_start);

    if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
        bit = block % (sb->s_blocksize << 3);
        if (udf_test_bit(bit, bh->b_data))
            goto got_block;

        end_goal = (bit + 63) & ~63;
        bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
        if (bit < end_goal)
            goto got_block;

        ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                  sb->s_blocksize - ((bit + 7) >> 3));
        newbit = (ptr - ((char *)bh->b_data)) << 3;
        if (newbit < sb->s_blocksize << 3) {
            bit = newbit;
            goto search_back;
        }

        newbit = udf_find_next_one_bit(bh->b_data,
                           sb->s_blocksize << 3, bit);
        if (newbit < sb->s_blocksize << 3) {
            bit = newbit;
            goto got_block;
        }
    }

    for (i = 0; i < (nr_groups * 2); i++) {
        block_group++;
        if (block_group >= nr_groups)
            block_group = 0;
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
            goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        if (i < nr_groups) {
            ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);
            if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = (ptr - ((char *)bh->b_data)) << 3;
                break;
            }
        } else {
            bit = udf_find_next_one_bit(bh->b_data,
                            sb->s_blocksize << 3,
                            group_start << 3);
            if (bit < sb->s_blocksize << 3)
                break;
        }
    }
    if (i >= (nr_groups * 2)) {
        mutex_unlock(&sbi->s_alloc_mutex);
        return newblock;
    }
    if (bit < sb->s_blocksize << 3)
        goto search_back;
    else
        bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                        group_start << 3);
    if (bit >= sb->s_blocksize << 3) {
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
    }

search_back:
    i = 0;
    while (i < 7 && bit > (group_start << 3) &&
           udf_test_bit(bit - 1, bh->b_data)) {
        ++i;
        --bit;
    }

got_block:
    newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
        (sizeof(struct spaceBitmapDesc) << 3);

    if (newblock >= sbi->s_partmaps[partition].s_partition_len) {
        /*
         * Ran off the end of the bitmap, and bits following are
         * non-compliant (not all zero)
         */
        udf_err(sb, "bitmap for partition %d corrupted (block %u marked"
            " as free, partition length is %u)\n", partition,
            newblock, sbi->s_partmaps[partition].s_partition_len);
        goto error_return;
    }

    if (!udf_clear_bit(bit, bh->b_data)) {
        udf_debug("bit already cleared for block %d\n", bit);
        goto repeat;
    }

    mark_buffer_dirty(bh);

    udf_add_free_space(sb, partition, -1);
    mutex_unlock(&sbi->s_alloc_mutex);
    *err = 0;
    return newblock;

error_return:
    *err = -EIO;
    mutex_unlock(&sbi->s_alloc_mutex);
    return 0;
}

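/*
 * Free 'count' blocks starting at 'bloc' + 'offset' in a table-managed
 * partition: merge the freed run into an adjacent extent of the
 * unallocated space table when possible, otherwise append a new extent,
 * stealing one of the freed blocks for an indirect extent block if the
 * current extent block is full.
 */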
static void udf_table_free_blocks(struct super_block *sb,
                  struct inode *table,
                  struct kernel_lb_addr *bloc,
                  uint32_t offset,
                  uint32_t count)
{
    struct udf_sb_info *sbi = UDF_SB(sb);
    struct udf_part_map *partmap;
    uint32_t start, end;
    uint32_t elen;
    struct kernel_lb_addr eloc;
    struct extent_position oepos, epos;
    int8_t etype;
    struct udf_inode_info *iinfo;

    mutex_lock(&sbi->s_alloc_mutex);
    partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
    if (bloc->logicalBlockNum + count < count ||
        (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
        udf_debug("%u < %d || %u + %u > %u\n",
              bloc->logicalBlockNum, 0,
              bloc->logicalBlockNum, count,
              partmap->s_partition_len);
        goto error_return;
    }

    iinfo = UDF_I(table);
    udf_add_free_space(sb, sbi->s_partition, count);

    start = bloc->logicalBlockNum + offset;
    end = bloc->logicalBlockNum + offset + count - 1;

    epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
    elen = 0;
    epos.block = oepos.block = iinfo->i_location;
    epos.bh = oepos.bh = NULL;

    while (count &&
           (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
        if (((eloc.logicalBlockNum +
            (elen >> sb->s_blocksize_bits)) == start)) {
            if ((0x3FFFFFFF - elen) <
                    (count << sb->s_blocksize_bits)) {
                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                            sb->s_blocksize_bits);
                count -= tmp;
                start += tmp;
                elen = (etype << 30) |
                    (0x40000000 - sb->s_blocksize);
            } else {
                elen = (etype << 30) |
                    (elen +
                    (count << sb->s_blocksize_bits));
                start += count;
                count = 0;
            }
            udf_write_aext(table, &oepos, &eloc, elen, 1);
        } else if (eloc.logicalBlockNum == (end + 1)) {
            if ((0x3FFFFFFF - elen) <
                    (count << sb->s_blocksize_bits)) {
                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                        sb->s_blocksize_bits);
                count -= tmp;
                end -= tmp;
                eloc.logicalBlockNum -= tmp;
                elen = (etype << 30) |
                    (0x40000000 - sb->s_blocksize);
            } else {
                eloc.logicalBlockNum = start;
                elen = (etype << 30) |
                    (elen +
                    (count << sb->s_blocksize_bits));
                end -= count;
                count = 0;
            }
            udf_write_aext(table, &oepos, &eloc, elen, 1);
        }

        if (epos.bh != oepos.bh) {
            oepos.block = epos.block;
            brelse(oepos.bh);
            get_bh(epos.bh);
            oepos.bh = epos.bh;
            oepos.offset = 0;
        } else {
            oepos.offset = epos.offset;
        }
    }

    if (count) {
        /*
         * NOTE: we CANNOT use udf_add_aext here, as it can try to
         * allocate a new block, and since we hold the super block
         * lock already very bad things would happen :)
         *
         * We copy the behavior of udf_add_aext, but instead of
         * trying to allocate a new block close to the existing one,
         * we just steal a block from the extent we are trying to add.
         *
         * It would be nice if the blocks were close together, but it
         * isn't required.
         */

        int adsize;

        eloc.logicalBlockNum = start;
        elen = EXT_RECORDED_ALLOCATED |
            (count << sb->s_blocksize_bits);

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
            adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
            adsize = sizeof(struct long_ad);
        else {
            brelse(oepos.bh);
            brelse(epos.bh);
            goto error_return;
        }

        if (epos.offset + (2 * adsize) > sb->s_blocksize) {
            /* Steal a block from the extent being free'd */
            udf_setup_indirect_aext(table, eloc.logicalBlockNum,
                        &epos);

            eloc.logicalBlockNum++;
            elen -= sb->s_blocksize;
        }

        /* It's possible that stealing the block emptied the extent */
        if (elen)
            __udf_add_aext(table, &epos, &eloc, elen, 1);
    }

    brelse(epos.bh);
    brelse(oepos.bh);

error_return:
    mutex_unlock(&sbi->s_alloc_mutex);
    return;
}

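/*
 * Try to preallocate up to 'block_count' blocks starting at 'first_block'
 * from a table-managed partition. Only succeeds if an extent in the free
 * space table begins exactly at 'first_block'; that extent is shrunk (or
 * deleted) by the number of blocks handed out.
 */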
static int udf_table_prealloc_blocks(struct super_block *sb,
                     struct inode *table, uint16_t partition,
                     uint32_t first_block, uint32_t block_count)
{
    struct udf_sb_info *sbi = UDF_SB(sb);
    int alloc_count = 0;
    uint32_t elen, adsize;
    struct kernel_lb_addr eloc;
    struct extent_position epos;
    int8_t etype = -1;
    struct udf_inode_info *iinfo;

    if (first_block >= sbi->s_partmaps[partition].s_partition_len)
        return 0;

    iinfo = UDF_I(table);
    if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
        adsize = sizeof(struct short_ad);
    else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
        adsize = sizeof(struct long_ad);
    else
        return 0;

    mutex_lock(&sbi->s_alloc_mutex);
    epos.offset = sizeof(struct unallocSpaceEntry);
    epos.block = iinfo->i_location;
    epos.bh = NULL;
    eloc.logicalBlockNum = 0xFFFFFFFF;

    while (first_block != eloc.logicalBlockNum &&
           (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
        udf_debug("eloc=%u, elen=%u, first_block=%u\n",
              eloc.logicalBlockNum, elen, first_block);
        ; /* empty loop body */
    }

    if (first_block == eloc.logicalBlockNum) {
        epos.offset -= adsize;

        alloc_count = (elen >> sb->s_blocksize_bits);
        if (alloc_count > block_count) {
            alloc_count = block_count;
            eloc.logicalBlockNum += alloc_count;
            elen -= (alloc_count << sb->s_blocksize_bits);
            udf_write_aext(table, &epos, &eloc,
                    (etype << 30) | elen, 1);
        } else
            udf_delete_aext(table, epos);
    } else {
        alloc_count = 0;
    }

    brelse(epos.bh);

    if (alloc_count)
        udf_add_free_space(sb, partition, -alloc_count);
    mutex_unlock(&sbi->s_alloc_mutex);
    return alloc_count;
}

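/*
 * Allocate a single block from a table-managed partition. Searches the
 * free space table for the extent closest to 'goal', takes the first block
 * of that extent, and shrinks or deletes the extent accordingly.
 */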
static udf_pblk_t udf_table_new_block(struct super_block *sb,
                   struct inode *table, uint16_t partition,
                   uint32_t goal, int *err)
{
    struct udf_sb_info *sbi = UDF_SB(sb);
    uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
    udf_pblk_t newblock = 0;
    uint32_t adsize;
    uint32_t elen, goal_elen = 0;
    struct kernel_lb_addr eloc, goal_eloc;
    struct extent_position epos, goal_epos;
    int8_t etype;
    struct udf_inode_info *iinfo = UDF_I(table);

    *err = -ENOSPC;

    if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
        adsize = sizeof(struct short_ad);
    else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
        adsize = sizeof(struct long_ad);
    else
        return newblock;

    mutex_lock(&sbi->s_alloc_mutex);
    if (goal >= sbi->s_partmaps[partition].s_partition_len)
        goal = 0;

    /*
     * We search for the closest matching block to goal. If we find
     * an exact hit, we stop. Otherwise we keep going till we run out
     * of extents. We store the buffer_head, bloc, and extoffset
     * of the current closest match and use that when we are done.
     */
    epos.offset = sizeof(struct unallocSpaceEntry);
    epos.block = iinfo->i_location;
    epos.bh = goal_epos.bh = NULL;

    while (spread &&
           (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
        if (goal >= eloc.logicalBlockNum) {
            if (goal < eloc.logicalBlockNum +
                    (elen >> sb->s_blocksize_bits))
                nspread = 0;
            else
                nspread = goal - eloc.logicalBlockNum -
                    (elen >> sb->s_blocksize_bits);
        } else {
            nspread = eloc.logicalBlockNum - goal;
        }

        if (nspread < spread) {
            spread = nspread;
            if (goal_epos.bh != epos.bh) {
                brelse(goal_epos.bh);
                goal_epos.bh = epos.bh;
                get_bh(goal_epos.bh);
            }
            goal_epos.block = epos.block;
            goal_epos.offset = epos.offset - adsize;
            goal_eloc = eloc;
            goal_elen = (etype << 30) | elen;
        }
    }

    brelse(epos.bh);

    if (spread == 0xFFFFFFFF) {
        brelse(goal_epos.bh);
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
    }

    /*
     * Only allocate blocks from the beginning of the extent. That way,
     * we only delete (empty) extents and never have to insert an extent
     * because of splitting.
     */
    /* This works, but very poorly.... */

    newblock = goal_eloc.logicalBlockNum;
    goal_eloc.logicalBlockNum++;
    goal_elen -= sb->s_blocksize;

    if (goal_elen)
        udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
    else
        udf_delete_aext(table, goal_epos);
    brelse(goal_epos.bh);

    udf_add_free_space(sb, partition, -1);

    mutex_unlock(&sbi->s_alloc_mutex);
    *err = 0;
    return newblock;
}

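/*
 * Free a run of blocks, dispatching to the bitmap or table implementation
 * depending on how the partition tracks unallocated space, and drop the
 * freed bytes from the inode's block accounting.
 */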
void udf_free_blocks(struct super_block *sb, struct inode *inode,
             struct kernel_lb_addr *bloc, uint32_t offset,
             uint32_t count)
{
    uint16_t partition = bloc->partitionReferenceNum;
    struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

    if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
        udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
                       bloc, offset, count);
    } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
        udf_table_free_blocks(sb, map->s_uspace.s_table,
                      bloc, offset, count);
    }

    if (inode) {
        inode_sub_bytes(inode,
                ((sector_t)count) << sb->s_blocksize_bits);
    }
}

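/*
 * Preallocate up to 'block_count' blocks, dispatching to the bitmap or
 * table implementation, and add the allocated bytes to the inode's block
 * accounting.
 */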
inline int udf_prealloc_blocks(struct super_block *sb,
                   struct inode *inode,
                   uint16_t partition, uint32_t first_block,
                   uint32_t block_count)
{
    struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
    int allocated;

    if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
        allocated = udf_bitmap_prealloc_blocks(sb,
                               map->s_uspace.s_bitmap,
                               partition, first_block,
                               block_count);
    else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
        allocated = udf_table_prealloc_blocks(sb,
                              map->s_uspace.s_table,
                              partition, first_block,
                              block_count);
    else
        return 0;

    if (inode && allocated > 0)
        inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
    return allocated;
}

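/*
 * Allocate a single block near 'goal', dispatching to the bitmap or table
 * implementation, and add the block to the inode's block accounting.
 */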
inline udf_pblk_t udf_new_block(struct super_block *sb,
             struct inode *inode,
             uint16_t partition, uint32_t goal, int *err)
{
    struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
    udf_pblk_t block;

    if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
        block = udf_bitmap_new_block(sb,
                         map->s_uspace.s_bitmap,
                         partition, goal, err);
    else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
        block = udf_table_new_block(sb,
                        map->s_uspace.s_table,
                        partition, goal, err);
    else {
        *err = -EIO;
        return 0;
    }
    if (inode && block)
        inode_add_bytes(inode, sb->s_blocksize);
    return block;
}