0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *  linux/fs/ufs/balloc.c
0004  *
0005  * Copyright (C) 1998
0006  * Daniel Pirkl <daniel.pirkl@email.cz>
0007  * Charles University, Faculty of Mathematics and Physics
0008  *
0009  * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
0010  */
0011 
0012 #include <linux/fs.h>
0013 #include <linux/stat.h>
0014 #include <linux/time.h>
0015 #include <linux/string.h>
0016 #include <linux/buffer_head.h>
0017 #include <linux/capability.h>
0018 #include <linux/bitops.h>
0019 #include <linux/bio.h>
0020 #include <asm/byteorder.h>
0021 
0022 #include "ufs_fs.h"
0023 #include "ufs.h"
0024 #include "swab.h"
0025 #include "util.h"
0026 
0027 #define INVBLOCK ((u64)-1L)
0028 
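     /*
      * The helpers declared below do the real work: ufs_add_fragments()
      * extends an existing run of fragments in place, ufs_alloc_fragments()
      * finds free fragments in a cylinder group, ufs_alloccg_block()
      * allocates a whole block within one group, ufs_bitmap_search() scans
      * the per-group free-fragment bitmap, and the ufs_fragtable_*[] arrays
      * summarize which free-run lengths a bitmap byte contains.
      */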
0029 static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
0030 static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
0031 static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
0032 static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
0033 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
0034 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
0035 
0036 /*
0037  * Free 'count' fragments from fragment number 'fragment'
0038  */
0039 void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
0040 {
0041     struct super_block * sb;
0042     struct ufs_sb_private_info * uspi;
0043     struct ufs_cg_private_info * ucpi;
0044     struct ufs_cylinder_group * ucg;
0045     unsigned cgno, bit, end_bit, bbase, blkmap, i;
0046     u64 blkno;
0047     
0048     sb = inode->i_sb;
0049     uspi = UFS_SB(sb)->s_uspi;
0050     
0051     UFSD("ENTER, fragment %llu, count %u\n",
0052          (unsigned long long)fragment, count);
0053     
0054     if (ufs_fragnum(fragment) + count > uspi->s_fpg)
0055         ufs_error (sb, "ufs_free_fragments", "internal error");
0056 
0057     mutex_lock(&UFS_SB(sb)->s_lock);
0058     
0059     cgno = ufs_dtog(uspi, fragment);
0060     bit = ufs_dtogd(uspi, fragment);
0061     if (cgno >= uspi->s_ncg) {
0062         ufs_panic (sb, "ufs_free_fragments", "fragments being freed are outside the device");
0063         goto failed;
0064     }
0065         
0066     ucpi = ufs_load_cylinder (sb, cgno);
0067     if (!ucpi) 
0068         goto failed;
0069     ucg = ubh_get_ucg (UCPI_UBH(ucpi));
0070     if (!ufs_cg_chkmagic(sb, ucg)) {
0071         ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
0072         goto failed;
0073     }
0074 
0075     end_bit = bit + count;
0076     bbase = ufs_blknum (bit);
0077     blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
0078     ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
0079     for (i = bit; i < end_bit; i++) {
0080         if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
0081             ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
0082         else 
0083             ufs_error (sb, "ufs_free_fragments",
0084                "bit already set for fragment %u", i);
0085     }
0086 
0087     inode_sub_bytes(inode, count << uspi->s_fshift);
0088     fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
0089     uspi->cs_total.cs_nffree += count;
0090     fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
0091     blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
0092     ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
0093 
0094     /*
0095      * Try to reassemble the freed fragments into a whole free block
0096      */
0097     blkno = ufs_fragstoblks (bbase);
0098     if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
0099         fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
0100         uspi->cs_total.cs_nffree -= uspi->s_fpb;
0101         fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
0102         if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
0103             ufs_clusteracct (sb, ucpi, blkno, 1);
0104         fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
0105         uspi->cs_total.cs_nbfree++;
0106         fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
0107         if (uspi->fs_magic != UFS2_MAGIC) {
0108             unsigned cylno = ufs_cbtocylno (bbase);
0109 
0110             fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
0111                           ufs_cbtorpos(bbase)), 1);
0112             fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
0113         }
0114     }
0115     
0116     ubh_mark_buffer_dirty (USPI_UBH(uspi));
0117     ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
0118     if (sb->s_flags & SB_SYNCHRONOUS)
0119         ubh_sync_block(UCPI_UBH(ucpi));
0120     ufs_mark_sb_dirty(sb);
0121 
0122     mutex_unlock(&UFS_SB(sb)->s_lock);
0123     UFSD("EXIT\n");
0124     return;
0125 
0126 failed:
0127     mutex_unlock(&UFS_SB(sb)->s_lock);
0128     UFSD("EXIT (FAILED)\n");
0129     return;
0130 }
0131 
0132 /*
0133  * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
0134  */
0135 void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
0136 {
0137     struct super_block * sb;
0138     struct ufs_sb_private_info * uspi;
0139     struct ufs_cg_private_info * ucpi;
0140     struct ufs_cylinder_group * ucg;
0141     unsigned overflow, cgno, bit, end_bit, i;
0142     u64 blkno;
0143     
0144     sb = inode->i_sb;
0145     uspi = UFS_SB(sb)->s_uspi;
0146 
0147     UFSD("ENTER, fragment %llu, count %u\n",
0148          (unsigned long long)fragment, count);
0149     
0150     if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
0151         ufs_error (sb, "ufs_free_blocks", "internal error, "
0152                "fragment %llu, count %u\n",
0153                (unsigned long long)fragment, count);
0154         goto failed;
0155     }
0156 
0157     mutex_lock(&UFS_SB(sb)->s_lock);
0158     
0159 do_more:
0160     overflow = 0;
0161     cgno = ufs_dtog(uspi, fragment);
0162     bit = ufs_dtogd(uspi, fragment);
0163     if (cgno >= uspi->s_ncg) {
0164         ufs_panic (sb, "ufs_free_blocks", "blocks being freed are outside the device");
0165         goto failed_unlock;
0166     }
0167     end_bit = bit + count;
0168     if (end_bit > uspi->s_fpg) {
0169         overflow = bit + count - uspi->s_fpg;
0170         count -= overflow;
0171         end_bit -= overflow;
0172     }
0173 
0174     ucpi = ufs_load_cylinder (sb, cgno);
0175     if (!ucpi) 
0176         goto failed_unlock;
0177     ucg = ubh_get_ucg (UCPI_UBH(ucpi));
0178     if (!ufs_cg_chkmagic(sb, ucg)) {
0179         ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
0180         goto failed_unlock;
0181     }
0182 
0183     for (i = bit; i < end_bit; i += uspi->s_fpb) {
0184         blkno = ufs_fragstoblks(i);
0185         if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
0186             ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
0187         }
0188         ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
0189         inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
0190         if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
0191             ufs_clusteracct (sb, ucpi, blkno, 1);
0192 
0193         fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
0194         uspi->cs_total.cs_nbfree++;
0195         fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
0196 
0197         if (uspi->fs_magic != UFS2_MAGIC) {
0198             unsigned cylno = ufs_cbtocylno(i);
0199 
0200             fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
0201                           ufs_cbtorpos(i)), 1);
0202             fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
0203         }
0204     }
0205 
0206     ubh_mark_buffer_dirty (USPI_UBH(uspi));
0207     ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
0208     if (sb->s_flags & SB_SYNCHRONOUS)
0209         ubh_sync_block(UCPI_UBH(ucpi));
0210 
0211     if (overflow) {
0212         fragment += count;
0213         count = overflow;
0214         goto do_more;
0215     }
0216 
0217     ufs_mark_sb_dirty(sb);
0218     mutex_unlock(&UFS_SB(sb)->s_lock);
0219     UFSD("EXIT\n");
0220     return;
0221 
0222 failed_unlock:
0223     mutex_unlock(&UFS_SB(sb)->s_lock);
0224 failed:
0225     UFSD("EXIT (FAILED)\n");
0226     return;
0227 }
0228 
0229 /*
0230  * Modify the inode's page cache in the following way:
0231  * have - buffers with b_blocknr equal to oldb...oldb+count-1
0232  * get  - buffers with b_blocknr equal to newb...newb+count-1
0233  * We also assume that the blocks oldb...oldb+count-1 are
0234  * situated at the end of the file.
0235  *
0236  * We can get here from ufs_writepage or ufs_prepare_write;
0237  * locked_page is an argument of those functions, so it is already locked.
0238  */
0239 static void ufs_change_blocknr(struct inode *inode, sector_t beg,
0240                    unsigned int count, sector_t oldb,
0241                    sector_t newb, struct page *locked_page)
0242 {
0243     const unsigned blks_per_page =
0244         1 << (PAGE_SHIFT - inode->i_blkbits);
0245     const unsigned mask = blks_per_page - 1;
0246     struct address_space * const mapping = inode->i_mapping;
0247     pgoff_t index, cur_index, last_index;
0248     unsigned pos, j, lblock;
0249     sector_t end, i;
0250     struct page *page;
0251     struct buffer_head *head, *bh;
0252 
0253     UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
0254           inode->i_ino, count,
0255          (unsigned long long)oldb, (unsigned long long)newb);
0256 
0257     BUG_ON(!locked_page);
0258     BUG_ON(!PageLocked(locked_page));
0259 
0260     cur_index = locked_page->index;
0261     end = count + beg;
0262     last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
0263     for (i = beg; i < end; i = (i | mask) + 1) {
0264         index = i >> (PAGE_SHIFT - inode->i_blkbits);
0265 
0266         if (likely(cur_index != index)) {
0267             page = ufs_get_locked_page(mapping, index);
0268             if (!page)/* it was truncated */
0269                 continue;
0270             if (IS_ERR(page)) {/* or EIO */
0271                 ufs_error(inode->i_sb, __func__,
0272                       "read of page %llu failed\n",
0273                       (unsigned long long)index);
0274                 continue;
0275             }
0276         } else
0277             page = locked_page;
0278 
0279         head = page_buffers(page);
0280         bh = head;
0281         pos = i & mask;
0282         for (j = 0; j < pos; ++j)
0283             bh = bh->b_this_page;
0284 
0285 
0286         if (unlikely(index == last_index))
0287             lblock = end & mask;
0288         else
0289             lblock = blks_per_page;
0290 
0291         do {
0292             if (j >= lblock)
0293                 break;
0294             pos = (i - beg) + j;
0295 
0296             if (!buffer_mapped(bh))
0297                     map_bh(bh, inode->i_sb, oldb + pos);
0298             if (!buffer_uptodate(bh)) {
0299                 ll_rw_block(REQ_OP_READ, 1, &bh);
0300                 wait_on_buffer(bh);
0301                 if (!buffer_uptodate(bh)) {
0302                     ufs_error(inode->i_sb, __func__,
0303                           "read of block failed\n");
0304                     break;
0305                 }
0306             }
0307 
0308             UFSD(" change from %llu to %llu, pos %u\n",
0309                  (unsigned long long)(pos + oldb),
0310                  (unsigned long long)(pos + newb), pos);
0311 
0312             bh->b_blocknr = newb + pos;
0313             clean_bdev_bh_alias(bh);
0314             mark_buffer_dirty(bh);
0315             ++j;
0316             bh = bh->b_this_page;
0317         } while (bh != head);
0318 
0319         if (likely(cur_index != index))
0320             ufs_put_locked_page(page);
0321     }
0322     UFSD("EXIT\n");
0323 }
0324 
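     /*
      * Zero out newly allocated fragments in the buffer cache, presumably so
      * that stale on-disk contents are never exposed to userspace; for
      * IS_SYNC() inodes (or when 'sync' is set) the zeroed buffers are
      * written out immediately.
      */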
0325 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
0326                 int sync)
0327 {
0328     struct buffer_head *bh;
0329     sector_t end = beg + n;
0330 
0331     for (; beg < end; ++beg) {
0332         bh = sb_getblk(inode->i_sb, beg);
0333         lock_buffer(bh);
0334         memset(bh->b_data, 0, inode->i_sb->s_blocksize);
0335         set_buffer_uptodate(bh);
0336         mark_buffer_dirty(bh);
0337         unlock_buffer(bh);
0338         if (IS_SYNC(inode) || sync)
0339             sync_dirty_buffer(bh);
0340         brelse(bh);
0341     }
0342 }
0343 
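     /*
      * Allocate 'count' fragments for the data pointer 'p', preferably near
      * 'goal'.  Three cases are handled below: a brand new allocation
      * (oldcount == 0), extending the current run of fragments in place via
      * ufs_add_fragments(), and, if that fails, allocating a fresh run
      * elsewhere and migrating the existing buffers to it with
      * ufs_change_blocknr().
      */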
0344 u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
0345                u64 goal, unsigned count, int *err,
0346                struct page *locked_page)
0347 {
0348     struct super_block * sb;
0349     struct ufs_sb_private_info * uspi;
0350     struct ufs_super_block_first * usb1;
0351     unsigned cgno, oldcount, newcount;
0352     u64 tmp, request, result;
0353     
0354     UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
0355          inode->i_ino, (unsigned long long)fragment,
0356          (unsigned long long)goal, count);
0357     
0358     sb = inode->i_sb;
0359     uspi = UFS_SB(sb)->s_uspi;
0360     usb1 = ubh_get_usb_first(uspi);
0361     *err = -ENOSPC;
0362 
0363     mutex_lock(&UFS_SB(sb)->s_lock);
0364     tmp = ufs_data_ptr_to_cpu(sb, p);
0365 
0366     if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
0367         ufs_warning(sb, "ufs_new_fragments", "internal warning"
0368                 " fragment %llu, count %u",
0369                 (unsigned long long)fragment, count);
0370         count = uspi->s_fpb - ufs_fragnum(fragment); 
0371     }
0372     oldcount = ufs_fragnum (fragment);
0373     newcount = oldcount + count;
0374 
0375     /*
0376      * Somebody else has just allocated our fragments
0377      */
0378     if (oldcount) {
0379         if (!tmp) {
0380             ufs_error(sb, "ufs_new_fragments", "internal error, "
0381                   "fragment %llu, tmp %llu\n",
0382                   (unsigned long long)fragment,
0383                   (unsigned long long)tmp);
0384             mutex_unlock(&UFS_SB(sb)->s_lock);
0385             return INVBLOCK;
0386         }
0387         if (fragment < UFS_I(inode)->i_lastfrag) {
0388             UFSD("EXIT (ALREADY ALLOCATED)\n");
0389             mutex_unlock(&UFS_SB(sb)->s_lock);
0390             return 0;
0391         }
0392     }
0393     else {
0394         if (tmp) {
0395             UFSD("EXIT (ALREADY ALLOCATED)\n");
0396             mutex_unlock(&UFS_SB(sb)->s_lock);
0397             return 0;
0398         }
0399     }
0400 
0401     /*
0402      * There is not enough free space on the device for a non-privileged user
0403      */
0404     if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
0405         if (!capable(CAP_SYS_RESOURCE)) {
0406             mutex_unlock(&UFS_SB(sb)->s_lock);
0407             UFSD("EXIT (FAILED)\n");
0408             return 0;
0409         }
0410     }
0411 
0412     if (goal >= uspi->s_size) 
0413         goal = 0;
0414     if (goal == 0) 
0415         cgno = ufs_inotocg (inode->i_ino);
0416     else
0417         cgno = ufs_dtog(uspi, goal);
0418      
0419     /*
0420      * allocate new fragment
0421      */
0422     if (oldcount == 0) {
0423         result = ufs_alloc_fragments (inode, cgno, goal, count, err);
0424         if (result) {
0425             ufs_clear_frags(inode, result + oldcount,
0426                     newcount - oldcount, locked_page != NULL);
0427             *err = 0;
0428             write_seqlock(&UFS_I(inode)->meta_lock);
0429             ufs_cpu_to_data_ptr(sb, p, result);
0430             UFS_I(inode)->i_lastfrag =
0431                 max(UFS_I(inode)->i_lastfrag, fragment + count);
0432             write_sequnlock(&UFS_I(inode)->meta_lock);
0433         }
0434         mutex_unlock(&UFS_SB(sb)->s_lock);
0435         UFSD("EXIT, result %llu\n", (unsigned long long)result);
0436         return result;
0437     }
0438 
0439     /*
0440      * resize block
0441      */
0442     result = ufs_add_fragments(inode, tmp, oldcount, newcount);
0443     if (result) {
0444         *err = 0;
0445         read_seqlock_excl(&UFS_I(inode)->meta_lock);
0446         UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
0447                         fragment + count);
0448         read_sequnlock_excl(&UFS_I(inode)->meta_lock);
0449         ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
0450                 locked_page != NULL);
0451         mutex_unlock(&UFS_SB(sb)->s_lock);
0452         UFSD("EXIT, result %llu\n", (unsigned long long)result);
0453         return result;
0454     }
0455 
0456     /*
0457      * allocate new block and move data
0458      */
0459     if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
0460         request = newcount;
0461         if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
0462             usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
0463     } else {
0464         request = uspi->s_fpb;
0465         if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
0466             usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
0467     }
0468     result = ufs_alloc_fragments (inode, cgno, goal, request, err);
0469     if (result) {
0470         ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
0471                 locked_page != NULL);
0472         mutex_unlock(&UFS_SB(sb)->s_lock);
0473         ufs_change_blocknr(inode, fragment - oldcount, oldcount,
0474                    uspi->s_sbbase + tmp,
0475                    uspi->s_sbbase + result, locked_page);
0476         *err = 0;
0477         write_seqlock(&UFS_I(inode)->meta_lock);
0478         ufs_cpu_to_data_ptr(sb, p, result);
0479         UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
0480                         fragment + count);
0481         write_sequnlock(&UFS_I(inode)->meta_lock);
0482         if (newcount < request)
0483             ufs_free_fragments (inode, result + newcount, request - newcount);
0484         ufs_free_fragments (inode, tmp, oldcount);
0485         UFSD("EXIT, result %llu\n", (unsigned long long)result);
0486         return result;
0487     }
0488 
0489     mutex_unlock(&UFS_SB(sb)->s_lock);
0490     UFSD("EXIT (FAILED)\n");
0491     return 0;
0492 }       
0493 
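     /*
      * Charge 'frags' fragments to i_blocks, backing the change out if the
      * resulting count no longer fits in 32 bits (presumably because the
      * on-disk UFS inode keeps its block count in a 32-bit field).
      */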
0494 static bool try_add_frags(struct inode *inode, unsigned frags)
0495 {
0496     unsigned size = frags * i_blocksize(inode);
0497     spin_lock(&inode->i_lock);
0498     __inode_add_bytes(inode, size);
0499     if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
0500         __inode_sub_bytes(inode, size);
0501         spin_unlock(&inode->i_lock);
0502         return false;
0503     }
0504     spin_unlock(&inode->i_lock);
0505     return true;
0506 }
0507 
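     /*
      * Try to grow the run of fragments at 'fragment' from 'oldcount' to
      * 'newcount' fragments in place.  Returns 'fragment' on success and 0
      * on failure (e.g. when the needed neighbouring fragments are not free).
      */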
0508 static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
0509                  unsigned oldcount, unsigned newcount)
0510 {
0511     struct super_block * sb;
0512     struct ufs_sb_private_info * uspi;
0513     struct ufs_cg_private_info * ucpi;
0514     struct ufs_cylinder_group * ucg;
0515     unsigned cgno, fragno, fragoff, count, fragsize, i;
0516     
0517     UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
0518          (unsigned long long)fragment, oldcount, newcount);
0519     
0520     sb = inode->i_sb;
0521     uspi = UFS_SB(sb)->s_uspi;
0522     count = newcount - oldcount;
0523     
0524     cgno = ufs_dtog(uspi, fragment);
0525     if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
0526         return 0;
0527     if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
0528         return 0;
0529     ucpi = ufs_load_cylinder (sb, cgno);
0530     if (!ucpi)
0531         return 0;
0532     ucg = ubh_get_ucg (UCPI_UBH(ucpi));
0533     if (!ufs_cg_chkmagic(sb, ucg)) {
0534         ufs_panic (sb, "ufs_add_fragments",
0535             "internal error, bad magic number on cg %u", cgno);
0536         return 0;
0537     }
0538 
0539     fragno = ufs_dtogd(uspi, fragment);
0540     fragoff = ufs_fragnum (fragno);
0541     for (i = oldcount; i < newcount; i++)
0542         if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
0543             return 0;
0544 
0545     if (!try_add_frags(inode, count))
0546         return 0;
0547     /*
0548      * Block can be extended
0549      */
0550     ucg->cg_time = ufs_get_seconds(sb);
0551     for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
0552         if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
0553             break;
0554     fragsize = i - oldcount;
0555     if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
0556         ufs_panic (sb, "ufs_add_fragments",
0557             "internal error or corrupted bitmap on cg %u", cgno);
0558     fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
0559     if (fragsize != count)
0560         fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
0561     for (i = oldcount; i < newcount; i++)
0562         ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
0563 
0564     fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
0565     fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
0566     uspi->cs_total.cs_nffree -= count;
0567     
0568     ubh_mark_buffer_dirty (USPI_UBH(uspi));
0569     ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
0570     if (sb->s_flags & SB_SYNCHRONOUS)
0571         ubh_sync_block(UCPI_UBH(ucpi));
0572     ufs_mark_sb_dirty(sb);
0573 
0574     UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
0575     
0576     return fragment;
0577 }
0578 
0579 #define UFS_TEST_FREE_SPACE_CG \
0580     ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
0581     if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
0582         goto cg_found; \
0583     for (k = count; k < uspi->s_fpb; k++) \
0584         if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
0585             goto cg_found; 
0586 
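     /*
      * Allocate 'count' fragments, preferring cylinder group 'cgno' and
      * falling back first to a quadratic rehash over the groups and then to
      * a linear scan of all of them (steps 1-3 below).
      */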
0587 static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
0588                    u64 goal, unsigned count, int *err)
0589 {
0590     struct super_block * sb;
0591     struct ufs_sb_private_info * uspi;
0592     struct ufs_cg_private_info * ucpi;
0593     struct ufs_cylinder_group * ucg;
0594     unsigned oldcg, i, j, k, allocsize;
0595     u64 result;
0596     
0597     UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
0598          inode->i_ino, cgno, (unsigned long long)goal, count);
0599 
0600     sb = inode->i_sb;
0601     uspi = UFS_SB(sb)->s_uspi;
0602     oldcg = cgno;
0603     
0604     /*
0605      * 1. searching on preferred cylinder group
0606      */
0607     UFS_TEST_FREE_SPACE_CG
0608 
0609     /*
0610      * 2. quadratic rehash
0611      */
0612     for (j = 1; j < uspi->s_ncg; j *= 2) {
0613         cgno += j;
0614         if (cgno >= uspi->s_ncg) 
0615             cgno -= uspi->s_ncg;
0616         UFS_TEST_FREE_SPACE_CG
0617     }
0618 
0619     /*
0620      * 3. brute force search
0621      * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
0622      */
0623     cgno = (oldcg + 1) % uspi->s_ncg;
0624     for (j = 2; j < uspi->s_ncg; j++) {
0625         cgno++;
0626         if (cgno >= uspi->s_ncg)
0627             cgno = 0;
0628         UFS_TEST_FREE_SPACE_CG
0629     }
0630     
0631     UFSD("EXIT (FAILED)\n");
0632     return 0;
0633 
0634 cg_found:
0635     ucpi = ufs_load_cylinder (sb, cgno);
0636     if (!ucpi)
0637         return 0;
0638     ucg = ubh_get_ucg (UCPI_UBH(ucpi));
0639     if (!ufs_cg_chkmagic(sb, ucg)) 
0640         ufs_panic (sb, "ufs_alloc_fragments",
0641             "internal error, bad magic number on cg %u", cgno);
0642     ucg->cg_time = ufs_get_seconds(sb);
0643 
0644     if (count == uspi->s_fpb) {
0645         result = ufs_alloccg_block (inode, ucpi, goal, err);
0646         if (result == INVBLOCK)
0647             return 0;
0648         goto succed;
0649     }
0650 
0651     for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
0652         if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
0653             break;
0654     
0655     if (allocsize == uspi->s_fpb) {
0656         result = ufs_alloccg_block (inode, ucpi, goal, err);
0657         if (result == INVBLOCK)
0658             return 0;
0659         goal = ufs_dtogd(uspi, result);
0660         for (i = count; i < uspi->s_fpb; i++)
0661             ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
0662         i = uspi->s_fpb - count;
0663 
0664         inode_sub_bytes(inode, i << uspi->s_fshift);
0665         fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
0666         uspi->cs_total.cs_nffree += i;
0667         fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
0668         fs32_add(sb, &ucg->cg_frsum[i], 1);
0669         goto succed;
0670     }
0671 
0672     result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
0673     if (result == INVBLOCK)
0674         return 0;
0675     if (!try_add_frags(inode, count))
0676         return 0;
0677     for (i = 0; i < count; i++)
0678         ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
0679     
0680     fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
0681     uspi->cs_total.cs_nffree -= count;
0682     fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
0683     fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
0684 
0685     if (count != allocsize)
0686         fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
0687 
0688 succed:
0689     ubh_mark_buffer_dirty (USPI_UBH(uspi));
0690     ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
0691     if (sb->s_flags & SB_SYNCHRONOUS)
0692         ubh_sync_block(UCPI_UBH(ucpi));
0693     ufs_mark_sb_dirty(sb);
0694 
0695     result += cgno * uspi->s_fpg;
0696     UFSD("EXIT3, result %llu\n", (unsigned long long)result);
0697     return result;
0698 }
0699 
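     /*
      * Allocate one whole block inside cylinder group 'ucpi': use the block
      * containing 'goal' if it is free, otherwise fall back to
      * ufs_bitmap_search() (starting from the group's rotor when no goal
      * was given).
      */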
0700 static u64 ufs_alloccg_block(struct inode *inode,
0701                  struct ufs_cg_private_info *ucpi,
0702                  u64 goal, int *err)
0703 {
0704     struct super_block * sb;
0705     struct ufs_sb_private_info * uspi;
0706     struct ufs_cylinder_group * ucg;
0707     u64 result, blkno;
0708 
0709     UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
0710 
0711     sb = inode->i_sb;
0712     uspi = UFS_SB(sb)->s_uspi;
0713     ucg = ubh_get_ucg(UCPI_UBH(ucpi));
0714 
0715     if (goal == 0) {
0716         goal = ucpi->c_rotor;
0717         goto norot;
0718     }
0719     goal = ufs_blknum (goal);
0720     goal = ufs_dtogd(uspi, goal);
0721     
0722     /*
0723      * If the requested block is available, use it.
0724      */
0725     if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
0726         result = goal;
0727         goto gotit;
0728     }
0729     
0730 norot:  
0731     result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
0732     if (result == INVBLOCK)
0733         return INVBLOCK;
0734     ucpi->c_rotor = result;
0735 gotit:
0736     if (!try_add_frags(inode, uspi->s_fpb))
0737         return 0;
0738     blkno = ufs_fragstoblks(result);
0739     ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
0740     if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
0741         ufs_clusteracct (sb, ucpi, blkno, -1);
0742 
0743     fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
0744     uspi->cs_total.cs_nbfree--;
0745     fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
0746 
0747     if (uspi->fs_magic != UFS2_MAGIC) {
0748         unsigned cylno = ufs_cbtocylno((unsigned)result);
0749 
0750         fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
0751                       ufs_cbtorpos((unsigned)result)), 1);
0752         fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
0753     }
0754     
0755     UFSD("EXIT, result %llu\n", (unsigned long long)result);
0756 
0757     return result;
0758 }
0759 
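     /*
      * Scan 'size' bytes of the free-fragment bitmap starting at byte offset
      * 'begin', stopping at the first byte whose fragment-table entry has a
      * bit in 'mask' set.  Returns the number of bytes left in the range,
      * counting the matching byte, or 0 if nothing matched; the caller turns
      * this back into the byte's position.
      */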
0760 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
0761               struct ufs_buffer_head *ubh,
0762               unsigned begin, unsigned size,
0763               unsigned char *table, unsigned char mask)
0764 {
0765     unsigned rest, offset;
0766     unsigned char *cp;
0767     
0768 
0769     offset = begin & ~uspi->s_fmask;
0770     begin >>= uspi->s_fshift;
0771     for (;;) {
0772         if ((offset + size) < uspi->s_fsize)
0773             rest = size;
0774         else
0775             rest = uspi->s_fsize - offset;
0776         size -= rest;
0777         cp = ubh->bh[begin]->b_data + offset;
0778         while ((table[*cp++] & mask) == 0 && --rest)
0779             ;
0780         if (rest || !size)
0781             break;
0782         begin++;
0783         offset = 0;
0784     }
0785     return (size + rest);
0786 }
0787 
0788 /*
0789  * Find a run of free fragments of the specified size in the specified cylinder group.
0790  * @sb: pointer to the super block
0791  * @ucpi: pointer to the cylinder group info
0792  * @goal: block near which we want to find the new one
0793  * @count: requested size, in fragments
0794  */
0795 static u64 ufs_bitmap_search(struct super_block *sb,
0796                  struct ufs_cg_private_info *ucpi,
0797                  u64 goal, unsigned count)
0798 {
0799     /*
0800      * Bit patterns for identifying fragments in the block map
0801      * used as ((map & mask_arr) == want_arr)
0802      */
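         /*
          * For example, with count == 3 the loop below tests
          * ((blockmap << 1) & (mask_arr[3] << pos)) == (want_arr[3] << pos),
          * i.e. looks for the window 0b01110 at position pos: exactly three
          * free fragments bounded by an in-use fragment (or the shifted-in
          * zero) on either side.
          */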
0803     static const int mask_arr[9] = {
0804         0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
0805     };
0806     static const int want_arr[9] = {
0807         0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
0808     };
0809     struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
0810     unsigned start, length, loc;
0811     unsigned pos, want, blockmap, mask, end;
0812     u64 result;
0813 
0814     UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
0815          (unsigned long long)goal, count);
0816 
0817     if (goal)
0818         start = ufs_dtogd(uspi, goal) >> 3;
0819     else
0820         start = ucpi->c_frotor >> 3;
0821         
0822     length = ((uspi->s_fpg + 7) >> 3) - start;
0823     loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
0824         (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
0825         1 << (count - 1 + (uspi->s_fpb & 7))); 
0826     if (loc == 0) {
0827         length = start + 1;
0828         loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
0829                 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
0830                 ufs_fragtable_other,
0831                 1 << (count - 1 + (uspi->s_fpb & 7)));
0832         if (loc == 0) {
0833             ufs_error(sb, "ufs_bitmap_search",
0834                   "bitmap corrupted on cg %u, start %u,"
0835                   " length %u, count %u, freeoff %u\n",
0836                   ucpi->c_cgx, start, length, count,
0837                   ucpi->c_freeoff);
0838             return INVBLOCK;
0839         }
0840         start = 0;
0841     }
0842     result = (start + length - loc) << 3;
0843     ucpi->c_frotor = result;
0844 
0845     /*
0846      * found the byte in the map
0847      */
0848 
0849     for (end = result + 8; result < end; result += uspi->s_fpb) {
0850         blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
0851         blockmap <<= 1;
0852         mask = mask_arr[count];
0853         want = want_arr[count];
0854         for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
0855             if ((blockmap & mask) == want) {
0856                 UFSD("EXIT, result %llu\n",
0857                      (unsigned long long)result);
0858                 return result + pos;
0859             }
0860             mask <<= 1;
0861             want <<= 1;
0862         }
0863     }
0864 
0865     ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
0866           ucpi->c_cgx);
0867     UFSD("EXIT (FAILED)\n");
0868     return INVBLOCK;
0869 }
0870 
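     /*
      * Keep the per-group cluster bitmap and the cluster-size summary
      * counters in sync when a whole block is freed (cnt == 1) or allocated
      * (cnt == -1): flip the block's cluster bit, then adjust the counts for
      * the merged run and the neighbouring forward/backward runs of
      * contiguous free blocks around it.
      */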
0871 static void ufs_clusteracct(struct super_block * sb,
0872     struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
0873 {
0874     struct ufs_sb_private_info * uspi;
0875     int i, start, end, forw, back;
0876     
0877     uspi = UFS_SB(sb)->s_uspi;
0878     if (uspi->s_contigsumsize <= 0)
0879         return;
0880 
0881     if (cnt > 0)
0882         ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
0883     else
0884         ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
0885 
0886     /*
0887      * Find the size of the cluster going forward.
0888      */
0889     start = blkno + 1;
0890     end = start + uspi->s_contigsumsize;
0891     if ( end >= ucpi->c_nclusterblks)
0892         end = ucpi->c_nclusterblks;
0893     i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
0894     if (i > end)
0895         i = end;
0896     forw = i - start;
0897     
0898     /*
0899      * Find the size of the cluster going backward.
0900      */
0901     start = blkno - 1;
0902     end = start - uspi->s_contigsumsize;
0903     if (end < 0 ) 
0904         end = -1;
0905     i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
0906     if ( i < end) 
0907         i = end;
0908     back = start - i;
0909     
0910     /*
0911      * Account for old cluster and the possibly new forward and
0912      * back clusters.
0913      */
0914     i = back + forw + 1;
0915     if (i > uspi->s_contigsumsize)
0916         i = uspi->s_contigsumsize;
0917     fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
0918     if (back > 0)
0919         fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
0920     if (forw > 0)
0921         fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
0922 }
0923 
0924 
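     /*
      * Free-run lookup tables used via ubh_scanc() from ufs_bitmap_search().
      * In effect, ufs_fragtable_8fpb[map] has bit (n - 1) set when the bitmap
      * byte 'map' contains a run of exactly n free (set) bits, for the case
      * of 8 fragments per block; ufs_fragtable_other[] packs the same
      * information for 1, 2 or 4 fragments per block, with a run of length n
      * reported at bit (n - 1 + fragments-per-block).
      */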
0925 static unsigned char ufs_fragtable_8fpb[] = {
0926     0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
0927     0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
0928     0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
0929     0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
0930     0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, 
0931     0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
0932     0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
0933     0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
0934     0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
0935     0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
0936     0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
0937     0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
0938     0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
0939     0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
0940     0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
0941     0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
0942 };
0943 
0944 static unsigned char ufs_fragtable_other[] = {
0945     0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
0946     0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
0947     0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
0948     0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
0949     0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
0950     0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
0951     0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
0952     0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
0953     0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
0954     0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
0955     0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
0956     0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
0957     0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
0958     0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
0959     0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
0960     0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
0961 };