Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
0003  */
0004 
0005 #include <linux/string.h>
0006 #include <linux/time.h>
0007 #include <linux/uuid.h>
0008 #include "reiserfs.h"
0009 
0010 /* find where objectid map starts */
0011 #define objectid_map(s,rs) (old_format_only (s) ? \
0012                          (__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
0013              (__le32 *)((rs) + 1))
0014 
0015 #ifdef CONFIG_REISERFS_CHECK
0016 
0017 static void check_objectid_map(struct super_block *s, __le32 * map)
0018 {
0019     if (le32_to_cpu(map[0]) != 1)
0020         reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
0021                    (long unsigned int)le32_to_cpu(map[0]));
0022 
0023     /* FIXME: add something else here */
0024 }
0025 
0026 #else
0027 static void check_objectid_map(struct super_block *s, __le32 * map)
0028 {;
0029 }
0030 #endif
0031 
0032 /*
0033  * When we allocate objectids we allocate the first unused objectid.
0034  * Each sequence of objectids in use (the odd sequences) is followed
0035  * by a sequence of objectids not in use (the even sequences).  We
0036  * only need to record the last objectid in each of these sequences
0037  * (both the odd and even sequences) in order to fully define the
0038  * boundaries of the sequences.  A consequence of allocating the first
0039  * objectid not in use is that under most conditions this scheme is
0040  * extremely compact.  The exception is immediately after a sequence
0041  * of operations which deletes a large number of objects of
0042  * non-sequential objectids, and even then it will become compact
0043  * again as soon as more objects are created.  Note that many
0044  * interesting optimizations of layout could result from complicating
0045  * objectid assignment, but we have deferred making them for now.
0046  */
0047 
/*
 * get unique object identifier
 *
 * Allocate and return the smallest unused objectid, or 0 on failure
 * (id space exhausted).  Must be called inside an active transaction
 * (th->t_trans_id != 0); the super-block buffer is journaled here.
 */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	__u32 unused_objectid;

	BUG_ON(!th->t_trans_id);

	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	/*
	 * map[0] is the start of the first in-use run (always 1, as
	 * check_objectid_map() enforces) and map[1] is the first
	 * objectid past that run -- i.e. the smallest unused objectid.
	 */
	unused_objectid = le32_to_cpu(map[1]);
	if (unused_objectid == U32_MAX) {
		/* id space exhausted: undo the journal prepare and fail */
		reiserfs_warning(s, "reiserfs-15100", "no more object ids");
		reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
		return 0;	/* 0 signals failure to the caller */
	}

	/*
	 * This incrementation allocates the first unused objectid. That
	 * is to say, the first entry on the objectid map is the first
	 * unused objectid, and by incrementing it we use it.  See below
	 * where we check to see if we eliminated a sequence of unused
	 * objectids....
	 */
	map[1] = cpu_to_le32(unused_objectid + 1);

	/*
	 * Now we check to see if we eliminated the last remaining member of
	 * the first even sequence (and can eliminate the sequence by
	 * eliminating its last objectid from oids), and can collapse the
	 * first two odd sequences into one sequence.  If so, then the net
	 * result is to eliminate a pair of objectids from oids.  We do this
	 * by shifting the entire map to the left.
	 * (Comparing two raw __le32 values for equality is endian-safe.)
	 */
	if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
		memmove(map + 1, map + 3,
			(sb_oid_cursize(rs) - 3) * sizeof(__u32));
		set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
	}

	journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
	return unused_objectid;
}
0095 
/*
 * makes object identifier unused
 *
 * Return objectid_to_release to the pool of unused ids.  Must be
 * called inside an active transaction.  If the map is already at its
 * on-disk maximum size and releasing the id would require growing it,
 * the id is deliberately leaked (counted via PROC_INFO_INC) instead.
 */
void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
			       __u32 objectid_to_release)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
	__le32 *map = objectid_map(s, rs);
	int i = 0;

	BUG_ON(!th->t_trans_id);
	/*return; */
	check_objectid_map(s, map);

	reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
	journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));

	/*
	 * start at the beginning of the objectid map (i = 0) and go to
	 * the end of it (i = disk_sb->s_oid_cursize).  Linear search is
	 * what we use, though it is possible that binary search would be
	 * more efficient after performing lots of deletions (which is
	 * when oids is large.)  We only check even i's: each even entry
	 * map[i] is the first objectid of an in-use run, and the odd
	 * entry map[i + 1] is the first objectid past that run.
	 */
	while (i < sb_oid_cursize(rs)) {
		if (objectid_to_release == le32_to_cpu(map[i])) {
			/*
			 * Releasing the first member of a used run:
			 * this incrementation unallocates the objectid.
			 */
			le32_add_cpu(&map[i], 1);

			/*
			 * Did we unallocate the last member of an
			 * odd sequence, and can shrink oids?
			 */
			if (map[i] == map[i + 1]) {
				/* run is now empty: drop its boundary pair */
				memmove(map + i, map + i + 2,
					(sb_oid_cursize(rs) - i -
					 2) * sizeof(__u32));
				set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);

				RFALSE(sb_oid_cursize(rs) < 2 ||
				       sb_oid_cursize(rs) > sb_oid_maxsize(rs),
				       "vs-15005: objectid map corrupted cur_size == %d (max == %d)",
				       sb_oid_cursize(rs), sb_oid_maxsize(rs));
			}
			return;
		}

		if (objectid_to_release > le32_to_cpu(map[i]) &&
		    objectid_to_release < le32_to_cpu(map[i + 1])) {
			/*
			 * The id lies strictly inside this used run.  If
			 * it is the run's last member, just pull the end
			 * boundary back by one; the size of the objectid
			 * map is not changed.
			 */
			if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
				le32_add_cpu(&map[i + 1], -1);
				return;
			}

			/*
			 * JDM comparing two little-endian values for
			 * equality -- safe
			 */
			/*
			 * Otherwise the run must be split in two, which
			 * expands the objectid map by one boundary pair --
			 * but there may be no space for that.  If the map
			 * is full, leak the id rather than overflow it.
			 */
			if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
				PROC_INFO_INC(s, leaked_oid);
				return;
			}

			/* expand the objectid map: open a hole at i + 1 */
			memmove(map + i + 3, map + i + 1,
				(sb_oid_cursize(rs) - i - 1) * sizeof(__u32));
			map[i + 1] = cpu_to_le32(objectid_to_release);
			map[i + 2] = cpu_to_le32(objectid_to_release + 1);
			set_sb_oid_cursize(rs, sb_oid_cursize(rs) + 2);
			return;
		}
		i += 2;
	}

	/* id fell outside every in-use run: it was already free */
	reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
		       (long unsigned)objectid_to_release);
}
0178 
/*
 * Convert a v1 objectid map to the v2 super-block layout: the v2
 * super block is larger than the v1 one, so the map that follows it
 * must shrink and move.  NOTE(review): presumably invoked when an
 * old-format filesystem is mounted -- confirm against the caller.
 * Always returns 0.
 */
int reiserfs_convert_objectid_map_v1(struct super_block *s)
{
	struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK(s);
	int cur_size = sb_oid_cursize(disk_sb);
	/* largest even number of __u32 entries that fit after the v2 super */
	int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
	int old_max = sb_oid_maxsize(disk_sb);
	struct reiserfs_super_block_v1 *disk_sb_v1;
	__le32 *objectid_map;
	int i;

	disk_sb_v1 =
	    (struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
	/* the old map starts immediately after the v1 super block */
	objectid_map = (__le32 *) (disk_sb_v1 + 1);

	if (cur_size > new_size) {
		/*
		 * mark everyone used that was listed as free at
		 * the end of the objectid map: copying the old final
		 * boundary into the new last slot truncates the map
		 * while keeping its "last unused run" terminator.
		 */
		objectid_map[new_size - 1] = objectid_map[cur_size - 1];
		set_sb_oid_cursize(disk_sb, new_size);
	}
	/*
	 * move the smaller objectid map past the end of the new super.
	 * The shift by (old_max - new_size) entries assumes both the old
	 * and new maps end at the same offset in the block -- TODO
	 * confirm against the on-disk layout.  Copy backwards since the
	 * source and destination regions may overlap.
	 */
	for (i = new_size - 1; i >= 0; i--) {
		objectid_map[i + (old_max - new_size)] = objectid_map[i];
	}

	/* set the max size so we don't overflow later */
	set_sb_oid_maxsize(disk_sb, new_size);

	/* Zero out label and generate random UUID */
	memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label));
	generate_random_uuid(disk_sb->s_uuid);

	/* finally, zero out the unused chunk of the new super */
	memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused));
	return 0;
}