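/*
 *  linux/fs/ext4/block_validity.c
 *
 * Track which blocks in the filesystem are metadata blocks that
 * should never be used as data blocks by files or directories.
 */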
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "ext4.h"

struct ext4_system_zone {
	struct rb_node	node;
	ext4_fsblk_t	start_blk;
	unsigned int	count;
	u32		ino;
};

static struct kmem_cache *ext4_system_zone_cachep;

int __init ext4_init_system_zone(void)
{
	ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone, 0);
	if (ext4_system_zone_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_system_zone(void)
{
	rcu_barrier();
	kmem_cache_destroy(ext4_system_zone_cachep);
}

static inline int can_merge(struct ext4_system_zone *entry1,
			    struct ext4_system_zone *entry2)
{
	if ((entry1->start_blk + entry1->count) == entry2->start_blk &&
	    entry1->ino == entry2->ino)
		return 1;
	return 0;
}

static void release_system_zone(struct ext4_system_blocks *system_blks)
{
	struct ext4_system_zone *entry, *n;

	rbtree_postorder_for_each_entry_safe(entry, n,
					     &system_blks->root, node)
		kmem_cache_free(ext4_system_zone_cachep, entry);
}

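/*
 * Mark a range of blocks as belonging to the "system zone" --- that is,
 * filesystem metadata which must never be referenced by file data.  A
 * non-zero @ino associates the range with a reserved inode (such as the
 * journal), whose own references to these blocks remain valid.
 */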
static int add_system_zone(struct ext4_system_blocks *system_blks,
			   ext4_fsblk_t start_blk,
			   unsigned int count, u32 ino)
{
	struct ext4_system_zone *new_entry, *entry;
	struct rb_node **n = &system_blks->root.rb_node, *node;
	struct rb_node *parent = NULL, *new_node = NULL;

	while (*n) {
		parent = *n;
		entry = rb_entry(parent, struct ext4_system_zone, node);
		if (start_blk < entry->start_blk)
			n = &(*n)->rb_left;
		else if (start_blk >= (entry->start_blk + entry->count))
			n = &(*n)->rb_right;
		else
			return -EFSCORRUPTED;
	}

	new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
				     GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;
	new_entry->start_blk = start_blk;
	new_entry->count = count;
	new_entry->ino = ino;
	new_node = &new_entry->node;

	rb_link_node(new_node, parent, n);
	rb_insert_color(new_node, &system_blks->root);

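	/* Can we merge to the left? */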
	node = rb_prev(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_system_zone, node);
		if (can_merge(entry, new_entry)) {
			new_entry->start_blk = entry->start_blk;
			new_entry->count += entry->count;
			rb_erase(node, &system_blks->root);
			kmem_cache_free(ext4_system_zone_cachep, entry);
		}
	}

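	/* Can we merge to the right? */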
	node = rb_next(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_system_zone, node);
		if (can_merge(new_entry, entry)) {
			new_entry->count += entry->count;
			rb_erase(node, &system_blks->root);
			kmem_cache_free(ext4_system_zone_cachep, entry);
		}
	}
	return 0;
}

static void debug_print_tree(struct ext4_sb_info *sbi)
{
	struct rb_node *node;
	struct ext4_system_zone *entry;
	struct ext4_system_blocks *system_blks;
	int first = 1;

	printk(KERN_INFO "System zones: ");
	rcu_read_lock();
	system_blks = rcu_dereference(sbi->s_system_blks);
	node = rb_first(&system_blks->root);
	while (node) {
		entry = rb_entry(node, struct ext4_system_zone, node);
		printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
		       entry->start_blk, entry->start_blk + entry->count - 1);
		first = 0;
		node = rb_next(node);
	}
	rcu_read_unlock();
	printk(KERN_CONT "\n");
}

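/*
 * Add every mapped block of a reserved inode (in practice the journal
 * inode) to the system-zone tree, tagged with its inode number so that
 * the inode's own block references are still accepted as valid.
 */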
static int ext4_protect_reserved_inode(struct super_block *sb,
				       struct ext4_system_blocks *system_blks,
				       u32 ino)
{
	struct inode *inode;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_map_blocks map;
	u32 i = 0, num;
	int err = 0, n;

	if ((ino < EXT4_ROOT_INO) ||
	    (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
		return -EINVAL;
	inode = ext4_iget(sb, ino, EXT4_IGET_SPECIAL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	while (i < num) {
		cond_resched();
		map.m_lblk = i;
		map.m_len = num - i;
		n = ext4_map_blocks(NULL, inode, &map, 0);
		if (n < 0) {
			err = n;
			break;
		}
		if (n == 0) {
			i++;
		} else {
			err = add_system_zone(system_blks, map.m_pblk, n, ino);
			if (err < 0) {
				if (err == -EFSCORRUPTED) {
					EXT4_ERROR_INODE_ERR(inode, -err,
						"blocks %llu-%llu from inode overlap system zone",
						map.m_pblk,
						map.m_pblk + map.m_len - 1);
				}
				break;
			}
			i += n;
		}
	}
	iput(inode);
	return err;
}

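/* RCU callback: free a retired system-zone tree once all readers are done. */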
static void ext4_destroy_system_zone(struct rcu_head *rcu)
{
	struct ext4_system_blocks *system_blks;

	system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
	release_system_zone(system_blks);
	kfree(system_blks);
}

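/*
 * Build the system-zone rbtree for this filesystem: the superblock and
 * group-descriptor backups, the block and inode bitmaps, the inode
 * tables, and (if present) the journal inode's blocks.
 */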
int ext4_setup_system_zone(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_system_blocks *system_blks;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	int flex_size = ext4_flex_bg_size(sbi);
	int ret;

	system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
	if (!system_blks)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++) {
		cond_resched();
		if (ext4_bg_has_super(sb, i) &&
		    ((i < 5) || ((i % flex_size) == 0))) {
			ret = add_system_zone(system_blks,
					ext4_group_first_block_no(sb, i),
					ext4_bg_num_gdb(sb, i) + 1, 0);
			if (ret)
				goto err;
		}
		gdp = ext4_get_group_desc(sb, i, NULL);
		ret = add_system_zone(system_blks,
				ext4_block_bitmap(sb, gdp), 1, 0);
		if (ret)
			goto err;
		ret = add_system_zone(system_blks,
				ext4_inode_bitmap(sb, gdp), 1, 0);
		if (ret)
			goto err;
		ret = add_system_zone(system_blks,
				ext4_inode_table(sb, gdp),
				sbi->s_itb_per_group, 0);
		if (ret)
			goto err;
	}
	if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
		ret = ext4_protect_reserved_inode(sb, system_blks,
				le32_to_cpu(sbi->s_es->s_journal_inum));
		if (ret)
			goto err;
	}

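	/*
	 * The tree is fully built; publish it so that lockless readers
	 * under rcu_read_lock() never see a partially constructed tree.
	 */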
	rcu_assign_pointer(sbi->s_system_blks, system_blks);

	if (test_opt(sb, DEBUG))
		debug_print_tree(sbi);
	return 0;
err:
	release_system_zone(system_blks);
	kfree(system_blks);
	return ret;
}

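/*
 * Tear down the system-zone tree.  The pointer is cleared while holding
 * sb->s_umount, and the tree itself is only freed after an RCU grace
 * period, since ext4_sb_block_valid() may still be walking it.
 */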
void ext4_release_system_zone(struct super_block *sb)
{
	struct ext4_system_blocks *system_blks;

	system_blks = rcu_dereference_protected(EXT4_SB(sb)->s_system_blks,
					lockdep_is_held(&sb->s_umount));
	rcu_assign_pointer(EXT4_SB(sb)->s_system_blks, NULL);

	if (system_blks)
		call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
}

int ext4_sb_block_valid(struct super_block *sb, struct inode *inode,
			ext4_fsblk_t start_blk, unsigned int count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_system_blocks *system_blks;
	struct ext4_system_zone *entry;
	struct rb_node *n;
	int ret = 1;

	if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (start_blk + count < start_blk) ||
	    (start_blk + count > ext4_blocks_count(sbi->s_es)))
		return 0;

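	/*
	 * The tree may be freed via call_rcu() while we are looking at
	 * it, so the lookup and walk must be done under rcu_read_lock().
	 */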
	rcu_read_lock();
	system_blks = rcu_dereference(sbi->s_system_blks);
	if (system_blks == NULL)
		goto out_rcu;

	n = system_blks->root.rb_node;
	while (n) {
		entry = rb_entry(n, struct ext4_system_zone, node);
		if (start_blk + count - 1 < entry->start_blk)
			n = n->rb_left;
		else if (start_blk >= (entry->start_blk + entry->count))
			n = n->rb_right;
		else {
			ret = 0;
			if (inode)
				ret = (entry->ino == inode->i_ino);
			break;
		}
	}
out_rcu:
	rcu_read_unlock();
	return ret;
}

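/*
 * Returns 1 if the block region (start_blk, start_blk + count) is valid
 * for @inode; 0 if any part of it overlaps filesystem metadata that does
 * not belong to @inode itself.
 */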
int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
			   unsigned int count)
{
	return ext4_sb_block_valid(inode->i_sb, inode, start_blk, count);
}

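/*
 * Validate an array of __le32 block references belonging to @inode,
 * reporting and returning -EFSCORRUPTED on the first invalid block.
 * The journal inode is exempt, since its blocks sit inside the system
 * zone by design.
 */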
int ext4_check_blockref(const char *function, unsigned int line,
			struct inode *inode, __le32 *p, unsigned int max)
{
	__le32 *bref = p;
	unsigned int blk;

	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;

	while (bref < p + max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_inode_block_valid(inode, blk, 1))) {
			ext4_error_inode(inode, function, line, blk,
					 "invalid block");
			return -EFSCORRUPTED;
		}
	}
	return 0;
}