/*
 * linux/fs/ext4/xattr.c
 *
 * Extended attribute handling.
 */
0054 #include <linux/init.h>
0055 #include <linux/fs.h>
0056 #include <linux/slab.h>
0057 #include <linux/mbcache.h>
0058 #include <linux/quotaops.h>
0059 #include <linux/iversion.h>
0060 #include "ext4_jbd2.h"
0061 #include "ext4.h"
0062 #include "xattr.h"
0063 #include "acl.h"
0064
0065 #ifdef EXT4_XATTR_DEBUG
0066 # define ea_idebug(inode, fmt, ...) \
0067 printk(KERN_DEBUG "inode %s:%lu: " fmt "\n", \
0068 inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
0069 # define ea_bdebug(bh, fmt, ...) \
0070 printk(KERN_DEBUG "block %pg:%lu: " fmt "\n", \
0071 bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
0072 #else
0073 # define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
0074 # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
0075 #endif
0076
0077 static void ext4_xattr_block_cache_insert(struct mb_cache *,
0078 struct buffer_head *);
0079 static struct buffer_head *
0080 ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
0081 struct mb_cache_entry **);
0082 static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
0083 size_t value_count);
0084 static void ext4_xattr_rehash(struct ext4_xattr_header *);
0085
0086 static const struct xattr_handler * const ext4_xattr_handler_map[] = {
0087 [EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler,
0088 #ifdef CONFIG_EXT4_FS_POSIX_ACL
0089 [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
0090 [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
0091 #endif
0092 [EXT4_XATTR_INDEX_TRUSTED] = &ext4_xattr_trusted_handler,
0093 #ifdef CONFIG_EXT4_FS_SECURITY
0094 [EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler,
0095 #endif
0096 [EXT4_XATTR_INDEX_HURD] = &ext4_xattr_hurd_handler,
0097 };
0098
0099 const struct xattr_handler *ext4_xattr_handlers[] = {
0100 &ext4_xattr_user_handler,
0101 &ext4_xattr_trusted_handler,
0102 #ifdef CONFIG_EXT4_FS_POSIX_ACL
0103 &posix_acl_access_xattr_handler,
0104 &posix_acl_default_xattr_handler,
0105 #endif
0106 #ifdef CONFIG_EXT4_FS_SECURITY
0107 &ext4_xattr_security_handler,
0108 #endif
0109 &ext4_xattr_hurd_handler,
0110 NULL
0111 };
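/*
 * Note on the two tables above: ext4_xattr_handler_map is indexed by the
 * on-disk e_name_index byte and is used to resolve entries read from disk,
 * while ext4_xattr_handlers is the NULL-terminated list that ext4 exposes
 * to the VFS (via sb->s_xattr) for the generic xattr entry points.
 */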
0112
0113 #define EA_BLOCK_CACHE(inode) (((struct ext4_sb_info *) \
0114 inode->i_sb->s_fs_info)->s_ea_block_cache)
0115
0116 #define EA_INODE_CACHE(inode) (((struct ext4_sb_info *) \
0117 inode->i_sb->s_fs_info)->s_ea_inode_cache)
0118
0119 static int
0120 ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
0121 struct inode *inode);
0122
0123 #ifdef CONFIG_LOCKDEP
0124 void ext4_xattr_inode_set_class(struct inode *ea_inode)
0125 {
0126 lockdep_set_subclass(&ea_inode->i_rwsem, 1);
0127 }
0128 #endif
0129
0130 static __le32 ext4_xattr_block_csum(struct inode *inode,
0131 sector_t block_nr,
0132 struct ext4_xattr_header *hdr)
0133 {
0134 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
0135 __u32 csum;
0136 __le64 dsk_block_nr = cpu_to_le64(block_nr);
0137 __u32 dummy_csum = 0;
0138 int offset = offsetof(struct ext4_xattr_header, h_checksum);
0139
0140 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
0141 sizeof(dsk_block_nr));
0142 csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
0143 csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
0144 offset += sizeof(dummy_csum);
0145 csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
0146 EXT4_BLOCK_SIZE(inode->i_sb) - offset);
0147
0148 return cpu_to_le32(csum);
0149 }
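/*
 * Checksum layout sketch (derived from the helper above): the crc32c seed
 * is chained over the little-endian block number, the header bytes up to
 * h_checksum, four zero bytes standing in for h_checksum itself, and then
 * the rest of the block.  Verification can therefore run on the raw buffer
 * without first clearing the stored checksum field.
 */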
0150
0151 static int ext4_xattr_block_csum_verify(struct inode *inode,
0152 struct buffer_head *bh)
0153 {
0154 struct ext4_xattr_header *hdr = BHDR(bh);
0155 int ret = 1;
0156
0157 if (ext4_has_metadata_csum(inode->i_sb)) {
0158 lock_buffer(bh);
0159 ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
0160 bh->b_blocknr, hdr));
0161 unlock_buffer(bh);
0162 }
0163 return ret;
0164 }
0165
0166 static void ext4_xattr_block_csum_set(struct inode *inode,
0167 struct buffer_head *bh)
0168 {
0169 if (ext4_has_metadata_csum(inode->i_sb))
0170 BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
0171 bh->b_blocknr, BHDR(bh));
0172 }
0173
0174 static inline const struct xattr_handler *
0175 ext4_xattr_handler(int name_index)
0176 {
0177 const struct xattr_handler *handler = NULL;
0178
0179 if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
0180 handler = ext4_xattr_handler_map[name_index];
0181 return handler;
0182 }
0183
0184 static int
0185 ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
0186 void *value_start)
0187 {
0188 struct ext4_xattr_entry *e = entry;
0189
/* Find the end of the names list, checking entry bounds and name lengths. */
0191 while (!IS_LAST_ENTRY(e)) {
0192 struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
0193 if ((void *)next >= end)
0194 return -EFSCORRUPTED;
0195 if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
0196 return -EFSCORRUPTED;
0197 e = next;
0198 }
0199
/* Check the values. */
0201 while (!IS_LAST_ENTRY(entry)) {
0202 u32 size = le32_to_cpu(entry->e_value_size);
0203
0204 if (size > EXT4_XATTR_SIZE_MAX)
0205 return -EFSCORRUPTED;
0206
0207 if (size != 0 && entry->e_value_inum == 0) {
0208 u16 offs = le16_to_cpu(entry->e_value_offs);
0209 void *value;
0210
/*
 * The value cannot overlap the names, and the value with padding
 * cannot extend beyond 'end'.  Check both the padded and unpadded
 * sizes, since the size may overflow to zero when adding padding.
 */
0217 if (offs > end - value_start)
0218 return -EFSCORRUPTED;
0219 value = value_start + offs;
0220 if (value < (void *)e + sizeof(u32) ||
0221 size > end - value ||
0222 EXT4_XATTR_SIZE(size) > end - value)
0223 return -EFSCORRUPTED;
0224 }
0225 entry = EXT4_XATTR_NEXT(entry);
0226 }
0227
0228 return 0;
0229 }
0230
0231 static inline int
0232 __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
0233 const char *function, unsigned int line)
0234 {
0235 int error = -EFSCORRUPTED;
0236
0237 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
0238 BHDR(bh)->h_blocks != cpu_to_le32(1))
0239 goto errout;
0240 if (buffer_verified(bh))
0241 return 0;
0242
0243 error = -EFSBADCRC;
0244 if (!ext4_xattr_block_csum_verify(inode, bh))
0245 goto errout;
0246 error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
0247 bh->b_data);
0248 errout:
0249 if (error)
0250 __ext4_error_inode(inode, function, line, 0, -error,
0251 "corrupted xattr block %llu",
0252 (unsigned long long) bh->b_blocknr);
0253 else
0254 set_buffer_verified(bh);
0255 return error;
0256 }
0257
0258 #define ext4_xattr_check_block(inode, bh) \
0259 __ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
0260
0261
0262 static int
0263 __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
0264 void *end, const char *function, unsigned int line)
0265 {
0266 int error = -EFSCORRUPTED;
0267
0268 if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
0269 (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
0270 goto errout;
0271 error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
0272 errout:
0273 if (error)
0274 __ext4_error_inode(inode, function, line, 0, -error,
0275 "corrupted in-inode xattr");
0276 return error;
0277 }
0278
0279 #define xattr_check_inode(inode, header, end) \
0280 __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
0281
0282 static int
0283 xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
0284 void *end, int name_index, const char *name, int sorted)
0285 {
0286 struct ext4_xattr_entry *entry, *next;
0287 size_t name_len;
0288 int cmp = 1;
0289
0290 if (name == NULL)
0291 return -EINVAL;
0292 name_len = strlen(name);
0293 for (entry = *pentry; !IS_LAST_ENTRY(entry); entry = next) {
0294 next = EXT4_XATTR_NEXT(entry);
0295 if ((void *) next >= end) {
0296 EXT4_ERROR_INODE(inode, "corrupted xattr entries");
0297 return -EFSCORRUPTED;
0298 }
0299 cmp = name_index - entry->e_name_index;
0300 if (!cmp)
0301 cmp = name_len - entry->e_name_len;
0302 if (!cmp)
0303 cmp = memcmp(name, entry->e_name, name_len);
0304 if (cmp <= 0 && (sorted || cmp == 0))
0305 break;
0306 }
0307 *pentry = entry;
0308 return cmp ? -ENODATA : 0;
0309 }
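/*
 * Ordering assumed by the search above: xattr block entries are kept sorted
 * by (e_name_index, e_name_len, name), so a caller passing sorted == 1 can
 * stop at the first entry that compares greater than the key.  In-inode
 * entries are not sorted and are scanned with sorted == 0 until an exact
 * match (or the terminating entry) is found.
 */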
0310
0311 static u32
0312 ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
0313 {
0314 return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
0315 }
0316
0317 static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
0318 {
0319 return ((u64)ea_inode->i_ctime.tv_sec << 32) |
0320 (u32) inode_peek_iversion_raw(ea_inode);
0321 }
0322
0323 static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
0324 {
0325 ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
0326 inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff);
0327 }
0328
0329 static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
0330 {
0331 return (u32)ea_inode->i_atime.tv_sec;
0332 }
0333
0334 static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
0335 {
0336 ea_inode->i_atime.tv_sec = hash;
0337 }
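/*
 * Illustration of the field reuse implemented by the helpers above: an EA
 * inode carrying ref_count == 3 and value hash 0xdeadbeef stores
 *
 *   i_ctime.tv_sec   = ref_count >> 32          (0 here)
 *   iversion (raw)   = ref_count & 0xffffffff   (3 here)
 *   i_atime.tv_sec   = 0xdeadbeef               (crc32c of the value)
 *
 * i_mtime.tv_sec is reused as a back-reference to the parent inode by the
 * legacy Lustre scheme (see EXT4_XATTR_INODE_GET_PARENT below).
 */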
0338
/*
 * Read the EA value from the blocks of an EA inode into 'buf'.
 */
0342 static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
0343 {
0344 int blocksize = 1 << ea_inode->i_blkbits;
0345 int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits;
0346 int tail_size = (size % blocksize) ?: blocksize;
0347 struct buffer_head *bhs_inline[8];
0348 struct buffer_head **bhs = bhs_inline;
0349 int i, ret;
0350
0351 if (bh_count > ARRAY_SIZE(bhs_inline)) {
0352 bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS);
0353 if (!bhs)
0354 return -ENOMEM;
0355 }
0356
ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count,
true /* wait */, bhs);
0359 if (ret)
0360 goto free_bhs;
0361
0362 for (i = 0; i < bh_count; i++) {
/* There shouldn't be any holes in an ea_inode. */
0364 if (!bhs[i]) {
0365 ret = -EFSCORRUPTED;
0366 goto put_bhs;
0367 }
0368 memcpy((char *)buf + blocksize * i, bhs[i]->b_data,
0369 i < bh_count - 1 ? blocksize : tail_size);
0370 }
0371 ret = 0;
0372 put_bhs:
0373 for (i = 0; i < bh_count; i++)
0374 brelse(bhs[i]);
0375 free_bhs:
0376 if (bhs != bhs_inline)
0377 kfree(bhs);
0378 return ret;
0379 }
0380
0381 #define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)
0382
0383 static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
0384 u32 ea_inode_hash, struct inode **ea_inode)
0385 {
0386 struct inode *inode;
0387 int err;
0388
0389 inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
0390 if (IS_ERR(inode)) {
0391 err = PTR_ERR(inode);
0392 ext4_error(parent->i_sb,
0393 "error while reading EA inode %lu err=%d", ea_ino,
0394 err);
0395 return err;
0396 }
0397
0398 if (is_bad_inode(inode)) {
0399 ext4_error(parent->i_sb,
0400 "error while reading EA inode %lu is_bad_inode",
0401 ea_ino);
0402 err = -EIO;
0403 goto error;
0404 }
0405
0406 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
0407 ext4_error(parent->i_sb,
0408 "EA inode %lu does not have EXT4_EA_INODE_FL flag",
0409 ea_ino);
0410 err = -EINVAL;
0411 goto error;
0412 }
0413
0414 ext4_xattr_inode_set_class(inode);
0415
/*
 * Check whether this is an old Lustre-style xattr inode.  The Lustre
 * implementation does not store a value hash; instead it keeps a
 * back-reference to the parent inode.  Detect that case by the matching
 * parent ino and generation, skip hash verification and treat the
 * reference count as 1.
 */
0421 if (ea_inode_hash != ext4_xattr_inode_get_hash(inode) &&
0422 EXT4_XATTR_INODE_GET_PARENT(inode) == parent->i_ino &&
0423 inode->i_generation == parent->i_generation) {
0424 ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
0425 ext4_xattr_inode_set_ref(inode, 1);
0426 } else {
0427 inode_lock(inode);
0428 inode->i_flags |= S_NOQUOTA;
0429 inode_unlock(inode);
0430 }
0431
0432 *ea_inode = inode;
0433 return 0;
0434 error:
0435 iput(inode);
0436 return err;
0437 }
0438
/* Remove the entry from the mb_cache when the EA inode is evicted. */
0440 void ext4_evict_ea_inode(struct inode *inode)
0441 {
0442 struct mb_cache_entry *oe;
0443
0444 if (!EA_INODE_CACHE(inode))
0445 return;
0446
0447 while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
0448 ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
0449 mb_cache_entry_wait_unused(oe);
0450 mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
0451 }
0452 }
0453
0454 static int
0455 ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
0456 struct ext4_xattr_entry *entry, void *buffer,
0457 size_t size)
0458 {
0459 u32 hash;
0460
0461
0462 hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
0463 if (hash != ext4_xattr_inode_get_hash(ea_inode))
0464 return -EFSCORRUPTED;
0465
0466 if (entry) {
0467 __le32 e_hash, tmp_data;
0468
0469
0470 tmp_data = cpu_to_le32(hash);
0471 e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
0472 &tmp_data, 1);
0473 if (e_hash != entry->e_hash)
0474 return -EFSCORRUPTED;
0475 }
0476 return 0;
0477 }
0478
/*
 * Read the value of an extended attribute from its EA inode.
 */
0482 static int
0483 ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
0484 void *buffer, size_t size)
0485 {
0486 struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
0487 struct inode *ea_inode;
0488 int err;
0489
0490 err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
0491 le32_to_cpu(entry->e_hash), &ea_inode);
0492 if (err) {
0493 ea_inode = NULL;
0494 goto out;
0495 }
0496
0497 if (i_size_read(ea_inode) != size) {
0498 ext4_warning_inode(ea_inode,
0499 "ea_inode file size=%llu entry size=%zu",
0500 i_size_read(ea_inode), size);
0501 err = -EFSCORRUPTED;
0502 goto out;
0503 }
0504
0505 err = ext4_xattr_inode_read(ea_inode, buffer, size);
0506 if (err)
0507 goto out;
0508
0509 if (!ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE)) {
0510 err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer,
0511 size);
0512 if (err) {
0513 ext4_warning_inode(ea_inode,
0514 "EA inode hash validation failed");
0515 goto out;
0516 }
0517
0518 if (ea_inode_cache)
0519 mb_cache_entry_create(ea_inode_cache, GFP_NOFS,
0520 ext4_xattr_inode_get_hash(ea_inode),
ea_inode->i_ino, true /* reusable */);
0522 }
0523 out:
0524 iput(ea_inode);
0525 return err;
0526 }
0527
0528 static int
0529 ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
0530 void *buffer, size_t buffer_size)
0531 {
0532 struct buffer_head *bh = NULL;
0533 struct ext4_xattr_entry *entry;
0534 size_t size;
0535 void *end;
0536 int error;
0537 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
0538
0539 ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
0540 name_index, name, buffer, (long)buffer_size);
0541
0542 if (!EXT4_I(inode)->i_file_acl)
0543 return -ENODATA;
0544 ea_idebug(inode, "reading block %llu",
0545 (unsigned long long)EXT4_I(inode)->i_file_acl);
0546 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
0547 if (IS_ERR(bh))
0548 return PTR_ERR(bh);
0549 ea_bdebug(bh, "b_count=%d, refcount=%d",
0550 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
0551 error = ext4_xattr_check_block(inode, bh);
0552 if (error)
0553 goto cleanup;
0554 ext4_xattr_block_cache_insert(ea_block_cache, bh);
0555 entry = BFIRST(bh);
0556 end = bh->b_data + bh->b_size;
0557 error = xattr_find_entry(inode, &entry, end, name_index, name, 1);
0558 if (error)
0559 goto cleanup;
0560 size = le32_to_cpu(entry->e_value_size);
0561 error = -ERANGE;
0562 if (unlikely(size > EXT4_XATTR_SIZE_MAX))
0563 goto cleanup;
0564 if (buffer) {
0565 if (size > buffer_size)
0566 goto cleanup;
0567 if (entry->e_value_inum) {
0568 error = ext4_xattr_inode_get(inode, entry, buffer,
0569 size);
0570 if (error)
0571 goto cleanup;
0572 } else {
0573 u16 offset = le16_to_cpu(entry->e_value_offs);
0574 void *p = bh->b_data + offset;
0575
0576 if (unlikely(p + size > end))
0577 goto cleanup;
0578 memcpy(buffer, p, size);
0579 }
0580 }
0581 error = size;
0582
0583 cleanup:
0584 brelse(bh);
0585 return error;
0586 }
0587
0588 int
0589 ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
0590 void *buffer, size_t buffer_size)
0591 {
0592 struct ext4_xattr_ibody_header *header;
0593 struct ext4_xattr_entry *entry;
0594 struct ext4_inode *raw_inode;
0595 struct ext4_iloc iloc;
0596 size_t size;
0597 void *end;
0598 int error;
0599
0600 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
0601 return -ENODATA;
0602 error = ext4_get_inode_loc(inode, &iloc);
0603 if (error)
0604 return error;
0605 raw_inode = ext4_raw_inode(&iloc);
0606 header = IHDR(inode, raw_inode);
0607 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
0608 error = xattr_check_inode(inode, header, end);
0609 if (error)
0610 goto cleanup;
0611 entry = IFIRST(header);
0612 error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
0613 if (error)
0614 goto cleanup;
0615 size = le32_to_cpu(entry->e_value_size);
0616 error = -ERANGE;
0617 if (unlikely(size > EXT4_XATTR_SIZE_MAX))
0618 goto cleanup;
0619 if (buffer) {
0620 if (size > buffer_size)
0621 goto cleanup;
0622 if (entry->e_value_inum) {
0623 error = ext4_xattr_inode_get(inode, entry, buffer,
0624 size);
0625 if (error)
0626 goto cleanup;
0627 } else {
0628 u16 offset = le16_to_cpu(entry->e_value_offs);
0629 void *p = (void *)IFIRST(header) + offset;
0630
0631 if (unlikely(p + size > end))
0632 goto cleanup;
0633 memcpy(buffer, p, size);
0634 }
0635 }
0636 error = size;
0637
0638 cleanup:
0639 brelse(iloc.bh);
0640 return error;
0641 }
0642
/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer provided, or compute
 * the buffer size required.  Buffer is NULL to compute the size of
 * the buffer required.
 *
 * Returns a negative error number on failure, or the number of
 * bytes used / required on success.
 */
0653 int
0654 ext4_xattr_get(struct inode *inode, int name_index, const char *name,
0655 void *buffer, size_t buffer_size)
0656 {
0657 int error;
0658
0659 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
0660 return -EIO;
0661
0662 if (strlen(name) > 255)
0663 return -ERANGE;
0664
0665 down_read(&EXT4_I(inode)->xattr_sem);
0666 error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
0667 buffer_size);
0668 if (error == -ENODATA)
0669 error = ext4_xattr_block_get(inode, name_index, name, buffer,
0670 buffer_size);
0671 up_read(&EXT4_I(inode)->xattr_sem);
0672 return error;
0673 }
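/*
 * Typical two-pass caller pattern (sketch only; "user.foo" is a made-up
 * attribute name, not something defined in this file):
 *
 *   len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "foo", NULL, 0);
 *   if (len > 0) {
 *           buf = kmalloc(len, GFP_NOFS);
 *           if (buf)
 *                   len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER,
 *                                        "foo", buf, len);
 *   }
 *
 * A NULL buffer returns the value size; a buffer that is too small makes
 * the lookup fail with -ERANGE.
 */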
0674
0675 static int
0676 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
0677 char *buffer, size_t buffer_size)
0678 {
0679 size_t rest = buffer_size;
0680
0681 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
0682 const struct xattr_handler *handler =
0683 ext4_xattr_handler(entry->e_name_index);
0684
0685 if (handler && (!handler->list || handler->list(dentry))) {
0686 const char *prefix = handler->prefix ?: handler->name;
0687 size_t prefix_len = strlen(prefix);
0688 size_t size = prefix_len + entry->e_name_len + 1;
0689
0690 if (buffer) {
0691 if (size > rest)
0692 return -ERANGE;
0693 memcpy(buffer, prefix, prefix_len);
0694 buffer += prefix_len;
0695 memcpy(buffer, entry->e_name, entry->e_name_len);
0696 buffer += entry->e_name_len;
0697 *buffer++ = 0;
0698 }
0699 rest -= size;
0700 }
0701 }
0702 return buffer_size - rest;
0703 }
0704
0705 static int
0706 ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
0707 {
0708 struct inode *inode = d_inode(dentry);
0709 struct buffer_head *bh = NULL;
0710 int error;
0711
0712 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
0713 buffer, (long)buffer_size);
0714
0715 if (!EXT4_I(inode)->i_file_acl)
0716 return 0;
0717 ea_idebug(inode, "reading block %llu",
0718 (unsigned long long)EXT4_I(inode)->i_file_acl);
0719 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
0720 if (IS_ERR(bh))
0721 return PTR_ERR(bh);
0722 ea_bdebug(bh, "b_count=%d, refcount=%d",
0723 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
0724 error = ext4_xattr_check_block(inode, bh);
0725 if (error)
0726 goto cleanup;
0727 ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
0728 error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
0729 buffer_size);
0730 cleanup:
0731 brelse(bh);
0732 return error;
0733 }
0734
0735 static int
0736 ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
0737 {
0738 struct inode *inode = d_inode(dentry);
0739 struct ext4_xattr_ibody_header *header;
0740 struct ext4_inode *raw_inode;
0741 struct ext4_iloc iloc;
0742 void *end;
0743 int error;
0744
0745 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
0746 return 0;
0747 error = ext4_get_inode_loc(inode, &iloc);
0748 if (error)
0749 return error;
0750 raw_inode = ext4_raw_inode(&iloc);
0751 header = IHDR(inode, raw_inode);
0752 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
0753 error = xattr_check_inode(inode, header, end);
0754 if (error)
0755 goto cleanup;
0756 error = ext4_xattr_list_entries(dentry, IFIRST(header),
0757 buffer, buffer_size);
0758
0759 cleanup:
0760 brelse(iloc.bh);
0761 return error;
0762 }
0763
/*
 * ext4_listxattr()
 *
 * Inode operation listxattr().  Copy a list of attribute names into the
 * buffer provided, or compute the buffer size required.  Buffer is NULL
 * to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
0776 ssize_t
0777 ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
0778 {
0779 int ret, ret2;
0780
0781 down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
0782 ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
0783 if (ret < 0)
0784 goto errout;
0785 if (buffer) {
0786 buffer += ret;
0787 buffer_size -= ret;
0788 }
0789 ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
0790 if (ret < 0)
0791 goto errout;
0792 ret += ret2;
0793 errout:
0794 up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
0795 return ret;
0796 }
0797
/*
 * ext4_xattr_update_super_block()
 *
 * Turn on the EXT4_FEATURE_COMPAT_EXT_ATTR superblock feature flag if it
 * is not set yet.
 */
0802 static void ext4_xattr_update_super_block(handle_t *handle,
0803 struct super_block *sb)
0804 {
0805 if (ext4_has_feature_xattr(sb))
0806 return;
0807
0808 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
0809 if (ext4_journal_get_write_access(handle, sb, EXT4_SB(sb)->s_sbh,
0810 EXT4_JTR_NONE) == 0) {
0811 lock_buffer(EXT4_SB(sb)->s_sbh);
0812 ext4_set_feature_xattr(sb);
0813 ext4_superblock_csum_set(sb);
0814 unlock_buffer(EXT4_SB(sb)->s_sbh);
0815 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
0816 }
0817 }
0818
0819 int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
0820 {
0821 struct ext4_iloc iloc = { .bh = NULL };
0822 struct buffer_head *bh = NULL;
0823 struct ext4_inode *raw_inode;
0824 struct ext4_xattr_ibody_header *header;
0825 struct ext4_xattr_entry *entry;
0826 qsize_t ea_inode_refs = 0;
0827 void *end;
0828 int ret;
0829
0830 lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);
0831
0832 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
0833 ret = ext4_get_inode_loc(inode, &iloc);
0834 if (ret)
0835 goto out;
0836 raw_inode = ext4_raw_inode(&iloc);
0837 header = IHDR(inode, raw_inode);
0838 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
0839 ret = xattr_check_inode(inode, header, end);
0840 if (ret)
0841 goto out;
0842
0843 for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
0844 entry = EXT4_XATTR_NEXT(entry))
0845 if (entry->e_value_inum)
0846 ea_inode_refs++;
0847 }
0848
0849 if (EXT4_I(inode)->i_file_acl) {
0850 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
0851 if (IS_ERR(bh)) {
0852 ret = PTR_ERR(bh);
0853 bh = NULL;
0854 goto out;
0855 }
0856
0857 ret = ext4_xattr_check_block(inode, bh);
0858 if (ret)
0859 goto out;
0860
0861 for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
0862 entry = EXT4_XATTR_NEXT(entry))
0863 if (entry->e_value_inum)
0864 ea_inode_refs++;
0865 }
0866 *usage = ea_inode_refs + 1;
0867 ret = 0;
0868 out:
0869 brelse(iloc.bh);
0870 brelse(bh);
0871 return ret;
0872 }
0873
0874 static inline size_t round_up_cluster(struct inode *inode, size_t length)
0875 {
0876 struct super_block *sb = inode->i_sb;
0877 size_t cluster_size = 1 << (EXT4_SB(sb)->s_cluster_bits +
0878 inode->i_blkbits);
0879 size_t mask = ~(cluster_size - 1);
0880
0881 return (length + cluster_size - 1) & mask;
0882 }
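/*
 * Worked example for round_up_cluster(), assuming 4 KiB blocks and
 * s_cluster_bits == 0 (cluster == block): a 5000 byte value is charged as
 * (5000 + 4095) & ~4095 == 8192 bytes, i.e. two clusters.
 */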
0883
0884 static int ext4_xattr_inode_alloc_quota(struct inode *inode, size_t len)
0885 {
0886 int err;
0887
0888 err = dquot_alloc_inode(inode);
0889 if (err)
0890 return err;
0891 err = dquot_alloc_space_nodirty(inode, round_up_cluster(inode, len));
0892 if (err)
0893 dquot_free_inode(inode);
0894 return err;
0895 }
0896
0897 static void ext4_xattr_inode_free_quota(struct inode *parent,
0898 struct inode *ea_inode,
0899 size_t len)
0900 {
0901 if (ea_inode &&
0902 ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE))
0903 return;
0904 dquot_free_space_nodirty(parent, round_up_cluster(parent, len));
0905 dquot_free_inode(parent);
0906 }
0907
0908 int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
0909 struct buffer_head *block_bh, size_t value_len,
0910 bool is_create)
0911 {
0912 int credits;
0913 int blocks;
0914
/*
 * Credits for the common case:
 *
 * 1) owner inode update
 * 2) ref count update on the old xattr block
 * 3) new xattr block
 * 4) block bitmap update for the new xattr block
 * 5) group descriptor for the new xattr block
 * 6) block bitmap update for the old xattr block
 * 7) group descriptor for the old block
 */
0928 credits = 7;
0929
0930
0931 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(sb);
0932
0933
0934
0935
0936
0937 if (inode && ext4_has_inline_data(inode))
0938 credits += ext4_writepage_trans_blocks(inode) + 1;
0939
0940
0941 if (!ext4_has_feature_ea_inode(sb))
0942 return credits;
0943
0944
0945 credits += 4;
0946
0947
0948 blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
0949
0950
0951 blocks += 1;
0952
0953
0954 credits += blocks * 2;
0955
0956
0957 credits += blocks;
0958
0959 if (!is_create) {
0960
0961
0962
0963 credits += 4;
0964
0965
0966 blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits;
0967
0968
0969
0970
0971 blocks += 1;
0972
0973
0974 credits += blocks * 2;
0975 }
0976
0977
0978
0979
0980 if (block_bh) {
0981 struct ext4_xattr_entry *entry = BFIRST(block_bh);
0982
0983 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry))
0984 if (entry->e_value_inum)
0985
0986 credits += 1;
0987 }
0988 return credits;
0989 }
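/*
 * Illustrative credit count (not a guarantee, just the arithmetic above):
 * with 4 KiB blocks, the ea_inode feature enabled, is_create == true, no
 * inline data and no existing xattr block, a 16 KiB value gives
 *
 *   blocks  = ((16384 + 4095) >> 12) + 1 = 5
 *   credits = 7 + EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) + 4 + 5 * 2 + 5
 *           = 26 + EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)
 *
 * The quota term depends on how many quota types are enabled on the fs.
 */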
0990
0991 static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
0992 int ref_change)
0993 {
0994 struct ext4_iloc iloc;
0995 s64 ref_count;
0996 int ret;
0997
0998 inode_lock(ea_inode);
0999
1000 ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
1001 if (ret)
1002 goto out;
1003
1004 ref_count = ext4_xattr_inode_get_ref(ea_inode);
1005 ref_count += ref_change;
1006 ext4_xattr_inode_set_ref(ea_inode, ref_count);
1007
1008 if (ref_change > 0) {
1009 WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
1010 ea_inode->i_ino, ref_count);
1011
1012 if (ref_count == 1) {
1013 WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
1014 ea_inode->i_ino, ea_inode->i_nlink);
1015
1016 set_nlink(ea_inode, 1);
1017 ext4_orphan_del(handle, ea_inode);
1018 }
1019 } else {
1020 WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
1021 ea_inode->i_ino, ref_count);
1022
1023 if (ref_count == 0) {
1024 WARN_ONCE(ea_inode->i_nlink != 1,
1025 "EA inode %lu i_nlink=%u",
1026 ea_inode->i_ino, ea_inode->i_nlink);
1027
1028 clear_nlink(ea_inode);
1029 ext4_orphan_add(handle, ea_inode);
1030 }
1031 }
1032
1033 ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
1034 if (ret)
1035 ext4_warning_inode(ea_inode,
1036 "ext4_mark_iloc_dirty() failed ret=%d", ret);
1037 out:
1038 inode_unlock(ea_inode);
1039 return ret;
1040 }
1041
1042 static int ext4_xattr_inode_inc_ref(handle_t *handle, struct inode *ea_inode)
1043 {
1044 return ext4_xattr_inode_update_ref(handle, ea_inode, 1);
1045 }
1046
1047 static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode)
1048 {
1049 return ext4_xattr_inode_update_ref(handle, ea_inode, -1);
1050 }
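/*
 * Lifetime note for the two wrappers above: an EA inode sits on the orphan
 * list while its reference count is zero.  An increment from 0 to 1 sets
 * i_nlink back to 1 and removes it from the orphan list; a decrement to 0
 * clears i_nlink and re-adds it, so an unreferenced EA inode left behind by
 * a crash can be reclaimed during orphan cleanup.
 */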
1051
1052 static int ext4_xattr_inode_inc_ref_all(handle_t *handle, struct inode *parent,
1053 struct ext4_xattr_entry *first)
1054 {
1055 struct inode *ea_inode;
1056 struct ext4_xattr_entry *entry;
1057 struct ext4_xattr_entry *failed_entry;
1058 unsigned int ea_ino;
1059 int err, saved_err;
1060
1061 for (entry = first; !IS_LAST_ENTRY(entry);
1062 entry = EXT4_XATTR_NEXT(entry)) {
1063 if (!entry->e_value_inum)
1064 continue;
1065 ea_ino = le32_to_cpu(entry->e_value_inum);
1066 err = ext4_xattr_inode_iget(parent, ea_ino,
1067 le32_to_cpu(entry->e_hash),
1068 &ea_inode);
1069 if (err)
1070 goto cleanup;
1071 err = ext4_xattr_inode_inc_ref(handle, ea_inode);
1072 if (err) {
1073 ext4_warning_inode(ea_inode, "inc ref error %d", err);
1074 iput(ea_inode);
1075 goto cleanup;
1076 }
1077 iput(ea_inode);
1078 }
1079 return 0;
1080
1081 cleanup:
1082 saved_err = err;
1083 failed_entry = entry;
1084
1085 for (entry = first; entry != failed_entry;
1086 entry = EXT4_XATTR_NEXT(entry)) {
1087 if (!entry->e_value_inum)
1088 continue;
1089 ea_ino = le32_to_cpu(entry->e_value_inum);
1090 err = ext4_xattr_inode_iget(parent, ea_ino,
1091 le32_to_cpu(entry->e_hash),
1092 &ea_inode);
1093 if (err) {
1094 ext4_warning(parent->i_sb,
1095 "cleanup ea_ino %u iget error %d", ea_ino,
1096 err);
1097 continue;
1098 }
1099 err = ext4_xattr_inode_dec_ref(handle, ea_inode);
1100 if (err)
1101 ext4_warning_inode(ea_inode, "cleanup dec ref error %d",
1102 err);
1103 iput(ea_inode);
1104 }
1105 return saved_err;
1106 }
1107
1108 static int ext4_xattr_restart_fn(handle_t *handle, struct inode *inode,
1109 struct buffer_head *bh, bool block_csum, bool dirty)
1110 {
1111 int error;
1112
1113 if (bh && dirty) {
1114 if (block_csum)
1115 ext4_xattr_block_csum_set(inode, bh);
1116 error = ext4_handle_dirty_metadata(handle, NULL, bh);
1117 if (error) {
1118 ext4_warning(inode->i_sb, "Handle metadata (error %d)",
1119 error);
1120 return error;
1121 }
1122 }
1123 return 0;
1124 }
1125
1126 static void
1127 ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
1128 struct buffer_head *bh,
1129 struct ext4_xattr_entry *first, bool block_csum,
1130 struct ext4_xattr_inode_array **ea_inode_array,
1131 int extra_credits, bool skip_quota)
1132 {
1133 struct inode *ea_inode;
1134 struct ext4_xattr_entry *entry;
1135 bool dirty = false;
1136 unsigned int ea_ino;
1137 int err;
1138 int credits;
1139
1140
1141 credits = 2 + extra_credits;
1142
1143 for (entry = first; !IS_LAST_ENTRY(entry);
1144 entry = EXT4_XATTR_NEXT(entry)) {
1145 if (!entry->e_value_inum)
1146 continue;
1147 ea_ino = le32_to_cpu(entry->e_value_inum);
1148 err = ext4_xattr_inode_iget(parent, ea_ino,
1149 le32_to_cpu(entry->e_hash),
1150 &ea_inode);
1151 if (err)
1152 continue;
1153
1154 err = ext4_expand_inode_array(ea_inode_array, ea_inode);
1155 if (err) {
1156 ext4_warning_inode(ea_inode,
1157 "Expand inode array err=%d", err);
1158 iput(ea_inode);
1159 continue;
1160 }
1161
1162 err = ext4_journal_ensure_credits_fn(handle, credits, credits,
1163 ext4_free_metadata_revoke_credits(parent->i_sb, 1),
1164 ext4_xattr_restart_fn(handle, parent, bh, block_csum,
1165 dirty));
1166 if (err < 0) {
1167 ext4_warning_inode(ea_inode, "Ensure credits err=%d",
1168 err);
1169 continue;
1170 }
1171 if (err > 0) {
1172 err = ext4_journal_get_write_access(handle,
1173 parent->i_sb, bh, EXT4_JTR_NONE);
1174 if (err) {
1175 ext4_warning_inode(ea_inode,
1176 "Re-get write access err=%d",
1177 err);
1178 continue;
1179 }
1180 }
1181
1182 err = ext4_xattr_inode_dec_ref(handle, ea_inode);
1183 if (err) {
1184 ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d",
1185 err);
1186 continue;
1187 }
1188
1189 if (!skip_quota)
1190 ext4_xattr_inode_free_quota(parent, ea_inode,
1191 le32_to_cpu(entry->e_value_size));
1192
/*
 * Forget about the ea_inode within the same transaction that decrements
 * the ref count, so that the rest of the work spilling over to later
 * transactions cannot decrement it a second time.
 */
1199 entry->e_value_inum = 0;
1200 entry->e_value_size = 0;
1201
1202 dirty = true;
1203 }
1204
1205 if (dirty) {
/*
 * The block checksum is deliberately not recomputed here: no journal
 * restart is expected between this point and the xattr block being
 * freed by the caller.
 */
1212 err = ext4_handle_dirty_metadata(handle, NULL, bh);
1213 if (err)
1214 ext4_warning_inode(parent,
1215 "handle dirty metadata err=%d", err);
1216 }
1217 }
1218
/*
 * Release the xattr block BH: if the reference count is > 1, decrement it;
 * otherwise free the block.
 */
1223 static void
1224 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
1225 struct buffer_head *bh,
1226 struct ext4_xattr_inode_array **ea_inode_array,
1227 int extra_credits)
1228 {
1229 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
1230 u32 hash, ref;
1231 int error = 0;
1232
1233 BUFFER_TRACE(bh, "get_write_access");
1234 error = ext4_journal_get_write_access(handle, inode->i_sb, bh,
1235 EXT4_JTR_NONE);
1236 if (error)
1237 goto out;
1238
1239 retry_ref:
1240 lock_buffer(bh);
1241 hash = le32_to_cpu(BHDR(bh)->h_hash);
1242 ref = le32_to_cpu(BHDR(bh)->h_refcount);
1243 if (ref == 1) {
1244 ea_bdebug(bh, "refcount now=0; freeing");
/*
 * This must happen under the buffer lock so that
 * ext4_xattr_block_set() can reliably detect a freed block.
 */
1249 if (ea_block_cache) {
1250 struct mb_cache_entry *oe;
1251
1252 oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
1253 bh->b_blocknr);
1254 if (oe) {
1255 unlock_buffer(bh);
1256 mb_cache_entry_wait_unused(oe);
1257 mb_cache_entry_put(ea_block_cache, oe);
1258 goto retry_ref;
1259 }
1260 }
1261 get_bh(bh);
1262 unlock_buffer(bh);
1263
1264 if (ext4_has_feature_ea_inode(inode->i_sb))
1265 ext4_xattr_inode_dec_ref_all(handle, inode, bh,
1266 BFIRST(bh),
true /* block_csum */,
ea_inode_array,
extra_credits,
true /* skip_quota */);
1271 ext4_free_blocks(handle, inode, bh, 0, 1,
1272 EXT4_FREE_BLOCKS_METADATA |
1273 EXT4_FREE_BLOCKS_FORGET);
1274 } else {
1275 ref--;
1276 BHDR(bh)->h_refcount = cpu_to_le32(ref);
1277 if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
1278 struct mb_cache_entry *ce;
1279
1280 if (ea_block_cache) {
1281 ce = mb_cache_entry_get(ea_block_cache, hash,
1282 bh->b_blocknr);
1283 if (ce) {
1284 ce->e_reusable = 1;
1285 mb_cache_entry_put(ea_block_cache, ce);
1286 }
1287 }
1288 }
1289
1290 ext4_xattr_block_csum_set(inode, bh);
1291
/*
 * Releasing xattr block references from different inodes can race, so
 * protect against someone else freeing the block (and releasing its
 * journal_head) before we are done dirtying the buffer.  In nojournal
 * mode this race is harmless, and ext4_handle_dirty_metadata() cannot be
 * called with the buffer locked there (it may end up calling
 * sync_dirty_buffer()), so in that case the dirtying is done after
 * unlocking the buffer.
 */
1301 if (ext4_handle_valid(handle))
1302 error = ext4_handle_dirty_metadata(handle, inode, bh);
1303 unlock_buffer(bh);
1304 if (!ext4_handle_valid(handle))
1305 error = ext4_handle_dirty_metadata(handle, inode, bh);
1306 if (IS_SYNC(inode))
1307 ext4_handle_sync(handle);
1308 dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
1309 ea_bdebug(bh, "refcount now=%d; releasing",
1310 le32_to_cpu(BHDR(bh)->h_refcount));
1311 }
1312 out:
1313 ext4_std_error(inode->i_sb, error);
1314 return;
1315 }
1316
/*
 * Compute the amount of free space in the xattr storage described by
 * 'last', and optionally the total number of bytes used by the entry
 * descriptors and names.
 */
1321 static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
1322 size_t *min_offs, void *base, int *total)
1323 {
1324 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1325 if (!last->e_value_inum && last->e_value_size) {
1326 size_t offs = le16_to_cpu(last->e_value_offs);
1327 if (offs < *min_offs)
1328 *min_offs = offs;
1329 }
1330 if (total)
1331 *total += EXT4_XATTR_LEN(last->e_name_len);
1332 }
1333 return (*min_offs - ((void *)last - base) - sizeof(__u32));
1334 }
1335
/*
 * Write the value of a large EA into the blocks of its EA inode.
 */
1339 static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
1340 const void *buf, int bufsize)
1341 {
1342 struct buffer_head *bh = NULL;
1343 unsigned long block = 0;
1344 int blocksize = ea_inode->i_sb->s_blocksize;
1345 int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
1346 int csize, wsize = 0;
1347 int ret = 0, ret2 = 0;
1348 int retries = 0;
1349
1350 retry:
1351 while (ret >= 0 && ret < max_blocks) {
1352 struct ext4_map_blocks map;
1353 map.m_lblk = block += ret;
1354 map.m_len = max_blocks -= ret;
1355
1356 ret = ext4_map_blocks(handle, ea_inode, &map,
1357 EXT4_GET_BLOCKS_CREATE);
1358 if (ret <= 0) {
1359 ext4_mark_inode_dirty(handle, ea_inode);
1360 if (ret == -ENOSPC &&
1361 ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
1362 ret = 0;
1363 goto retry;
1364 }
1365 break;
1366 }
1367 }
1368
1369 if (ret < 0)
1370 return ret;
1371
1372 block = 0;
1373 while (wsize < bufsize) {
1374 brelse(bh);
1375 csize = (bufsize - wsize) > blocksize ? blocksize :
1376 bufsize - wsize;
1377 bh = ext4_getblk(handle, ea_inode, block, 0);
1378 if (IS_ERR(bh))
1379 return PTR_ERR(bh);
1380 if (!bh) {
1381 WARN_ON_ONCE(1);
1382 EXT4_ERROR_INODE(ea_inode,
1383 "ext4_getblk() return bh = NULL");
1384 return -EFSCORRUPTED;
1385 }
1386 ret = ext4_journal_get_write_access(handle, ea_inode->i_sb, bh,
1387 EXT4_JTR_NONE);
1388 if (ret)
1389 goto out;
1390
1391 memcpy(bh->b_data, buf, csize);
1392 set_buffer_uptodate(bh);
1393 ext4_handle_dirty_metadata(handle, ea_inode, bh);
1394
1395 buf += csize;
1396 wsize += csize;
1397 block += 1;
1398 }
1399
1400 inode_lock(ea_inode);
1401 i_size_write(ea_inode, wsize);
1402 ext4_update_i_disksize(ea_inode, wsize);
1403 inode_unlock(ea_inode);
1404
1405 ret2 = ext4_mark_inode_dirty(handle, ea_inode);
1406 if (unlikely(ret2 && !ret))
1407 ret = ret2;
1408
1409 out:
1410 brelse(bh);
1411
1412 return ret;
1413 }
1414
/*
 * Create an inode to store the value of a large EA.
 */
1418 static struct inode *ext4_xattr_inode_create(handle_t *handle,
1419 struct inode *inode, u32 hash)
1420 {
1421 struct inode *ea_inode = NULL;
1422 uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
1423 int err;
1424
/*
 * Let the next inode be the goal, so we try to allocate the EA inode
 * in the same group, or a nearby one.
 */
1429 ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
1430 S_IFREG | 0600, NULL, inode->i_ino + 1, owner,
1431 EXT4_EA_INODE_FL);
1432 if (!IS_ERR(ea_inode)) {
1433 ea_inode->i_op = &ext4_file_inode_operations;
1434 ea_inode->i_fop = &ext4_file_operations;
1435 ext4_set_aops(ea_inode);
1436 ext4_xattr_inode_set_class(ea_inode);
1437 unlock_new_inode(ea_inode);
1438 ext4_xattr_inode_set_ref(ea_inode, 1);
1439 ext4_xattr_inode_set_hash(ea_inode, hash);
1440 err = ext4_mark_inode_dirty(handle, ea_inode);
1441 if (!err)
1442 err = ext4_inode_attach_jinode(ea_inode);
1443 if (err) {
1444 iput(ea_inode);
1445 return ERR_PTR(err);
1446 }
1447
/*
 * EA inodes are shared, so their quota is charged against the parent
 * (see ext4_xattr_inode_alloc_quota()): undo the charge made by
 * ext4_new_inode() and detach the dquots.
 */
1452 dquot_free_inode(ea_inode);
1453 dquot_drop(ea_inode);
1454 inode_lock(ea_inode);
1455 ea_inode->i_flags |= S_NOQUOTA;
1456 inode_unlock(ea_inode);
1457 }
1458
1459 return ea_inode;
1460 }
1461
1462 static struct inode *
1463 ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
1464 size_t value_len, u32 hash)
1465 {
1466 struct inode *ea_inode;
1467 struct mb_cache_entry *ce;
1468 struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
1469 void *ea_data;
1470
1471 if (!ea_inode_cache)
1472 return NULL;
1473
1474 ce = mb_cache_entry_find_first(ea_inode_cache, hash);
1475 if (!ce)
1476 return NULL;
1477
1478 WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
1479 !(current->flags & PF_MEMALLOC_NOFS));
1480
1481 ea_data = kvmalloc(value_len, GFP_KERNEL);
1482 if (!ea_data) {
1483 mb_cache_entry_put(ea_inode_cache, ce);
1484 return NULL;
1485 }
1486
1487 while (ce) {
1488 ea_inode = ext4_iget(inode->i_sb, ce->e_value,
1489 EXT4_IGET_NORMAL);
1490 if (!IS_ERR(ea_inode) &&
1491 !is_bad_inode(ea_inode) &&
1492 (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
1493 i_size_read(ea_inode) == value_len &&
1494 !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
1495 !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
1496 value_len) &&
1497 !memcmp(value, ea_data, value_len)) {
1498 mb_cache_entry_touch(ea_inode_cache, ce);
1499 mb_cache_entry_put(ea_inode_cache, ce);
1500 kvfree(ea_data);
1501 return ea_inode;
1502 }
1503
1504 if (!IS_ERR(ea_inode))
1505 iput(ea_inode);
1506 ce = mb_cache_entry_find_next(ea_inode_cache, ce);
1507 }
1508 kvfree(ea_data);
1509 return NULL;
1510 }
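/*
 * Deduplication sketch for the lookup above: the crc32c hash of the value
 * is the mb_cache key, so several candidate EA inodes may share one hash.
 * Every candidate is read back and compared byte for byte (the memcmp in
 * the loop) before it is reused, so a hash collision can never alias two
 * different values.
 */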
1511
/*
 * Find an existing EA inode holding the given value, or create and
 * initialize a new one.
 */
1515 static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
1516 const void *value, size_t value_len,
1517 struct inode **ret_inode)
1518 {
1519 struct inode *ea_inode;
1520 u32 hash;
1521 int err;
1522
1523 hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
1524 ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
1525 if (ea_inode) {
1526 err = ext4_xattr_inode_inc_ref(handle, ea_inode);
1527 if (err) {
1528 iput(ea_inode);
1529 return err;
1530 }
1531
1532 *ret_inode = ea_inode;
1533 return 0;
1534 }
1535
1536
1537 ea_inode = ext4_xattr_inode_create(handle, inode, hash);
1538 if (IS_ERR(ea_inode))
1539 return PTR_ERR(ea_inode);
1540
1541 err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
1542 if (err) {
1543 ext4_xattr_inode_dec_ref(handle, ea_inode);
1544 iput(ea_inode);
1545 return err;
1546 }
1547
1548 if (EA_INODE_CACHE(inode))
1549 mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
ea_inode->i_ino, true /* reusable */);
1551
1552 *ret_inode = ea_inode;
1553 return 0;
1554 }
1555
1556
/*
 * Reserve min(block_size/8, 1024) bytes of an external xattr block for
 * entries/names when the ea_inode feature is enabled, so that one huge
 * value cannot use up the whole block.
 */
1560 #define EXT4_XATTR_BLOCK_RESERVE(inode) min(i_blocksize(inode)/8, 1024U)
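/*
 * Example: with a 4 KiB block size the reserve is min(4096 / 8, 1024U) ==
 * 512 bytes; with 64 KiB blocks it is capped at 1024 bytes.
 */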
1561
1562 static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
1563 struct ext4_xattr_search *s,
1564 handle_t *handle, struct inode *inode,
1565 bool is_block)
1566 {
1567 struct ext4_xattr_entry *last, *next;
1568 struct ext4_xattr_entry *here = s->here;
1569 size_t min_offs = s->end - s->base, name_len = strlen(i->name);
1570 int in_inode = i->in_inode;
1571 struct inode *old_ea_inode = NULL;
1572 struct inode *new_ea_inode = NULL;
1573 size_t old_size, new_size;
1574 int ret;
1575
1576
1577 old_size = (!s->not_found && !here->e_value_inum) ?
1578 EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0;
1579 new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0;
1580
/*
 * Optimization for the simple case when the old and new values have the
 * same padded size.  Not applicable when external (EA inode) values are
 * involved.
 */
1585 if (new_size && new_size == old_size) {
1586 size_t offs = le16_to_cpu(here->e_value_offs);
1587 void *val = s->base + offs;
1588
1589 here->e_value_size = cpu_to_le32(i->value_len);
1590 if (i->value == EXT4_ZERO_XATTR_VALUE) {
1591 memset(val, 0, new_size);
1592 } else {
1593 memcpy(val, i->value, i->value_len);
1594
1595 memset(val + i->value_len, 0, new_size - i->value_len);
1596 }
1597 goto update_hash;
1598 }
1599
1600
1601 last = s->first;
1602 for (; !IS_LAST_ENTRY(last); last = next) {
1603 next = EXT4_XATTR_NEXT(last);
1604 if ((void *)next >= s->end) {
1605 EXT4_ERROR_INODE(inode, "corrupted xattr entries");
1606 ret = -EFSCORRUPTED;
1607 goto out;
1608 }
1609 if (!last->e_value_inum && last->e_value_size) {
1610 size_t offs = le16_to_cpu(last->e_value_offs);
1611 if (offs < min_offs)
1612 min_offs = offs;
1613 }
1614 }
1615
1616
1617 if (i->value) {
1618 size_t free;
1619
1620 free = min_offs - ((void *)last - s->base) - sizeof(__u32);
1621 if (!s->not_found)
1622 free += EXT4_XATTR_LEN(name_len) + old_size;
1623
1624 if (free < EXT4_XATTR_LEN(name_len) + new_size) {
1625 ret = -ENOSPC;
1626 goto out;
1627 }
1628
/*
 * If storing the value in an external inode is an option, reserve space
 * for xattr entries/names in the external attribute block, so that a
 * long value does not occupy the whole block and prevent further
 * entries from being added.
 */
1635 if (ext4_has_feature_ea_inode(inode->i_sb) &&
1636 new_size && is_block &&
1637 (min_offs + old_size - new_size) <
1638 EXT4_XATTR_BLOCK_RESERVE(inode)) {
1639 ret = -ENOSPC;
1640 goto out;
1641 }
1642 }
1643
/*
 * Getting access to the old and new ea inodes is subject to failures.
 * Finish that work before doing any modifications to the buffer.
 */
1648 if (!s->not_found && here->e_value_inum) {
1649 ret = ext4_xattr_inode_iget(inode,
1650 le32_to_cpu(here->e_value_inum),
1651 le32_to_cpu(here->e_hash),
1652 &old_ea_inode);
1653 if (ret) {
1654 old_ea_inode = NULL;
1655 goto out;
1656 }
1657 }
1658 if (i->value && in_inode) {
1659 WARN_ON_ONCE(!i->value_len);
1660
1661 ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
1662 if (ret)
1663 goto out;
1664
1665 ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
1666 i->value_len,
1667 &new_ea_inode);
1668 if (ret) {
1669 new_ea_inode = NULL;
1670 ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
1671 goto out;
1672 }
1673 }
1674
1675 if (old_ea_inode) {
1676
1677 ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
1678 if (ret) {
1679
1680 if (new_ea_inode) {
1681 int err;
1682
1683 err = ext4_xattr_inode_dec_ref(handle,
1684 new_ea_inode);
1685 if (err)
1686 ext4_warning_inode(new_ea_inode,
1687 "dec ref new_ea_inode err=%d",
1688 err);
1689 ext4_xattr_inode_free_quota(inode, new_ea_inode,
1690 i->value_len);
1691 }
1692 goto out;
1693 }
1694
1695 ext4_xattr_inode_free_quota(inode, old_ea_inode,
1696 le32_to_cpu(here->e_value_size));
1697 }
1698
/* No failures allowed past this point. */
1701 if (!s->not_found && here->e_value_size && !here->e_value_inum) {
1702
1703 void *first_val = s->base + min_offs;
1704 size_t offs = le16_to_cpu(here->e_value_offs);
1705 void *val = s->base + offs;
1706
1707 memmove(first_val + old_size, first_val, val - first_val);
1708 memset(first_val, 0, old_size);
1709 min_offs += old_size;
1710
1711
1712 last = s->first;
1713 while (!IS_LAST_ENTRY(last)) {
1714 size_t o = le16_to_cpu(last->e_value_offs);
1715
1716 if (!last->e_value_inum &&
1717 last->e_value_size && o < offs)
1718 last->e_value_offs = cpu_to_le16(o + old_size);
1719 last = EXT4_XATTR_NEXT(last);
1720 }
1721 }
1722
1723 if (!i->value) {
/* Remove the old name. */
1725 size_t size = EXT4_XATTR_LEN(name_len);
1726
1727 last = ENTRY((void *)last - size);
1728 memmove(here, (void *)here + size,
1729 (void *)last - (void *)here + sizeof(__u32));
1730 memset(last, 0, size);
1731 } else if (s->not_found) {
/* Insert the new name. */
1733 size_t size = EXT4_XATTR_LEN(name_len);
1734 size_t rest = (void *)last - (void *)here + sizeof(__u32);
1735
1736 memmove((void *)here + size, here, rest);
1737 memset(here, 0, size);
1738 here->e_name_index = i->name_index;
1739 here->e_name_len = name_len;
1740 memcpy(here->e_name, i->name, name_len);
1741 } else {
/* This is an update; reset the value info. */
1743 here->e_value_inum = 0;
1744 here->e_value_offs = 0;
1745 here->e_value_size = 0;
1746 }
1747
1748 if (i->value) {
1749
1750 if (in_inode) {
1751 here->e_value_inum = cpu_to_le32(new_ea_inode->i_ino);
1752 } else if (i->value_len) {
1753 void *val = s->base + min_offs - new_size;
1754
1755 here->e_value_offs = cpu_to_le16(min_offs - new_size);
1756 if (i->value == EXT4_ZERO_XATTR_VALUE) {
1757 memset(val, 0, new_size);
1758 } else {
1759 memcpy(val, i->value, i->value_len);
1760
1761 memset(val + i->value_len, 0,
1762 new_size - i->value_len);
1763 }
1764 }
1765 here->e_value_size = cpu_to_le32(i->value_len);
1766 }
1767
1768 update_hash:
1769 if (i->value) {
1770 __le32 hash = 0;
1771
1772
1773 if (in_inode) {
1774 __le32 crc32c_hash;
1775
/*
 * Feed the crc32c hash of the value instead of the raw value into the
 * entry hash calculation, to avoid walking a potentially long value
 * buffer again.
 */
1781 crc32c_hash = cpu_to_le32(
1782 ext4_xattr_inode_get_hash(new_ea_inode));
1783 hash = ext4_xattr_hash_entry(here->e_name,
1784 here->e_name_len,
1785 &crc32c_hash, 1);
1786 } else if (is_block) {
1787 __le32 *value = s->base + le16_to_cpu(
1788 here->e_value_offs);
1789
1790 hash = ext4_xattr_hash_entry(here->e_name,
1791 here->e_name_len, value,
1792 new_size >> 2);
1793 }
1794 here->e_hash = hash;
1795 }
1796
1797 if (is_block)
1798 ext4_xattr_rehash((struct ext4_xattr_header *)s->base);
1799
1800 ret = 0;
1801 out:
1802 iput(old_ea_inode);
1803 iput(new_ea_inode);
1804 return ret;
1805 }
1806
1807 struct ext4_xattr_block_find {
1808 struct ext4_xattr_search s;
1809 struct buffer_head *bh;
1810 };
1811
1812 static int
1813 ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
1814 struct ext4_xattr_block_find *bs)
1815 {
1816 struct super_block *sb = inode->i_sb;
1817 int error;
1818
1819 ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
1820 i->name_index, i->name, i->value, (long)i->value_len);
1821
1822 if (EXT4_I(inode)->i_file_acl) {
1823
1824 bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
1825 if (IS_ERR(bs->bh)) {
1826 error = PTR_ERR(bs->bh);
1827 bs->bh = NULL;
1828 return error;
1829 }
1830 ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
1831 atomic_read(&(bs->bh->b_count)),
1832 le32_to_cpu(BHDR(bs->bh)->h_refcount));
1833 error = ext4_xattr_check_block(inode, bs->bh);
1834 if (error)
1835 return error;
1836
1837 bs->s.base = BHDR(bs->bh);
1838 bs->s.first = BFIRST(bs->bh);
1839 bs->s.end = bs->bh->b_data + bs->bh->b_size;
1840 bs->s.here = bs->s.first;
1841 error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
1842 i->name_index, i->name, 1);
1843 if (error && error != -ENODATA)
1844 return error;
1845 bs->s.not_found = error;
1846 }
1847 return 0;
1848 }
1849
1850 static int
1851 ext4_xattr_block_set(handle_t *handle, struct inode *inode,
1852 struct ext4_xattr_info *i,
1853 struct ext4_xattr_block_find *bs)
1854 {
1855 struct super_block *sb = inode->i_sb;
1856 struct buffer_head *new_bh = NULL;
1857 struct ext4_xattr_search s_copy = bs->s;
1858 struct ext4_xattr_search *s = &s_copy;
1859 struct mb_cache_entry *ce = NULL;
1860 int error = 0;
1861 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
1862 struct inode *ea_inode = NULL, *tmp_inode;
1863 size_t old_ea_inode_quota = 0;
1864 unsigned int ea_ino;
1865
1866
1867 #define header(x) ((struct ext4_xattr_header *)(x))
1868
1869 if (s->base) {
1870 int offset = (char *)s->here - bs->bh->b_data;
1871
1872 BUFFER_TRACE(bs->bh, "get_write_access");
1873 error = ext4_journal_get_write_access(handle, sb, bs->bh,
1874 EXT4_JTR_NONE);
1875 if (error)
1876 goto cleanup;
1877 lock_buffer(bs->bh);
1878
1879 if (header(s->base)->h_refcount == cpu_to_le32(1)) {
1880 __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
1881
/*
 * This must happen under the buffer lock for ext4_xattr_block_set()
 * to reliably detect a modified block.
 */
1887 if (ea_block_cache) {
1888 struct mb_cache_entry *oe;
1889
1890 oe = mb_cache_entry_delete_or_get(ea_block_cache,
1891 hash, bs->bh->b_blocknr);
1892 if (oe) {
/*
 * Someone else still holds a reference to this block in the
 * mb_cache; leave it alone and work on a copy instead.
 */
1897 mb_cache_entry_put(ea_block_cache, oe);
1898 goto clone_block;
1899 }
1900 }
1901 ea_bdebug(bs->bh, "modifying in-place");
1902 error = ext4_xattr_set_entry(i, s, handle, inode,
true /* is_block */);
1904 ext4_xattr_block_csum_set(inode, bs->bh);
1905 unlock_buffer(bs->bh);
1906 if (error == -EFSCORRUPTED)
1907 goto bad_block;
1908 if (!error)
1909 error = ext4_handle_dirty_metadata(handle,
1910 inode,
1911 bs->bh);
1912 if (error)
1913 goto cleanup;
1914 goto inserted;
1915 }
1916 clone_block:
1917 unlock_buffer(bs->bh);
1918 ea_bdebug(bs->bh, "cloning");
1919 s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
1920 error = -ENOMEM;
1921 if (s->base == NULL)
1922 goto cleanup;
1923 s->first = ENTRY(header(s->base)+1);
1924 header(s->base)->h_refcount = cpu_to_le32(1);
1925 s->here = ENTRY(s->base + offset);
1926 s->end = s->base + bs->bh->b_size;
1927
/*
 * If the existing entry points to an EA inode, the reference on that
 * inode belongs to the original block.  Make the cloned entry look like
 * it has an empty value so that ext4_xattr_set_entry() does not drop
 * that reference.
 */
1935 if (!s->not_found && s->here->e_value_inum) {
1936 ea_ino = le32_to_cpu(s->here->e_value_inum);
1937 error = ext4_xattr_inode_iget(inode, ea_ino,
1938 le32_to_cpu(s->here->e_hash),
1939 &tmp_inode);
1940 if (error)
1941 goto cleanup;
1942
1943 if (!ext4_test_inode_state(tmp_inode,
1944 EXT4_STATE_LUSTRE_EA_INODE)) {
1945
1946
1947
1948
1949 old_ea_inode_quota = le32_to_cpu(
1950 s->here->e_value_size);
1951 }
1952 iput(tmp_inode);
1953
1954 s->here->e_value_inum = 0;
1955 s->here->e_value_size = 0;
1956 }
1957 } else {
/* Allocate a buffer where we construct the new block. */
1959 s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
1960 error = -ENOMEM;
1961 if (s->base == NULL)
1962 goto cleanup;
1963 header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
1964 header(s->base)->h_blocks = cpu_to_le32(1);
1965 header(s->base)->h_refcount = cpu_to_le32(1);
1966 s->first = ENTRY(header(s->base)+1);
1967 s->here = ENTRY(header(s->base)+1);
1968 s->end = s->base + sb->s_blocksize;
1969 }
1970
error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
1972 if (error == -EFSCORRUPTED)
1973 goto bad_block;
1974 if (error)
1975 goto cleanup;
1976
1977 if (i->value && s->here->e_value_inum) {
/*
 * A ref count on the ea_inode was taken as part of the call to
 * ext4_xattr_set_entry() above.  We would like to drop this extra ref,
 * but we have to wait until the xattr block is initialized and has its
 * own ref count on the ea_inode.
 */
1984 ea_ino = le32_to_cpu(s->here->e_value_inum);
1985 error = ext4_xattr_inode_iget(inode, ea_ino,
1986 le32_to_cpu(s->here->e_hash),
1987 &ea_inode);
1988 if (error) {
1989 ea_inode = NULL;
1990 goto cleanup;
1991 }
1992 }
1993
1994 inserted:
1995 if (!IS_LAST_ENTRY(s->first)) {
1996 new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
1997 &ce);
1998 if (new_bh) {
1999
2000 if (new_bh == bs->bh)
2001 ea_bdebug(new_bh, "keeping");
2002 else {
2003 u32 ref;
2004
2005 WARN_ON_ONCE(dquot_initialize_needed(inode));
2006
/* The old block will be released after updating the inode. */
2009 error = dquot_alloc_block(inode,
2010 EXT4_C2B(EXT4_SB(sb), 1));
2011 if (error)
2012 goto cleanup;
2013 BUFFER_TRACE(new_bh, "get_write_access");
2014 error = ext4_journal_get_write_access(
2015 handle, sb, new_bh,
2016 EXT4_JTR_NONE);
2017 if (error)
2018 goto cleanup_dquot;
2019 lock_buffer(new_bh);
/*
 * We have to be careful about races with adding references to the
 * xattr block.  Once we hold the buffer lock, the block's state is
 * stable, so we can check whether the additional reference still fits.
 */
2027 ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
2028 if (ref > EXT4_XATTR_REFCOUNT_MAX) {
/*
 * Undo everything and check the mb_cache again.
 */
2033 unlock_buffer(new_bh);
2034 dquot_free_block(inode,
2035 EXT4_C2B(EXT4_SB(sb),
2036 1));
2037 brelse(new_bh);
2038 mb_cache_entry_put(ea_block_cache, ce);
2039 ce = NULL;
2040 new_bh = NULL;
2041 goto inserted;
2042 }
2043 BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
2044 if (ref == EXT4_XATTR_REFCOUNT_MAX)
2045 ce->e_reusable = 0;
2046 ea_bdebug(new_bh, "reusing; refcount now=%d",
2047 ref);
2048 ext4_xattr_block_csum_set(inode, new_bh);
2049 unlock_buffer(new_bh);
2050 error = ext4_handle_dirty_metadata(handle,
2051 inode,
2052 new_bh);
2053 if (error)
2054 goto cleanup_dquot;
2055 }
2056 mb_cache_entry_touch(ea_block_cache, ce);
2057 mb_cache_entry_put(ea_block_cache, ce);
2058 ce = NULL;
2059 } else if (bs->bh && s->base == bs->bh->b_data) {
2060
2061 ea_bdebug(bs->bh, "keeping this block");
2062 ext4_xattr_block_cache_insert(ea_block_cache, bs->bh);
2063 new_bh = bs->bh;
2064 get_bh(new_bh);
2065 } else {
/* We need to allocate a new block. */
2067 ext4_fsblk_t goal, block;
2068
2069 WARN_ON_ONCE(dquot_initialize_needed(inode));
2070
2071 goal = ext4_group_first_block_no(sb,
2072 EXT4_I(inode)->i_block_group);
2073
/* Non-extent files can't have physical blocks past 2^32. */
2075 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
2076 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
2077
2078 block = ext4_new_meta_blocks(handle, inode, goal, 0,
2079 NULL, &error);
2080 if (error)
2081 goto cleanup;
2082
2083 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
2084 BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
2085
2086 ea_idebug(inode, "creating block %llu",
2087 (unsigned long long)block);
2088
2089 new_bh = sb_getblk(sb, block);
2090 if (unlikely(!new_bh)) {
2091 error = -ENOMEM;
2092 getblk_failed:
2093 ext4_free_blocks(handle, inode, NULL, block, 1,
2094 EXT4_FREE_BLOCKS_METADATA);
2095 goto cleanup;
2096 }
2097 error = ext4_xattr_inode_inc_ref_all(handle, inode,
2098 ENTRY(header(s->base)+1));
2099 if (error)
2100 goto getblk_failed;
2101 if (ea_inode) {
2102
2103 error = ext4_xattr_inode_dec_ref(handle,
2104 ea_inode);
2105 if (error)
2106 ext4_warning_inode(ea_inode,
2107 "dec ref error=%d",
2108 error);
2109 iput(ea_inode);
2110 ea_inode = NULL;
2111 }
2112
2113 lock_buffer(new_bh);
2114 error = ext4_journal_get_create_access(handle, sb,
2115 new_bh, EXT4_JTR_NONE);
2116 if (error) {
2117 unlock_buffer(new_bh);
2118 error = -EIO;
2119 goto getblk_failed;
2120 }
2121 memcpy(new_bh->b_data, s->base, new_bh->b_size);
2122 ext4_xattr_block_csum_set(inode, new_bh);
2123 set_buffer_uptodate(new_bh);
2124 unlock_buffer(new_bh);
2125 ext4_xattr_block_cache_insert(ea_block_cache, new_bh);
2126 error = ext4_handle_dirty_metadata(handle, inode,
2127 new_bh);
2128 if (error)
2129 goto cleanup;
2130 }
2131 }
2132
2133 if (old_ea_inode_quota)
2134 ext4_xattr_inode_free_quota(inode, NULL, old_ea_inode_quota);
2135
/* Update the inode. */
2137 EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
2138
/* Drop the previous xattr block. */
2140 if (bs->bh && bs->bh != new_bh) {
2141 struct ext4_xattr_inode_array *ea_inode_array = NULL;
2142
2143 ext4_xattr_release_block(handle, inode, bs->bh,
2144 &ea_inode_array,
0 /* extra_credits */);
2146 ext4_xattr_inode_array_free(ea_inode_array);
2147 }
2148 error = 0;
2149
2150 cleanup:
2151 if (ea_inode) {
2152 int error2;
2153
2154 error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
2155 if (error2)
2156 ext4_warning_inode(ea_inode, "dec ref error=%d",
2157 error2);
2158
2159
2160 if (error)
2161 ext4_xattr_inode_free_quota(inode, ea_inode,
2162 i_size_read(ea_inode));
2163 iput(ea_inode);
2164 }
2165 if (ce)
2166 mb_cache_entry_put(ea_block_cache, ce);
2167 brelse(new_bh);
2168 if (!(bs->bh && s->base == bs->bh->b_data))
2169 kfree(s->base);
2170
2171 return error;
2172
2173 cleanup_dquot:
2174 dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
2175 goto cleanup;
2176
2177 bad_block:
2178 EXT4_ERROR_INODE(inode, "bad block %llu",
2179 EXT4_I(inode)->i_file_acl);
2180 goto cleanup;
2181
2182 #undef header
2183 }
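/*
 * Sharing sketch for ext4_xattr_block_set(): once the candidate block is
 * fully built, it is looked up in the mb_cache by h_hash.  A block with
 * identical contents simply has its h_refcount bumped (bounded by
 * EXT4_XATTR_REFCOUNT_MAX) instead of allocating a new one, which is how
 * many inodes with the same set of attributes end up sharing a single
 * xattr block on disk.
 */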
2184
2185 int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
2186 struct ext4_xattr_ibody_find *is)
2187 {
2188 struct ext4_xattr_ibody_header *header;
2189 struct ext4_inode *raw_inode;
2190 int error;
2191
2192 if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
2193 return 0;
2194
2195 raw_inode = ext4_raw_inode(&is->iloc);
2196 header = IHDR(inode, raw_inode);
2197 is->s.base = is->s.first = IFIRST(header);
2198 is->s.here = is->s.first;
2199 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
2200 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
2201 error = xattr_check_inode(inode, header, is->s.end);
2202 if (error)
2203 return error;
2204
2205 error = xattr_find_entry(inode, &is->s.here, is->s.end,
2206 i->name_index, i->name, 0);
2207 if (error && error != -ENODATA)
2208 return error;
2209 is->s.not_found = error;
2210 }
2211 return 0;
2212 }
2213
2214 int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
2215 struct ext4_xattr_info *i,
2216 struct ext4_xattr_ibody_find *is)
2217 {
2218 struct ext4_xattr_ibody_header *header;
2219 struct ext4_xattr_search *s = &is->s;
2220 int error;
2221
2222 if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
2223 return -ENOSPC;
2224
error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
2226 if (error)
2227 return error;
2228 header = IHDR(inode, ext4_raw_inode(&is->iloc));
2229 if (!IS_LAST_ENTRY(s->first)) {
2230 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
2231 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
2232 } else {
2233 header->h_magic = cpu_to_le32(0);
2234 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
2235 }
2236 return 0;
2237 }
2238
2239 static int ext4_xattr_value_same(struct ext4_xattr_search *s,
2240 struct ext4_xattr_info *i)
2241 {
2242 void *value;
2243
/* When e_value_inum is set, the value is stored externally. */
2245 if (s->here->e_value_inum)
2246 return 0;
2247 if (le32_to_cpu(s->here->e_value_size) != i->value_len)
2248 return 0;
2249 value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs);
2250 return !memcmp(value, i->value, i->value_len);
2251 }
2252
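/*
 * Read and verify the inode's external xattr block, if any.  Returns NULL
 * when the inode has no xattr block, an ERR_PTR() on read or check failure,
 * and the buffer head otherwise.
 */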
2253 static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
2254 {
2255 struct buffer_head *bh;
2256 int error;
2257
2258 if (!EXT4_I(inode)->i_file_acl)
2259 return NULL;
2260 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2261 if (IS_ERR(bh))
2262 return bh;
2263 error = ext4_xattr_check_block(inode, bh);
2264 if (error) {
2265 brelse(bh);
2266 return ERR_PTR(error);
2267 }
2268 return bh;
2269 }
2270
2271 /*
2272 * ext4_xattr_set_handle()
2273 *
2274 * Create, replace or remove an extended attribute for this inode.  Value
2275 * is NULL to remove an existing extended attribute, and non-NULL to
2276 * either replace an existing extended attribute, or create a new extended
2277 * attribute.  The flags XATTR_REPLACE and XATTR_CREATE
2278 * specify that an extended attribute must exist and must not exist
2279 * previous to the call, respectively.
2280 *
2281 * Returns 0, or a negative error number on failure.
2282 */
2283 int
2284 ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
2285 const char *name, const void *value, size_t value_len,
2286 int flags)
2287 {
2288 struct ext4_xattr_info i = {
2289 .name_index = name_index,
2290 .name = name,
2291 .value = value,
2292 .value_len = value_len,
2293 .in_inode = 0,
2294 };
2295 struct ext4_xattr_ibody_find is = {
2296 .s = { .not_found = -ENODATA, },
2297 };
2298 struct ext4_xattr_block_find bs = {
2299 .s = { .not_found = -ENODATA, },
2300 };
2301 int no_expand;
2302 int error;
2303
2304 if (!name)
2305 return -EINVAL;
2306 if (strlen(name) > 255)
2307 return -ERANGE;
2308
2309 ext4_write_lock_xattr(inode, &no_expand);
2310
2311 /* Check journal credits under write lock. */
2312 if (ext4_handle_valid(handle)) {
2313 struct buffer_head *bh;
2314 int credits;
2315
2316 bh = ext4_xattr_get_block(inode);
2317 if (IS_ERR(bh)) {
2318 error = PTR_ERR(bh);
2319 goto cleanup;
2320 }
2321
2322 credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
2323 value_len,
2324 flags & XATTR_CREATE);
2325 brelse(bh);
2326
2327 if (jbd2_handle_buffer_credits(handle) < credits) {
2328 error = -ENOSPC;
2329 goto cleanup;
2330 }
2331 WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
2332 }
2333
2334 error = ext4_reserve_inode_write(handle, inode, &is.iloc);
2335 if (error)
2336 goto cleanup;
2337
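/*
 * A freshly created inode may not have had its on-disk body initialised
 * yet; zero it so the xattr code starts from a clean slate.
 */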
2338 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
2339 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
2340 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2341 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
2342 }
2343
2344 error = ext4_xattr_ibody_find(inode, &i, &is);
2345 if (error)
2346 goto cleanup;
2347 if (is.s.not_found)
2348 error = ext4_xattr_block_find(inode, &i, &bs);
2349 if (error)
2350 goto cleanup;
2351 if (is.s.not_found && bs.s.not_found) {
2352 error = -ENODATA;
2353 if (flags & XATTR_REPLACE)
2354 goto cleanup;
2355 error = 0;
2356 if (!value)
2357 goto cleanup;
2358 } else {
2359 error = -EEXIST;
2360 if (flags & XATTR_CREATE)
2361 goto cleanup;
2362 }
2363
2364 if (!value) {
2365 if (!is.s.not_found)
2366 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
2367 else if (!bs.s.not_found)
2368 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2369 } else {
2370 error = 0;
2371 /* Xattr value did not change? Save us some work and bail out. */
2372 if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i))
2373 goto cleanup;
2374 if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
2375 goto cleanup;
2376
2377 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2378 (EXT4_XATTR_SIZE(i.value_len) >
2379 EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
2380 i.in_inode = 1;
2381 retry_inode:
2382 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
2383 if (!error && !bs.s.not_found) {
2384 i.value = NULL;
2385 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2386 } else if (error == -ENOSPC) {
2387 if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
2388 brelse(bs.bh);
2389 bs.bh = NULL;
2390 error = ext4_xattr_block_find(inode, &i, &bs);
2391 if (error)
2392 goto cleanup;
2393 }
2394 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2395 if (!error && !is.s.not_found) {
2396 i.value = NULL;
2397 error = ext4_xattr_ibody_set(handle, inode, &i,
2398 &is);
2399 } else if (error == -ENOSPC) {
2400 /*
2401 * Xattr does not fit in the block, store at
2402 * external inode if possible.
2403 */
2404 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2405 i.value_len && !i.in_inode) {
2406 i.in_inode = 1;
2407 goto retry_inode;
2408 }
2409 }
2410 }
2411 }
2412 if (!error) {
2413 ext4_xattr_update_super_block(handle, inode->i_sb);
2414 inode->i_ctime = current_time(inode);
2415 if (!value)
2416 no_expand = 0;
2417 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
2418 /*
2419 * The bh is consumed by ext4_mark_iloc_dirty, even with
2420 * error != 0.
2421 */
2422 is.iloc.bh = NULL;
2423 if (IS_SYNC(inode))
2424 ext4_handle_sync(handle);
2425 }
2426 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
2427
2428 cleanup:
2429 brelse(is.iloc.bh);
2430 brelse(bs.bh);
2431 ext4_write_unlock_xattr(inode, &no_expand);
2432 return error;
2433 }
2434
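/*
 * ext4_xattr_set_credits()
 *
 * Estimate how many journal credits a subsequent xattr set will need,
 * based on the inode's current xattr block (if any).  Returns 0 with the
 * estimate in *credits, or a negative error if the block cannot be read.
 * Callers typically pass the result to ext4_journal_start(inode,
 * EXT4_HT_XATTR, credits).
 */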
2435 int ext4_xattr_set_credits(struct inode *inode, size_t value_len,
2436 bool is_create, int *credits)
2437 {
2438 struct buffer_head *bh;
2439 int err;
2440
2441 *credits = 0;
2442
2443 if (!EXT4_SB(inode->i_sb)->s_journal)
2444 return 0;
2445
2446 down_read(&EXT4_I(inode)->xattr_sem);
2447
2448 bh = ext4_xattr_get_block(inode);
2449 if (IS_ERR(bh)) {
2450 err = PTR_ERR(bh);
2451 } else {
2452 *credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
2453 value_len, is_create);
2454 brelse(bh);
2455 err = 0;
2456 }
2457
2458 up_read(&EXT4_I(inode)->xattr_sem);
2459 return err;
2460 }
2461
2462 /*
2463 * ext4_xattr_set()
2464 *
2465 * Like ext4_xattr_set_handle, but start from an inode.  This extended
2466 * attribute modification is a filesystem transaction by itself.
2467 *
2468 * Returns 0, or a negative error number on failure.
2469 */
2470 int
2471 ext4_xattr_set(struct inode *inode, int name_index, const char *name,
2472 const void *value, size_t value_len, int flags)
2473 {
2474 handle_t *handle;
2475 struct super_block *sb = inode->i_sb;
2476 int error, retries = 0;
2477 int credits;
2478
2479 error = dquot_initialize(inode);
2480 if (error)
2481 return error;
2482
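/*
 * The credit estimate depends on the current xattr layout, so it is
 * recomputed on every retry after an allocation failure.
 */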
2483 retry:
2484 error = ext4_xattr_set_credits(inode, value_len, flags & XATTR_CREATE,
2485 &credits);
2486 if (error)
2487 return error;
2488
2489 handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
2490 if (IS_ERR(handle)) {
2491 error = PTR_ERR(handle);
2492 } else {
2493 int error2;
2494
2495 error = ext4_xattr_set_handle(handle, inode, name_index, name,
2496 value, value_len, flags);
2497 error2 = ext4_journal_stop(handle);
2498 if (error == -ENOSPC &&
2499 ext4_should_retry_alloc(sb, &retries))
2500 goto retry;
2501 if (error == 0)
2502 error = error2;
2503 }
2504 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, NULL);
2505
2506 return error;
2507 }
2508
2509 /*
2510 * Shift the EA entries in the inode to create space for the increased
2511 * i_extra_isize.
2512 */
2513 static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
2514 int value_offs_shift, void *to,
2515 void *from, size_t n)
2516 {
2517 struct ext4_xattr_entry *last = entry;
2518 int new_offs;
2519
2520 /* We always shift xattr headers further thus offsets get lower */
2521 BUG_ON(value_offs_shift > 0);
2522
2523 /* Adjust the value offsets of the entries */
2524 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
2525 if (!last->e_value_inum && last->e_value_size) {
2526 new_offs = le16_to_cpu(last->e_value_offs) +
2527 value_offs_shift;
2528 last->e_value_offs = cpu_to_le16(new_offs);
2529 }
2530 }
2531
2532 memmove(to, from, n);
2533 }
2534
2535 /*
2536 * Move xattr pointed to by 'entry' from inode into external xattr block
2537 */
2538 static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
2539 struct ext4_inode *raw_inode,
2540 struct ext4_xattr_entry *entry)
2541 {
2542 struct ext4_xattr_ibody_find *is = NULL;
2543 struct ext4_xattr_block_find *bs = NULL;
2544 char *buffer = NULL, *b_entry_name = NULL;
2545 size_t value_size = le32_to_cpu(entry->e_value_size);
2546 struct ext4_xattr_info i = {
2547 .value = NULL,
2548 .value_len = 0,
2549 .name_index = entry->e_name_index,
2550 .in_inode = !!entry->e_value_inum,
2551 };
2552 struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
2553 int error;
2554
2555 is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
2556 bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
2557 buffer = kmalloc(value_size, GFP_NOFS);
2558 b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
2559 if (!is || !bs || !buffer || !b_entry_name) {
2560 error = -ENOMEM;
2561 goto out;
2562 }
2563
2564 is->s.not_found = -ENODATA;
2565 bs->s.not_found = -ENODATA;
2566 is->iloc.bh = NULL;
2567 bs->bh = NULL;
2568
2569 /* Save the entry name and the entry value */
2570 if (entry->e_value_inum) {
2571 error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
2572 if (error)
2573 goto out;
2574 } else {
2575 size_t value_offs = le16_to_cpu(entry->e_value_offs);
2576 memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
2577 }
2578
2579 memcpy(b_entry_name, entry->e_name, entry->e_name_len);
2580 b_entry_name[entry->e_name_len] = '\0';
2581 i.name = b_entry_name;
2582
2583 error = ext4_get_inode_loc(inode, &is->iloc);
2584 if (error)
2585 goto out;
2586
2587 error = ext4_xattr_ibody_find(inode, &i, is);
2588 if (error)
2589 goto out;
2590
2591 /* Remove the chosen entry from the inode */
2592 error = ext4_xattr_ibody_set(handle, inode, &i, is);
2593 if (error)
2594 goto out;
2595
2596 i.value = buffer;
2597 i.value_len = value_size;
2598 error = ext4_xattr_block_find(inode, &i, bs);
2599 if (error)
2600 goto out;
2601
2602 /* Add the entry with its value into the external xattr block */
2603 error = ext4_xattr_block_set(handle, inode, &i, bs);
2604 if (error)
2605 goto out;
2606 error = 0;
2607 out:
2608 kfree(b_entry_name);
2609 kfree(buffer);
2610 if (is)
2611 brelse(is->iloc.bh);
2612 if (bs)
2613 brelse(bs->bh);
2614 kfree(is);
2615 kfree(bs);
2616
2617 return error;
2618 }
2619
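/*
 * Make at least @isize_diff bytes of room in the inode body by moving
 * xattr entries out to the external xattr block until enough in-inode
 * space is free; system.data is never moved.
 */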
2620 static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
2621 struct ext4_inode *raw_inode,
2622 int isize_diff, size_t ifree,
2623 size_t bfree, int *total_ino)
2624 {
2625 struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
2626 struct ext4_xattr_entry *small_entry;
2627 struct ext4_xattr_entry *entry;
2628 struct ext4_xattr_entry *last;
2629 unsigned int entry_size;
2630 unsigned int total_size;
2631 unsigned int min_total_size;
2632 int error;
2633
2634 while (isize_diff > ifree) {
2635 entry = NULL;
2636 small_entry = NULL;
2637 min_total_size = ~0U;
2638 last = IFIRST(header);
2639 /* Find the entry best suited to be pushed into the EA block */
2640 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
2641 /* never move system.data out of the inode */
2642 if ((last->e_name_len == 4) &&
2643 (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
2644 !memcmp(last->e_name, "data", 4))
2645 continue;
2646 total_size = EXT4_XATTR_LEN(last->e_name_len);
2647 if (!last->e_value_inum)
2648 total_size += EXT4_XATTR_SIZE(
2649 le32_to_cpu(last->e_value_size));
2650 if (total_size <= bfree &&
2651 total_size < min_total_size) {
2652 if (total_size + ifree < isize_diff) {
2653 small_entry = last;
2654 } else {
2655 entry = last;
2656 min_total_size = total_size;
2657 }
2658 }
2659 }
2660
2661 if (entry == NULL) {
2662 if (small_entry == NULL)
2663 return -ENOSPC;
2664 entry = small_entry;
2665 }
2666
2667 entry_size = EXT4_XATTR_LEN(entry->e_name_len);
2668 total_size = entry_size;
2669 if (!entry->e_value_inum)
2670 total_size += EXT4_XATTR_SIZE(
2671 le32_to_cpu(entry->e_value_size));
2672 error = ext4_xattr_move_to_block(handle, inode, raw_inode,
2673 entry);
2674 if (error)
2675 return error;
2676
2677 *total_ino -= entry_size;
2678 ifree += total_size;
2679 bfree -= total_size;
2680 }
2681
2682 return 0;
2683 }
2684
2685 /*
2686 * Expand an inode by new_extra_isize bytes when EAs are present.
2687 * Returns 0 on success or negative error number on failure.
2688 */
2689 int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
2690 struct ext4_inode *raw_inode, handle_t *handle)
2691 {
2692 struct ext4_xattr_ibody_header *header;
2693 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2694 static unsigned int mnt_count;
2695 size_t min_offs;
2696 size_t ifree, bfree;
2697 int total_ino;
2698 void *base, *end;
2699 int error = 0, tried_min_extra_isize = 0;
2700 int s_min_extra_isize = le16_to_cpu(sbi->s_es->s_min_extra_isize);
2701 int isize_diff;
2702
2703 retry:
2704 isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
2705 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
2706 return 0;
2707
2708 header = IHDR(inode, raw_inode);
2709
2710 /*
2711 * Check if enough free space is available in the inode to shift the
2712 * entries ahead by new_extra_isize.
2713 */
2714
2715 base = IFIRST(header);
2716 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
2717 min_offs = end - base;
2718 total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
2719
2720 error = xattr_check_inode(inode, header, end);
2721 if (error)
2722 goto cleanup;
2723
2724 ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
2725 if (ifree >= isize_diff)
2726 goto shift;
2727
2728 /*
2729 * Enough free space isn't available in the inode, check if
2730 * EA blocks can hold new_extra_isize bytes.
2731 */
2732 if (EXT4_I(inode)->i_file_acl) {
2733 struct buffer_head *bh;
2734
2735 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2736 if (IS_ERR(bh)) {
2737 error = PTR_ERR(bh);
2738 goto cleanup;
2739 }
2740 error = ext4_xattr_check_block(inode, bh);
2741 if (error) {
2742 brelse(bh);
2743 goto cleanup;
2744 }
2745 base = BHDR(bh);
2746 end = bh->b_data + bh->b_size;
2747 min_offs = end - base;
2748 bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
2749 NULL);
2750 brelse(bh);
2751 if (bfree + ifree < isize_diff) {
2752 if (!tried_min_extra_isize && s_min_extra_isize) {
2753 tried_min_extra_isize++;
2754 new_extra_isize = s_min_extra_isize;
2755 goto retry;
2756 }
2757 error = -ENOSPC;
2758 goto cleanup;
2759 }
2760 } else {
2761 bfree = inode->i_sb->s_blocksize;
2762 }
2763
2764 error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
2765 isize_diff, ifree, bfree,
2766 &total_ino);
2767 if (error) {
2768 if (error == -ENOSPC && !tried_min_extra_isize &&
2769 s_min_extra_isize) {
2770 tried_min_extra_isize++;
2771 new_extra_isize = s_min_extra_isize;
2772 goto retry;
2773 }
2774 goto cleanup;
2775 }
2776 shift:
2777 /* Adjust the offsets and shift the remaining entries ahead */
2778 ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
2779 - new_extra_isize, (void *)raw_inode +
2780 EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
2781 (void *)header, total_ino);
2782 EXT4_I(inode)->i_extra_isize = new_extra_isize;
2783
2784 cleanup:
2785 if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
2786 ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
2787 inode->i_ino);
2788 mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count);
2789 }
2790 return error;
2791 }
2792
2793 #define EIA_INCR 16 /* must be 2^n */
2794 #define EIA_MASK (EIA_INCR - 1)
2795
2796 /*
2797 * Add the large xattr @inode into @ea_inode_array for deferred iput().
2798 * If @ea_inode_array is new or full it will be grown and the old contents copied over.
2799 */
2800 static int
2801 ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
2802 struct inode *inode)
2803 {
2804 if (*ea_inode_array == NULL) {
2805 /*
2806 * Start with 15 inodes, so the allocation fits into a power-of-two
2807 * size: the header plus inodes[EIA_MASK] pointers.
2808 */
2809 (*ea_inode_array) =
2810 kmalloc(offsetof(struct ext4_xattr_inode_array,
2811 inodes[EIA_MASK]),
2812 GFP_NOFS);
2813 if (*ea_inode_array == NULL)
2814 return -ENOMEM;
2815 (*ea_inode_array)->count = 0;
2816 } else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
2817 /* expand the array once all 15 + n * 16 slots are full */
2818 struct ext4_xattr_inode_array *new_array = NULL;
2819 int count = (*ea_inode_array)->count;
2820
2821 /* Allocate room for 'count + EIA_INCR' inode pointers plus the header. */
2822 new_array = kmalloc(
2823 offsetof(struct ext4_xattr_inode_array,
2824 inodes[count + EIA_INCR]),
2825 GFP_NOFS);
2826 if (new_array == NULL)
2827 return -ENOMEM;
2828 memcpy(new_array, *ea_inode_array,
2829 offsetof(struct ext4_xattr_inode_array, inodes[count]));
2830 kfree(*ea_inode_array);
2831 *ea_inode_array = new_array;
2832 }
2833 (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
2834 return 0;
2835 }
2836
2837 /*
2838 * ext4_xattr_delete_inode()
2839 *
2840 * Free extended attribute resources associated with this inode.  Traverse
2841 * all entries and decrement reference on any xattr inodes associated with this
2842 * inode.  This is called immediately before an inode is freed.  We have exclusive
2843 * access to the inode.  If an orphan inode is deleted it will also release its
2844 * references on xattr block and xattr inodes.
2845 */
2846 int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
2847 struct ext4_xattr_inode_array **ea_inode_array,
2848 int extra_credits)
2849 {
2850 struct buffer_head *bh = NULL;
2851 struct ext4_xattr_ibody_header *header;
2852 struct ext4_iloc iloc = { .bh = NULL };
2853 struct ext4_xattr_entry *entry;
2854 struct inode *ea_inode;
2855 int error;
2856
2857 error = ext4_journal_ensure_credits(handle, extra_credits,
2858 ext4_free_metadata_revoke_credits(inode->i_sb, 1));
2859 if (error < 0) {
2860 EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error);
2861 goto cleanup;
2862 }
2863
2864 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2865 ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
2866
2867 error = ext4_get_inode_loc(inode, &iloc);
2868 if (error) {
2869 EXT4_ERROR_INODE(inode, "inode loc (error %d)", error);
2870 goto cleanup;
2871 }
2872
2873 error = ext4_journal_get_write_access(handle, inode->i_sb,
2874 iloc.bh, EXT4_JTR_NONE);
2875 if (error) {
2876 EXT4_ERROR_INODE(inode, "write access (error %d)",
2877 error);
2878 goto cleanup;
2879 }
2880
2881 header = IHDR(inode, ext4_raw_inode(&iloc));
2882 if (header->h_magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2883 ext4_xattr_inode_dec_ref_all(handle, inode, iloc.bh,
2884 IFIRST(header),
2885 false /* block_csum */,
2886 ea_inode_array,
2887 extra_credits,
2888 false /* skip_quota */);
2889 }
2890
2891 if (EXT4_I(inode)->i_file_acl) {
2892 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2893 if (IS_ERR(bh)) {
2894 error = PTR_ERR(bh);
2895 if (error == -EIO) {
2896 EXT4_ERROR_INODE_ERR(inode, EIO,
2897 "block %llu read error",
2898 EXT4_I(inode)->i_file_acl);
2899 }
2900 bh = NULL;
2901 goto cleanup;
2902 }
2903 error = ext4_xattr_check_block(inode, bh);
2904 if (error)
2905 goto cleanup;
2906
2907 if (ext4_has_feature_ea_inode(inode->i_sb)) {
2908 for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
2909 entry = EXT4_XATTR_NEXT(entry)) {
2910 if (!entry->e_value_inum)
2911 continue;
2912 error = ext4_xattr_inode_iget(inode,
2913 le32_to_cpu(entry->e_value_inum),
2914 le32_to_cpu(entry->e_hash),
2915 &ea_inode);
2916 if (error)
2917 continue;
2918 ext4_xattr_inode_free_quota(inode, ea_inode,
2919 le32_to_cpu(entry->e_value_size));
2920 iput(ea_inode);
2921 }
2922
2923 }
2924
2925 ext4_xattr_release_block(handle, inode, bh, ea_inode_array,
2926 extra_credits);
2927 /*
2928 * Update i_file_acl value in the same transaction that releases
2929 * block.
2930 */
2931 EXT4_I(inode)->i_file_acl = 0;
2932 error = ext4_mark_inode_dirty(handle, inode);
2933 if (error) {
2934 EXT4_ERROR_INODE(inode, "mark inode dirty (error %d)",
2935 error);
2936 goto cleanup;
2937 }
2938 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
2939 }
2940 error = 0;
2941 cleanup:
2942 brelse(iloc.bh);
2943 brelse(bh);
2944 return error;
2945 }
2946
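/*
 * Drop the references taken on the xattr inodes collected in
 * @ea_inode_array and free the array itself.
 */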
2947 void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
2948 {
2949 int idx;
2950
2951 if (ea_inode_array == NULL)
2952 return;
2953
2954 for (idx = 0; idx < ea_inode_array->count; ++idx)
2955 iput(ea_inode_array->inodes[idx]);
2956 kfree(ea_inode_array);
2957 }
2958
2959 /*
2960 * ext4_xattr_block_cache_insert()
2961 *
2962 * Create a new entry in the extended attribute block cache, and insert
2963 * it unless such an entry is already in the cache.
2964 *
2965 * The cache is only an optimization, so insertion failures are ignored.
2966 */
2967 static void
2968 ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
2969 struct buffer_head *bh)
2970 {
2971 struct ext4_xattr_header *header = BHDR(bh);
2972 __u32 hash = le32_to_cpu(header->h_hash);
2973 int reusable = le32_to_cpu(header->h_refcount) <
2974 EXT4_XATTR_REFCOUNT_MAX;
2975 int error;
2976
2977 if (!ea_block_cache)
2978 return;
2979 error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash,
2980 bh->b_blocknr, reusable);
2981 if (error) {
2982 if (error == -EBUSY)
2983 ea_bdebug(bh, "already in cache");
2984 } else
2985 ea_bdebug(bh, "inserting [%x]", (int)hash);
2986 }
2987
2988 /*
2989 * ext4_xattr_cmp()
2990 *
2991 * Compare two extended attribute blocks for equality, entry by entry
2992 * (names, sizes, flags and in-block values).
2993 *
2994 * Returns 0 if the blocks are equal and 1 if they differ.
2995 */
2996 static int
2997 ext4_xattr_cmp(struct ext4_xattr_header *header1,
2998 struct ext4_xattr_header *header2)
2999 {
3000 struct ext4_xattr_entry *entry1, *entry2;
3001
3002 entry1 = ENTRY(header1+1);
3003 entry2 = ENTRY(header2+1);
3004 while (!IS_LAST_ENTRY(entry1)) {
3005 if (IS_LAST_ENTRY(entry2))
3006 return 1;
3007 if (entry1->e_hash != entry2->e_hash ||
3008 entry1->e_name_index != entry2->e_name_index ||
3009 entry1->e_name_len != entry2->e_name_len ||
3010 entry1->e_value_size != entry2->e_value_size ||
3011 entry1->e_value_inum != entry2->e_value_inum ||
3012 memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
3013 return 1;
3014 if (!entry1->e_value_inum &&
3015 memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
3016 (char *)header2 + le16_to_cpu(entry2->e_value_offs),
3017 le32_to_cpu(entry1->e_value_size)))
3018 return 1;
3019
3020 entry1 = EXT4_XATTR_NEXT(entry1);
3021 entry2 = EXT4_XATTR_NEXT(entry2);
3022 }
3023 if (!IS_LAST_ENTRY(entry2))
3024 return 1;
3025 return 0;
3026 }
3027
3028 /*
3029 * ext4_xattr_block_cache_find()
3030 *
3031 * Find an identical extended attribute block.
3032 *
3033 * Returns a pointer to the block found, or NULL if such a block was
3034 * not found or an error occurred.
3035 */
3036 static struct buffer_head *
3037 ext4_xattr_block_cache_find(struct inode *inode,
3038 struct ext4_xattr_header *header,
3039 struct mb_cache_entry **pce)
3040 {
3041 __u32 hash = le32_to_cpu(header->h_hash);
3042 struct mb_cache_entry *ce;
3043 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
3044
3045 if (!ea_block_cache)
3046 return NULL;
3047 if (!header->h_hash)
3048 return NULL;
3049 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
3050 ce = mb_cache_entry_find_first(ea_block_cache, hash);
3051 while (ce) {
3052 struct buffer_head *bh;
3053
3054 bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
3055 if (IS_ERR(bh)) {
3056 if (PTR_ERR(bh) == -ENOMEM)
3057 return NULL;
3058 bh = NULL;
3059 EXT4_ERROR_INODE(inode, "block %lu read error",
3060 (unsigned long)ce->e_value);
3061 } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
3062 *pce = ce;
3063 return bh;
3064 }
3065 brelse(bh);
3066 ce = mb_cache_entry_find_next(ea_block_cache, ce);
3067 }
3068 return NULL;
3069 }
3070
3071 #define NAME_HASH_SHIFT 5
3072 #define VALUE_HASH_SHIFT 16
3073
3074 /*
3075 * ext4_xattr_hash_entry()
3076 *
3077 * Compute the hash of an extended attribute.
3078 */
3079 static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
3080 size_t value_count)
3081 {
3082 __u32 hash = 0;
3083
3084 while (name_len--) {
3085 hash = (hash << NAME_HASH_SHIFT) ^
3086 (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
3087 *name++;
3088 }
3089 while (value_count--) {
3090 hash = (hash << VALUE_HASH_SHIFT) ^
3091 (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
3092 le32_to_cpu(*value++);
3093 }
3094 return cpu_to_le32(hash);
3095 }
3096
3097 #undef NAME_HASH_SHIFT
3098 #undef VALUE_HASH_SHIFT
3099
3100 #define BLOCK_HASH_SHIFT 16
3101
3102 /*
3103 * ext4_xattr_rehash()
3104 *
3105 * Re-compute the extended attribute hash value after an entry has changed.
3106 */
3107 static void ext4_xattr_rehash(struct ext4_xattr_header *header)
3108 {
3109 struct ext4_xattr_entry *here;
3110 __u32 hash = 0;
3111
3112 here = ENTRY(header+1);
3113 while (!IS_LAST_ENTRY(here)) {
3114 if (!here->e_hash) {
3115 /* Block is not shared if an entry's hash value == 0 */
3116 hash = 0;
3117 break;
3118 }
3119 hash = (hash << BLOCK_HASH_SHIFT) ^
3120 (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
3121 le32_to_cpu(here->e_hash);
3122 here = EXT4_XATTR_NEXT(here);
3123 }
3124 header->h_hash = cpu_to_le32(hash);
3125 }
3126
3127 #undef BLOCK_HASH_SHIFT
3128
3129 #define HASH_BUCKET_BITS 10
3130
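/*
 * Create an mbcache keyed by content hash; the hash table has
 * 2^HASH_BUCKET_BITS buckets.  This is used for both the xattr block
 * cache and the xattr inode cache.
 */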
3131 struct mb_cache *
3132 ext4_xattr_create_cache(void)
3133 {
3134 return mb_cache_create(HASH_BUCKET_BITS);
3135 }
3136
3137 void ext4_xattr_destroy_cache(struct mb_cache *cache)
3138 {
3139 if (cache)
3140 mb_cache_destroy(cache);
3141 }
3142