0001
0002
0003
0004
0005
0006
0007 #include <linux/slab.h>
0008 #include <linux/spinlock.h>
0009 #include <linux/completion.h>
0010 #include <linux/buffer_head.h>
0011 #include <linux/xattr.h>
0012 #include <linux/gfs2_ondisk.h>
0013 #include <linux/posix_acl_xattr.h>
0014 #include <linux/uaccess.h>
0015
0016 #include "gfs2.h"
0017 #include "incore.h"
0018 #include "acl.h"
0019 #include "xattr.h"
0020 #include "glock.h"
0021 #include "inode.h"
0022 #include "meta_io.h"
0023 #include "quota.h"
0024 #include "rgrp.h"
0025 #include "super.h"
0026 #include "trans.h"
0027 #include "util.h"
0028
0029
0030
0031
0032
0033
0034
0035
0036 static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
0037 unsigned int *size)
0038 {
0039 unsigned int jbsize = sdp->sd_jbsize;
0040
0041
0042 *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);
0043
0044 if (*size <= jbsize)
0045 return 1;
0046
0047
0048 *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
0049 (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);
0050
0051 return 0;
0052 }
0053
0054 static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
0055 {
0056 unsigned int size;
0057
0058 if (dsize > GFS2_EA_MAX_DATA_LEN)
0059 return -ERANGE;
0060
0061 ea_calc_size(sdp, nsize, dsize, &size);
0062
0063
0064 if (size > sdp->sd_jbsize)
0065 return -ERANGE;
0066
0067 return 0;
0068 }
0069
0070 static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type)
0071 {
0072 switch(sdp->sd_sb.sb_fs_format) {
0073 case GFS2_FS_FORMAT_MAX:
0074 return true;
0075
0076 case GFS2_FS_FORMAT_MIN:
0077 return type <= GFS2_EATYPE_SECURITY;
0078
0079 default:
0080 return false;
0081 }
0082 }
0083
/*
 * ea_call_t - per-record callback for ea_foreach()/ea_foreach_i()
 *
 * Return 0 to continue the walk, a positive value to stop the walk
 * successfully, or a negative errno to abort it.
 */
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);
0087
/*
 * ea_foreach_i - call @ea_call on each xattr record in one EA block
 *
 * Walks the chain of gfs2_ea_header records in @bh, validating each
 * record's bounds and type before handing it to @ea_call.  A non-zero
 * return from @ea_call ends the walk and is passed back to the caller.
 * Any structural inconsistency marks the inode inconsistent (-EIO).
 */
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		/* A zero record length would make this loop spin forever. */
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		/* The whole record must lie within the buffer. */
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!gfs2_eatype_valid(sdp, ea->ea_type))
			goto fail;
		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			/* The last record must end exactly at the buffer end. */
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
0124
/*
 * ea_foreach - iterate over all xattr records of an inode
 *
 * Reads the inode's EA block (ip->i_eattr).  If GFS2_DIF_EA_INDIRECT is
 * set, that block is an indirect block containing a zero-terminated list
 * of EA block numbers, and each referenced block is walked in turn;
 * otherwise the block itself holds the records.  @ea_call is invoked for
 * every record via ea_foreach_i().
 */
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh);
	if (error)
		return error;

	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		/* A zero pointer terminates the indirect list. */
		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
0167
/* Search key and result slot shared by gfs2_ea_find() and ea_find_i(). */
struct ea_find {
	int type;			/* GFS2_EATYPE_... to match */
	const char *name;		/* attribute name, without prefix */
	size_t namel;			/* length of name */
	struct gfs2_ea_location *ef_el;	/* where to record a match */
};
0174
/*
 * ea_find_i - ea_call_t callback matching one record against a search key
 *
 * Returns 1 (stop the walk) when the record's type, name length and name
 * all match; the match location is stored in the key's ef_el with a
 * reference taken on @bh.  Returns 0 to keep scanning.
 */
static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_location *el;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;
	if (ea->ea_type != ef->type)
		return 0;
	if (ea->ea_name_len != ef->namel)
		return 0;
	if (memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len) != 0)
		return 0;

	/* Found it: record the location and stop the walk. */
	el = ef->ef_el;
	get_bh(bh);
	el->el_bh = bh;
	el->el_ea = ea;
	el->el_prev = prev;
	return 1;
}
0198
/*
 * gfs2_ea_find - locate a named xattr record on an inode
 * @el: filled in with the record's location on success; el->el_ea is
 *	NULL (and 0 is returned) when the attribute does not exist
 *
 * Returns: 0 on success (found or not found), or -errno
 */
static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
			struct gfs2_ea_location *el)
{
	struct ea_find ef = {
		.type = type,
		.name = name,
		.namel = strlen(name),
		.ef_el = el,
	};
	int error;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	/* ea_find_i() returns 1 once it has filled in *el. */
	error = ea_foreach(ip, ea_find_i, &ef);
	return (error > 0) ? 0 : error;
}
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
/*
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed xattr
 * @bh: buffer containing @ea
 * @ea: the record whose data blocks are to be freed
 * @prev: the preceding record in the block, or NULL if @ea is first
 * @private: if non-NULL ("leave" mode), the record is only marked unused
 *	and never merged into @prev
 *
 * ea_call_t callback, used directly from gfs2_ea_dealloc() and via
 * ea_remove_unstuffed().  Frees every block in @ea's pointer array in a
 * single transaction, then either merges the record into @prev or marks
 * it unused.  Stuffed records are a no-op.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	/* Stuffed records have no separate data blocks. */
	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	/* First pass: count the allocated blocks (bn ends up on any one). */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_meta(ip->i_gl, bh);

	/* Second pass: free the blocks, batching contiguous runs. */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, rgd, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, rgd, bstart, blen);

	if (prev && !leave) {
		/* Merge the dead record into its predecessor. */
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
0323
/*
 * ea_remove_unstuffed - remove an unstuffed xattr record
 * @leave: if set, only the data blocks are freed; the record itself is
 *	kept (marked unused) so its space can be reused
 *
 * Wraps ea_dealloc_unstuffed() with the rindex update and quota hold it
 * requires.
 */
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	int error;

	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	/* In "leave" mode, pass &error so the callee sees a non-NULL private. */
	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_quota_unhold(ip);
out_alloc:
	return error;
}
0344
/* Accumulator for gfs2_listxattr()'s walk over the xattr records. */
struct ea_list {
	struct gfs2_ea_request *ei_er;	/* holds the output buffer, if any */
	unsigned int ei_size;		/* bytes emitted (or required) so far */
};
0349
/*
 * ea_list_i - ea_call_t callback that emits one listxattr entry
 *
 * Maps the on-disk ea_type to its user-visible namespace prefix and,
 * when an output buffer was supplied, appends "<prefix><name>\0" to it.
 * ei_size is advanced even without a buffer, so the caller can report
 * the total size required.  Unknown types are silently skipped.
 */
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size;
	char *prefix;
	unsigned int l;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	/* ea_foreach_i() already rejected types invalid for the format. */
	BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY &&
	       sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN);
	switch (ea->ea_type) {
	case GFS2_EATYPE_USR:
		prefix = "user.";
		l = 5;
		break;
	case GFS2_EATYPE_SYS:
		prefix = "system.";
		l = 7;
		break;
	case GFS2_EATYPE_SECURITY:
		prefix = "security.";
		l = 9;
		break;
	case GFS2_EATYPE_TRUSTED:
		prefix = "trusted.";
		l = 8;
		break;
	default:
		return 0;
	}

	/* prefix + name + trailing NUL */
	ea_size = l + ea->ea_name_len + 1;
	if (er->er_data_len) {
		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		er->er_data[ei->ei_size + ea_size - 1] = 0;
	}

	ei->ei_size += ea_size;

	return 0;
}
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
/**
 * gfs2_listxattr - List gfs2 extended attributes
 * @dentry: The dentry whose inode we are interested in
 * @buffer: The buffer to write the results in
 * @size: The size of the buffer
 *
 * Returns: actual size of data on success, -errno on error
 */
ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
	struct gfs2_ea_request er;
	struct gfs2_holder i_gh;
	int error;

	/* size == 0 means "just report the required buffer size". */
	memset(&er, 0, sizeof(struct gfs2_ea_request));
	if (size) {
		er.er_data = buffer;
		er.er_data_len = size;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	if (ip->i_eattr) {
		struct ea_list ei = { .ei_er = &er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
/*
 * gfs2_iter_unstuffed - copy unstuffed xattr data to and/or from its blocks
 * @ea: the unstuffed record whose data-block pointers are walked
 * @din: if non-NULL, data to write into the blocks (journaled)
 * @dout: if non-NULL, buffer to read the blocks' payload into
 *
 * Issues all block reads asynchronously first, then waits for and
 * processes each buffer in order.  Each block holds up to sd_jbsize
 * bytes of payload after its meta header.
 *
 * Returns: errno
 */
static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			       const char *din, char *dout)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;
	unsigned char *pos;
	unsigned cp_size;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	/* Kick off all reads without waiting (no DIO_WAIT flag). */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0,
				       bh + x);
		if (error) {
			/* Drop the buffers already obtained. */
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			/* Release this and all remaining buffers. */
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
		/* The last block may hold less than a full sd_jbsize. */
		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;

		if (dout) {
			memcpy(dout, pos, cp_size);
			dout += sdp->sd_jbsize;
		}

		if (din) {
			gfs2_trans_add_meta(ip->i_gl, bh[x]);
			memcpy(pos, din, cp_size);
			din += sdp->sd_jbsize;
		}

		amount -= sdp->sd_jbsize;
		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}
0517
/*
 * gfs2_ea_get_copy - copy an xattr's data into a caller-supplied buffer
 * @el: location of the record to read
 * @data: destination buffer
 * @size: size of @data
 *
 * Returns: the data length on success, -ERANGE if @data is too small,
 * or another -errno on read failure.
 */
static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
			    char *data, size_t size)
{
	size_t len = GFS2_EA_DATA_LEN(el->el_ea);
	int error;

	if (len > size)
		return -ERANGE;

	if (!GFS2_EA_IS_STUFFED(el->el_ea)) {
		/* Unstuffed: pull the payload out of the data blocks. */
		error = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
		return (error < 0) ? error : len;
	}

	/* Stuffed: the data sits right after the header and name. */
	memcpy(data, GFS2_EA2DATA(el->el_ea), len);
	return len;
}
0535
/*
 * gfs2_xattr_acl_get - read a system.* xattr (used for ACLs) into a new buffer
 * @name: attribute name within the system namespace
 * @ppdata: on success points to a kmalloc'd buffer the caller must free
 *
 * Returns: the data length on success, 0 if the attribute is absent or
 * empty (*ppdata untouched), or -errno on error.
 */
int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
	struct gfs2_ea_location el;
	int error;
	int len;
	char *data;

	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
	if (error)
		return error;
	/* Not found, or found but holding no data: return 0. */
	if (!el.el_ea)
		goto out;
	if (!GFS2_EA_DATA_LEN(el.el_ea))
		goto out;

	len = GFS2_EA_DATA_LEN(el.el_ea);
	data = kmalloc(len, GFP_NOFS);
	error = -ENOMEM;
	if (data == NULL)
		goto out;

	/* On success this returns len, which becomes our return value. */
	error = gfs2_ea_get_copy(ip, &el, data, len);
	if (error < 0)
		kfree(data);
	else
		*ppdata = data;
out:
	brelse(el.el_bh);
	return error;
}
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
/**
 * __gfs2_xattr_get - Get a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @buffer: The buffer to write the result into
 * @size: The size of the buffer
 * @type: The type of the extended attribute
 *
 * Returns: actual size of data on success, -errno on error
 */
static int __gfs2_xattr_get(struct inode *inode, const char *name,
			    void *buffer, size_t size, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_eattr)
		return -ENODATA;
	if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;
	/* size == 0 means "just report the attribute's length". */
	if (size)
		error = gfs2_ea_get_copy(ip, &el, buffer, size);
	else
		error = GFS2_EA_DATA_LEN(el.el_ea);
	brelse(el.el_bh);

	return error;
}
0602
/*
 * gfs2_xattr_get - xattr_handler .get entry point
 *
 * Takes a shared glock around __gfs2_xattr_get() unless this task
 * already holds the inode glock (e.g. when called back during a lookup),
 * in which case no additional holder is initialized.
 */
static int gfs2_xattr_get(const struct xattr_handler *handler,
			  struct dentry *unused, struct inode *inode,
			  const char *name, void *buffer, size_t size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	/* Only lock if we don't already hold the glock. */
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
		if (ret)
			return ret;
	} else {
		gfs2_holder_mark_uninitialized(&gh);
	}
	ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	return ret;
}
0625
0626
0627
0628
0629
0630
0631
0632
0633
/**
 * ea_alloc_blk - allocate and initialize a new extended attribute block
 * @ip: the inode the block will belong to
 * @bhp: returns the new block's buffer head
 *
 * The block is formatted as an EA block containing a single unused
 * record spanning the whole journal-block payload, and is added to the
 * current transaction.
 *
 * Returns: errno
 */
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	unsigned int n = 1;
	u64 block;
	int error;

	error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
	if (error)
		return error;
	gfs2_trans_remove_revoke(sdp, block, 1);
	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_meta(ip->i_gl, *bhp);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	/* One unused record covers the entire payload of the block. */
	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	gfs2_add_inode_blocks(&ip->i_inode, 1);

	return 0;
}
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
/**
 * ea_write - write a request's name and data into an ea record
 * @ip: the inode being modified
 * @ea: the destination record (already sized by the caller)
 * @er: the write request
 *
 * Stuffs the data in-line when it fits in a journal block; otherwise
 * allocates data blocks, copies the data into them (zero-padding the
 * last one) and fills the record's pointer array.
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of
 * ea_flags; that is the caller's responsibility.
 *
 * Returns: errno
 */
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		/* Stuffed: data follows the name in the same block. */
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);
			unsigned int n = 1;

			error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
			if (error)
				return error;
			gfs2_trans_remove_revoke(sdp, block, 1);
			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_meta(ip->i_gl, bh);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			gfs2_add_inode_blocks(&ip->i_inode, 1);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			/* Zero the unused tail of the final block. */
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		/* All of the data must have been consumed. */
		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
0733
/* Callback that writes the new ea once blocks/quota have been reserved. */
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);
0736
/*
 * ea_alloc_skeleton - common setup/teardown for ea writes that allocate blocks
 * @blks: number of new blocks the operation may need
 * @skeleton_call: does the actual work inside the transaction
 * @private: passed through to @skeleton_call
 *
 * Handles quota locking, block reservation and the transaction around
 * @skeleton_call, and dirties the inode on success.
 *
 * Returns: errno
 */
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc_parms ap = { .target = blks };
	int error;

	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
	if (error)
		return error;

	error = gfs2_quota_lock_check(ip, &ap);
	if (error)
		return error;

	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + gfs2_rg_blocks(ip, blks) +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
	return error;
}
0777
/*
 * ea_init_i - skeleton_call for the first xattr on an inode
 *
 * Allocates the inode's first EA block, points ip->i_eattr at it, and
 * writes the request into its (single, unused) record.
 */
static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}
0795
0796
0797
0798
0799
0800
/*
 * ea_init - create the very first xattr on an inode
 *
 * Builds the request and hands it to ea_alloc_skeleton()/ea_init_i(),
 * budgeting one block for the EA block itself plus, for unstuffed data,
 * one block per journal-block of data.
 */
static int ea_init(struct gfs2_inode *ip, int type, const char *name,
		   const void *data, size_t size)
{
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;
	struct gfs2_ea_request er = {
		.er_type = type,
		.er_name = name,
		.er_name_len = strlen(name),
		.er_data = (void *)data,
		.er_data_len = size,
	};

	if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
		blks += DIV_ROUND_UP(er.er_data_len, jbsize);

	return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
}
0819
/*
 * ea_split_ea - split a record's free tail space into a new record
 *
 * Shrinks @ea's rec_len to its used size and creates a new record in the
 * freed tail.  The new record takes over @ea's LAST flag (which is
 * cleared on @ea if it was set); its ea_type is left for the caller
 * (ea_write()) to fill in.
 *
 * Returns: the new record
 */
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
								ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	/* XOR clears LAST on @ea iff it was set; @new inherits it below. */
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}
0836
/*
 * ea_set_remove_stuffed - unlink the old record after a replacing set
 *
 * Merges the record into its predecessor, or marks it unused if it has
 * no predecessor or is not stuffed.  If el_prev no longer directly
 * precedes el_ea (a record in between was split by ea_split_ea()),
 * step forward to the true predecessor first.
 */
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_meta(ip->i_gl, el->el_bh);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}
0860
/* State shared by the ea_set_simple() walk and its helpers. */
struct ea_set {
	int ea_split;		/* set if the chosen record must be split first */

	struct gfs2_ea_request *es_er;	/* the set request */
	struct gfs2_ea_location *es_el;	/* existing record to remove, if any */

	struct buffer_head *es_bh;	/* block chosen by ea_set_simple() */
	struct gfs2_ea_header *es_ea;	/* record chosen by ea_set_simple() */
};
0870
/*
 * ea_set_simple_noalloc - write a stuffed request into an existing record
 *
 * Used when the request needs no new blocks; runs its own small
 * transaction.  ea_write()'s return value is not checked here because
 * the caller only takes this path for stuffed requests, for which
 * ea_write() performs no allocations and cannot fail.
 */
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, bh);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	/* Unlink the record this set is replacing, if any. */
	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
0897
/*
 * ea_set_simple_alloc - skeleton_call writing an unstuffed request into
 * the block/record previously chosen by ea_set_simple()
 *
 * Runs inside the transaction set up by ea_alloc_skeleton().
 */
static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_meta(ip->i_gl, es->es_bh);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	/* Unlink the record this set is replacing, if any. */
	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}
0919
/*
 * ea_set_simple - try to satisfy a set request within the existing EA blocks
 *
 * ea_call_t callback: for each record, checks whether the request fits
 * either in an unused record (freeing its stale data blocks first) or in
 * the free tail of a live record (which will be split).  Returns 0 to
 * keep scanning, 1 once the request has been written, or -errno.
 * ea_set_i() falls back to allocating a fresh block when the whole scan
 * returns 0.
 */
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
			       es->es_er->er_data_len, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		/* Reuse the slot, but drop its stale data blocks first. */
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
0966
/*
 * ea_set_block - write the request into a freshly allocated EA block
 * @private: if set, the gfs2_ea_location of a stuffed record to remove
 *	once the new record has been written
 *
 * If the inode already uses an indirect EA block, the new block's number
 * goes into its first free slot.  Otherwise the inode is converted to
 * the indirect layout first: a new indirect block is allocated whose
 * first pointer is the old single EA block.
 *
 * Returns: errno
 */
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		/* Find the first free slot in the indirect block. */
		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_meta(ip->i_gl, indbh);
	} else {
		u64 blk;
		unsigned int n = 1;
		/* Convert to indirect: old EA block becomes pointer zero. */
		error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
		if (error)
			return error;
		gfs2_trans_remove_revoke(sdp, blk, 1);
		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_meta(ip->i_gl, indbh);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_eattr);
		ip->i_eattr = blk;
		ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
		gfs2_add_inode_blocks(&ip->i_inode, 1);

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}
1040
/*
 * ea_set_i - create or replace an xattr record
 * @el: location of an existing record to supersede, or NULL
 *
 * First tries to place the request inside the existing EA blocks via
 * ea_set_simple(); if no room is found there, allocates new blocks
 * through ea_alloc_skeleton()/ea_set_block().
 *
 * Returns: errno
 */
static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
		    const void *value, size_t size, struct gfs2_ea_location *el)
{
	struct gfs2_ea_request er;
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	er.er_type = type;
	er.er_name = name;
	er.er_data = (void *)value;
	er.er_name_len = strlen(name);
	er.er_data_len = size;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = &er;
	es.es_el = el;

	/* > 0 means ea_set_simple() already wrote the request. */
	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	/* Budget: new EA block (+ indirect block if not yet indirect). */
	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
}
1072
/*
 * ea_set_remove_unstuffed - remove the old unstuffed record after a set
 *
 * As in ea_set_remove_stuffed(), el_prev may be stale because a record
 * in between was split; re-derive the true predecessor before unlinking.
 */
static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
1084
/*
 * ea_remove_stuffed - remove a stuffed xattr record
 *
 * In its own small transaction, merges the record into its predecessor
 * or, if it is the first record, marks it unused.
 *
 * Returns: errno
 */
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_meta(ip->i_gl, el->el_bh);

	if (prev) {
		u32 len;

		/* Absorb the record into its predecessor. */
		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
	}

	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130 static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
1131 {
1132 struct gfs2_ea_location el;
1133 int error;
1134
1135 if (!ip->i_eattr)
1136 return -ENODATA;
1137
1138 error = gfs2_ea_find(ip, type, name, &el);
1139 if (error)
1140 return error;
1141 if (!el.el_ea)
1142 return -ENODATA;
1143
1144 if (GFS2_EA_IS_STUFFED(el.el_ea))
1145 error = ea_remove_stuffed(ip, &el);
1146 else
1147 error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);
1148
1149 brelse(el.el_bh);
1150
1151 return error;
1152 }
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
/**
 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @value: The value of the extended attribute (NULL for remove)
 * @size: The size of the @value argument
 * @flags: Create or Replace
 * @type: The type of the extended attribute
 *
 * See gfs2_xattr_remove() for details of the removal of xattrs.
 *
 * Returns: 0 on success, -errno on failure.
 */
int __gfs2_xattr_set(struct inode *inode, const char *name,
		     const void *value, size_t size, int flags, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_ea_location el;
	unsigned int namel = strlen(name);
	int error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (namel > GFS2_EA_MAX_NAME_LEN)
		return -ERANGE;

	if (value == NULL) {
		/* NULL value means remove; absence is only an error for
		   an explicit XATTR_REPLACE request. */
		error = gfs2_xattr_remove(ip, type, name);
		if (error == -ENODATA && !(flags & XATTR_REPLACE))
			error = 0;
		return error;
	}

	if (ea_check_size(sdp, namel, size))
		return -ERANGE;

	if (!ip->i_eattr) {
		if (flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, type, name, value, size);
	}

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		/* Attribute exists: fail only for XATTR_CREATE. */
		error = -EEXIST;
		if (!(flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, type, name, value, size, &el);
			/* The old unstuffed record is removed separately. */
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
		return error;
	}

	/* Attribute absent: fail only for XATTR_REPLACE. */
	error = -ENODATA;
	if (!(flags & XATTR_REPLACE))
		error = ea_set_i(ip, type, name, value, size, NULL);

	return error;
}
1226
/*
 * gfs2_xattr_set - xattr_handler .set entry point
 *
 * Takes an exclusive glock around __gfs2_xattr_set() unless this task
 * already holds the inode glock (e.g. during inode creation) — in which
 * case the existing holder must already be exclusive.
 */
static int gfs2_xattr_set(const struct xattr_handler *handler,
			  struct user_namespace *mnt_userns,
			  struct dentry *unused, struct inode *inode,
			  const char *name, const void *value,
			  size_t size, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	ret = gfs2_qa_get(ip);
	if (ret)
		return ret;

	/* Only lock if we don't already hold the glock; if we do, it
	   must be held in the exclusive state. */
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;
	} else {
		if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) {
			ret = -EIO;
			goto out;
		}
		gfs2_holder_mark_uninitialized(&gh);
	}
	ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
out:
	gfs2_qa_put(ip);
	return ret;
}
1261
/*
 * ea_dealloc_indirect - free every EA block listed in the indirect block
 *
 * Two passes over the indirect block's pointer list: the first collects
 * the resource groups involved (gfs2_rlist_add) so they can all be
 * locked before the transaction; the second frees the blocks (batching
 * contiguous runs) and zeroes the pointers.  Finally clears
 * GFS2_DIF_EA_INDIRECT; the indirect block itself is freed later by
 * ea_dealloc_block().
 *
 * Returns: errno
 */
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	/* Pass 1: collect the resource groups covering the EA blocks. */
	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_meta(ip->i_gl, indbh);

	/* Pass 2: free the blocks and clear the pointers. */
	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	rgd = NULL;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, rgd, bstart, blen);
			bstart = bn;
			rgd = gfs2_blk2rgrpd(sdp, bstart, true);
			blen = 1;
		}

		*eablk = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, rgd, bstart, blen);

	ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}
1381
/*
 * ea_dealloc_block - free the inode's (remaining) EA block
 *
 * Frees the single block still referenced by ip->i_eattr — either the
 * sole EA block, or the emptied indirect block after
 * ea_dealloc_indirect() — and clears ip->i_eattr.
 *
 * Returns: errno
 */
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, rgd, ip->i_eattr, 1);

	ip->i_eattr = 0;
	gfs2_add_inode_blocks(&ip->i_inode, -1);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
1428
1429
1430
1431
1432
1433
1434
1435
/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Frees, in order: the data blocks of every unstuffed record, the EA
 * blocks behind the indirect block (if any), and finally the block
 * ip->i_eattr points at.
 *
 * Returns: errno
 */
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	int error;

	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
	if (error)
		goto out_quota;

	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
		error = ea_dealloc_indirect(ip);
		if (error)
			goto out_quota;
	}

	error = ea_dealloc_block(ip);

out_quota:
	gfs2_quota_unhold(ip);
	return error;
}
1464
static const struct xattr_handler gfs2_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags = GFS2_EATYPE_USR,
	.get = gfs2_xattr_get,
	.set = gfs2_xattr_set,
};

static const struct xattr_handler gfs2_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = GFS2_EATYPE_SECURITY,
	.get = gfs2_xattr_get,
	.set = gfs2_xattr_set,
};

/* trusted.* xattrs are only listed for privileged (CAP_SYS_ADMIN) users. */
static bool
gfs2_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}

static const struct xattr_handler gfs2_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags = GFS2_EATYPE_TRUSTED,
	.list = gfs2_xattr_trusted_list,
	.get = gfs2_xattr_get,
	.set = gfs2_xattr_set,
};

/*
 * Handler table for the newer on-disk format; it additionally supports
 * trusted.* xattrs (see gfs2_eatype_valid()).
 */
const struct xattr_handler *gfs2_xattr_handlers_max[] = {
	/* GFS2_FS_FORMAT_MAX */
	&gfs2_xattr_trusted_handler,

	/* GFS2_FS_FORMAT_MIN */
	&gfs2_xattr_user_handler,
	&gfs2_xattr_security_handler,
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	NULL,
};

/* The older format's table: the same list minus the trusted handler. */
const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;