0001
0002
0003
0004
0005
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_format.h"
0009 #include "xfs_log_format.h"
0010 #include "xfs_shared.h"
0011 #include "xfs_trans_resv.h"
0012 #include "xfs_bit.h"
0013 #include "xfs_mount.h"
0014 #include "xfs_defer.h"
0015 #include "xfs_inode.h"
0016 #include "xfs_bmap.h"
0017 #include "xfs_quota.h"
0018 #include "xfs_trans.h"
0019 #include "xfs_buf_item.h"
0020 #include "xfs_trans_space.h"
0021 #include "xfs_trans_priv.h"
0022 #include "xfs_qm.h"
0023 #include "xfs_trace.h"
0024 #include "xfs_log.h"
0025 #include "xfs_bmap_btree.h"
0026 #include "xfs_error.h"
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
/* Slab cache for transaction dquot accounting structures. */
struct kmem_cache *xfs_dqtrx_cache;
/* Slab cache backing all incore struct xfs_dquot allocations. */
static struct kmem_cache *xfs_dquot_cache;

/*
 * Distinct lockdep classes for group and project dquot locks (user dquots
 * use the default class) so that lockdep can tell the q_qlock mutexes of
 * different quota types apart; see the lockdep_set_class() calls in
 * xfs_dquot_alloc() and the paired locking in xfs_dqlock2().
 */
static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;
0046
0047
0048
0049
/*
 * Free an incore dquot.  The dquot must already be off the LRU list and
 * unreferenced; this frees the log item's shadow vector, destroys the
 * qlock mutex and returns the memory to the dquot cache.
 */
void
xfs_qm_dqdestroy(
	struct xfs_dquot	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	/* Release the shadow buffer used when formatting the dquot log item. */
	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_cache_free(xfs_dquot_cache, dqp);
}
0062
0063
0064
0065
0066
0067
0068 void
0069 xfs_qm_adjust_dqlimits(
0070 struct xfs_dquot *dq)
0071 {
0072 struct xfs_mount *mp = dq->q_mount;
0073 struct xfs_quotainfo *q = mp->m_quotainfo;
0074 struct xfs_def_quota *defq;
0075 int prealloc = 0;
0076
0077 ASSERT(dq->q_id);
0078 defq = xfs_get_defquota(q, xfs_dquot_type(dq));
0079
0080 if (!dq->q_blk.softlimit) {
0081 dq->q_blk.softlimit = defq->blk.soft;
0082 prealloc = 1;
0083 }
0084 if (!dq->q_blk.hardlimit) {
0085 dq->q_blk.hardlimit = defq->blk.hard;
0086 prealloc = 1;
0087 }
0088 if (!dq->q_ino.softlimit)
0089 dq->q_ino.softlimit = defq->ino.soft;
0090 if (!dq->q_ino.hardlimit)
0091 dq->q_ino.hardlimit = defq->ino.hard;
0092 if (!dq->q_rtb.softlimit)
0093 dq->q_rtb.softlimit = defq->rtb.soft;
0094 if (!dq->q_rtb.hardlimit)
0095 dq->q_rtb.hardlimit = defq->rtb.hard;
0096
0097 if (prealloc)
0098 xfs_dquot_set_prealloc_limits(dq);
0099 }
0100
0101
0102 time64_t
0103 xfs_dquot_set_timeout(
0104 struct xfs_mount *mp,
0105 time64_t timeout)
0106 {
0107 struct xfs_quotainfo *qi = mp->m_quotainfo;
0108
0109 return clamp_t(time64_t, timeout, qi->qi_expiry_min,
0110 qi->qi_expiry_max);
0111 }
0112
0113
0114 time64_t
0115 xfs_dquot_set_grace_period(
0116 time64_t grace)
0117 {
0118 return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
0119 }
0120
0121
0122
0123
0124
0125 static inline void
0126 xfs_qm_adjust_res_timer(
0127 struct xfs_mount *mp,
0128 struct xfs_dquot_res *res,
0129 struct xfs_quota_limits *qlim)
0130 {
0131 ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);
0132
0133 if ((res->softlimit && res->count > res->softlimit) ||
0134 (res->hardlimit && res->count > res->hardlimit)) {
0135 if (res->timer == 0)
0136 res->timer = xfs_dquot_set_timeout(mp,
0137 ktime_get_real_seconds() + qlim->time);
0138 } else {
0139 res->timer = 0;
0140 }
0141 }
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156 void
0157 xfs_qm_adjust_dqtimers(
0158 struct xfs_dquot *dq)
0159 {
0160 struct xfs_mount *mp = dq->q_mount;
0161 struct xfs_quotainfo *qi = mp->m_quotainfo;
0162 struct xfs_def_quota *defq;
0163
0164 ASSERT(dq->q_id);
0165 defq = xfs_get_defquota(qi, xfs_dquot_type(dq));
0166
0167 xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
0168 xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
0169 xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
0170 }
0171
0172
0173
0174
/*
 * Initialize a newly allocated dquot buffer: stamp every dquot record in
 * the chunk with its magic, version, id and type, then log (or order) the
 * whole buffer in the given transaction.
 */
STATIC void
xfs_qm_init_dquot_blk(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_buf		*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dqblk	*d;
	xfs_dqid_t		curid;
	unsigned int		qflag;
	unsigned int		blftype;
	int			i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	/* Pick the quotacheck flag and buf log item type for this quota type. */
	switch (type) {
	case XFS_DQTYPE_USER:
		qflag = XFS_UQUOTA_CHKD;
		blftype = XFS_BLF_UDQUOT_BUF;
		break;
	case XFS_DQTYPE_PROJ:
		qflag = XFS_PQUOTA_CHKD;
		blftype = XFS_BLF_PDQUOT_BUF;
		break;
	case XFS_DQTYPE_GROUP:
		qflag = XFS_GQUOTA_CHKD;
		blftype = XFS_BLF_GDQUOT_BUF;
		break;
	default:
		ASSERT(0);
		return;
	}

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_type = type;
		/* Non-zero ids on bigtime filesystems use bigtime timestamps. */
		if (curid > 0 && xfs_has_bigtime(mp))
			d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
		if (xfs_has_crc(mp)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp, blftype);

	/*
	 * If the CHKD flag for this quota type is clear, use an ordered
	 * buffer instead of logging the contents; presumably this interacts
	 * with log recovery vs. quotacheck ordering -- TODO confirm against
	 * the upstream rationale.  Otherwise log the whole chunk.
	 */
	if (!(mp->m_qflags & qflag))
		xfs_trans_ordered_buf(tp, bp);
	else
		xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
0255
0256
0257
0258
0259
0260
/*
 * Compute the speculative-preallocation throttling thresholds from the
 * dquot's block limits: the high watermark is the hard limit, the low
 * watermark is the soft limit or 95% of the hard limit when no soft limit
 * is set.  q_low_space[] holds 1%, 3% and 5% of the high watermark.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
	dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
	if (!dqp->q_prealloc_lo_wmark) {
		/* No soft limit: low watermark = 95% of the hard limit. */
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);	/* do_div divides in place */
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	/* space becomes 1% of the high watermark. */
	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
0281
0282
0283
0284
0285
0286
/*
 * Allocate the ondisk block backing this dquot and initialize it as a dquot
 * chunk.  On success the initialized, locked cluster buffer is returned to
 * the caller in *bpp.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, quotip, 0);

	if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
		/*
		 * Return if this type of quotas was turned off while we
		 * didn't have the quota inode lock.
		 */
		error = -ESRCH;
		goto err_cancel;
	}

	/* Make sure adding one more extent won't overflow the extent count. */
	error = xfs_iext_count_may_overflow(quotip, XFS_DATA_FORK,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, quotip,
				XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto err_cancel;

	/* Create the block mapping for the dquot cluster. */
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
			&nmaps);
	if (error)
		goto err_cancel;

	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later.
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* Now we can just get the buffer (there's nothing to read yet). */
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error)
		goto err_cancel;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer across the commit so that it stays locked; on a
	 * successful commit ownership of the held, locked buffer passes to
	 * the caller, presumably so the incore dquot can be copied into it
	 * before the buffer is unlocked -- TODO confirm with callers.  If
	 * the commit fails the buffer is released here instead.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_trans_commit(tp);
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
	if (error) {
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
	return error;
}
0399
0400
0401
0402
0403
/*
 * Read in the ondisk dquot's cluster buffer.  Returns -ENOENT if the dquot
 * block is a hole in the quota file and -ESRCH if this quota type has been
 * switched off.  On success the locked buffer is returned in *bpp.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, qtype)) {
		/*
		 * Return if this type of quotas was turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet.
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	/* A hole means this dquot has no backing block yet. */
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * Store the blkno so that we don't have to do the
	 * mapping all the time.
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}
0465
0466
/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;

	dqp = kmem_cache_zalloc(xfs_dquot_cache, GFP_KERNEL | __GFP_NOFAIL);

	dqp->q_type = type;
	dqp->q_id = id;
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);
	/* Offset (in dquot records) of the chunk holding this id. */
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	/*
	 * Byte offset of this dquot record within its (fixed size) chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
			sizeof(struct xfs_dqblk);

	/*
	 * The flush "lock" is a counting completion; complete it once up
	 * front so that a single flush can be taken without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Give group and project dquots their own lockdep classes so that
	 * locking a user dquot together with a group/project dquot (see
	 * xfs_dqlock2) does not trigger false lockdep reports.
	 */
	switch (type) {
	case XFS_DQTYPE_USER:
		/* user dquots use the default lock class */
		break;
	case XFS_DQTYPE_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQTYPE_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	xfs_qm_dquot_logitem_init(dqp);

	XFS_STATS_INC(mp, xs_qm_dquot);
	return dqp;
}
0522
0523
0524 static bool
0525 xfs_dquot_check_type(
0526 struct xfs_dquot *dqp,
0527 struct xfs_disk_dquot *ddqp)
0528 {
0529 uint8_t ddqp_type;
0530 uint8_t dqp_type;
0531
0532 ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK;
0533 dqp_type = xfs_dquot_type(dqp);
0534
0535 if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
0536 return false;
0537
0538
0539
0540
0541
0542
0543 if (xfs_has_crc(dqp->q_mount) ||
0544 dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
0545 return ddqp_type == dqp_type;
0546
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556 return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ;
0557 }
0558
0559
/* Copy the ondisk dquot record into the incore dquot. */
STATIC int
xfs_dquot_from_disk(
	struct xfs_dquot	*dqp,
	struct xfs_buf		*bp)
{
	struct xfs_disk_dquot	*ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * Ensure that we got the type and ID we were looking for.
	 * Everything else was checked by the dquot buffer verifier.
	 */
	if (!xfs_dquot_check_type(dqp, ddqp)) {
		xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
			  "Metadata corruption detected at %pS, quota %u",
			  __this_address, dqp->q_id);
		xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
		return -EFSCORRUPTED;
	}

	/* copy everything from disk dquot to the incore dquot */
	dqp->q_type = ddqp->d_type;
	dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

	dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
	dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
	dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);

	dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
	dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
	dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);

	/*
	 * Reservation counters start out equal to the current usage; further
	 * reservations are added on top of this baseline.
	 */
	dqp->q_blk.reserved = dqp->q_blk.count;
	dqp->q_ino.reserved = dqp->q_ino.count;
	dqp->q_rtb.reserved = dqp->q_rtb.count;

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);
	return 0;
}
0608
0609
/* Copy the incore dquot into the ondisk dquot record. */
void
xfs_dquot_to_disk(
	struct xfs_disk_dquot	*ddqp,
	struct xfs_dquot	*dqp)
{
	ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	ddqp->d_version = XFS_DQUOT_VERSION;
	ddqp->d_type = dqp->q_type;
	ddqp->d_id = cpu_to_be32(dqp->q_id);
	ddqp->d_pad0 = 0;
	ddqp->d_pad = 0;

	ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
	ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
	ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
	ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
	ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
	ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);

	ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
	ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
	ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);

	/* Warning counts are always written out as zero here. */
	ddqp->d_bwarns = 0;
	ddqp->d_iwarns = 0;
	ddqp->d_rtbwarns = 0;

	ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
	ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
	ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
}
0641
0642
0643
0644
0645
0646
/*
 * Create an incore dquot for @id/@type and populate it from disk.  When
 * @can_alloc is true, allocate the backing disk block if it does not exist
 * yet.  On success *dqpp owns the new dquot; on failure the dquot is
 * destroyed and *dqpp is set to NULL.
 */
static int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_buf		*bp;
	int			error;

	dqp = xfs_dquot_alloc(mp, id, type);
	trace_xfs_dqread(dqp);

	/* Try to read the buffer, allocating the block if necessary. */
	error = xfs_dquot_disk_read(mp, dqp, &bp);
	if (error == -ENOENT && can_alloc)
		error = xfs_dquot_disk_alloc(dqp, &bp);
	if (error)
		goto err;

	/*
	 * At this point we should have a clean locked buffer.  Copy the
	 * data to the incore dquot and release the buffer, since the
	 * incore dquot has its own locking protocol and we needn't tie up
	 * the buffer any further.
	 */
	ASSERT(xfs_buf_islocked(bp));
	error = xfs_dquot_from_disk(dqp, bp);
	xfs_buf_relse(bp);
	if (error)
		goto err;

	*dqpp = dqp;
	return error;

err:
	trace_xfs_dqread_fail(dqp);
	xfs_qm_dqdestroy(dqp);
	*dqpp = NULL;
	return error;
}
0690
0691
0692
0693
0694
0695
0696 static int
0697 xfs_dq_get_next_id(
0698 struct xfs_mount *mp,
0699 xfs_dqtype_t type,
0700 xfs_dqid_t *id)
0701 {
0702 struct xfs_inode *quotip = xfs_quota_inode(mp, type);
0703 xfs_dqid_t next_id = *id + 1;
0704 uint lock_flags;
0705 struct xfs_bmbt_irec got;
0706 struct xfs_iext_cursor cur;
0707 xfs_fsblock_t start;
0708 int error = 0;
0709
0710
0711 if (next_id < *id)
0712 return -ENOENT;
0713
0714
0715 if (next_id % mp->m_quotainfo->qi_dqperchunk) {
0716 *id = next_id;
0717 return 0;
0718 }
0719
0720
0721 start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
0722
0723 lock_flags = xfs_ilock_data_map_shared(quotip);
0724 error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
0725 if (error)
0726 return error;
0727
0728 if (xfs_iext_lookup_extent(quotip, "ip->i_df, start, &cur, &got)) {
0729
0730 if (got.br_startoff < start)
0731 got.br_startoff = start;
0732 *id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
0733 } else {
0734 error = -ENOENT;
0735 }
0736
0737 xfs_iunlock(quotip, lock_flags);
0738
0739 return error;
0740 }
0741
0742
0743
0744
0745
/*
 * Look up the dquot for @id in the cache radix tree.  On a hit, take a
 * reference and return the dquot locked; if the dquot is in the middle of
 * being freed, back off and retry.  Returns NULL on a cache miss.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING) {
		/* Dquot is being freed elsewhere; wait a tick and retry. */
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	/* Reference taken under the tree lock; dquot returned locked. */
	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}
0780
0781
0782
0783
0784
0785
0786
/*
 * Insert a freshly-read dquot into the cache radix tree.  On success the
 * dquot is returned locked with exactly one reference.  Returns -EEXIST if
 * another thread raced us and inserted the same id first.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			error;

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found!  Caller must try again. */
		WARN_ON(error != -EEXIST);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		return error;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	return 0;
}
0816
0817
0818 static int
0819 xfs_qm_dqget_checks(
0820 struct xfs_mount *mp,
0821 xfs_dqtype_t type)
0822 {
0823 switch (type) {
0824 case XFS_DQTYPE_USER:
0825 if (!XFS_IS_UQUOTA_ON(mp))
0826 return -ESRCH;
0827 return 0;
0828 case XFS_DQTYPE_GROUP:
0829 if (!XFS_IS_GQUOTA_ON(mp))
0830 return -ESRCH;
0831 return 0;
0832 case XFS_DQTYPE_PROJ:
0833 if (!XFS_IS_PQUOTA_ON(mp))
0834 return -ESRCH;
0835 return 0;
0836 default:
0837 WARN_ON_ONCE(0);
0838 return -EINVAL;
0839 }
0840 }
0841
0842
0843
0844
0845
/*
 * Given the file system, id, and type, return a locked, referenced dquot
 * in *O_dqpp.  The caller drops the reference with xfs_qm_dqput() or
 * xfs_qm_dqrele().
 */
int
xfs_qm_dqget(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

restart:
	/* Fast path: the dquot is already cached. */
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/* Cache miss: read it in from disk, allocating if allowed. */
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	if (error)
		return error;

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found.  Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
0889
0890
0891
0892
0893
0894
0895
0896 int
0897 xfs_qm_dqget_uncached(
0898 struct xfs_mount *mp,
0899 xfs_dqid_t id,
0900 xfs_dqtype_t type,
0901 struct xfs_dquot **dqpp)
0902 {
0903 int error;
0904
0905 error = xfs_qm_dqget_checks(mp, type);
0906 if (error)
0907 return error;
0908
0909 return xfs_qm_dqread(mp, id, type, 0, dqpp);
0910 }
0911
0912
0913 xfs_dqid_t
0914 xfs_qm_id_for_quotatype(
0915 struct xfs_inode *ip,
0916 xfs_dqtype_t type)
0917 {
0918 switch (type) {
0919 case XFS_DQTYPE_USER:
0920 return i_uid_read(VFS_I(ip));
0921 case XFS_DQTYPE_GROUP:
0922 return i_gid_read(VFS_I(ip));
0923 case XFS_DQTYPE_PROJ:
0924 return ip->i_projid;
0925 }
0926 ASSERT(0);
0927 return 0;
0928 }
0929
0930
0931
0932
0933
0934
/*
 * Return the dquot of this inode for the given quota type, locked and
 * referenced in *O_dqpp.  The inode must be ILOCK_EXCL and must not already
 * have a dquot of this type attached.  The ILOCK is dropped around the
 * disk read and retaken, so the attach/quota-on state is revalidated
 * afterwards.
 */
int
xfs_qm_dqget_inode(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_inode_dquot(ip, type) == NULL);

	id = xfs_qm_id_for_quotatype(ip, type);

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/*
	 * Dquot cache miss.  Drop the inode lock across the (potential)
	 * disk read so we don't hold it over I/O and don't have to worry
	 * about lock ordering against the quota inode.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * While the lock was dropped, a dquot may have been attached to
	 * this inode, or quotas may have been switched off; recheck both.
	 */
	if (xfs_this_quota_on(mp, type)) {
		struct xfs_dquot	*dqp1;

		dqp1 = xfs_inode_dquot(ip, type);
		if (dqp1) {
			/* Someone else attached one; use theirs instead. */
			xfs_qm_dqdestroy(dqp);
			dqp = dqp1;
			xfs_dqlock(dqp);
			goto dqret;
		}
	} else {
		/* Quota type got turned off; inode stays locked on return. */
		xfs_qm_dqdestroy(dqp);
		return -ESRCH;
	}

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found.  Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

dqret:
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
1015
1016
1017
1018
1019
1020 int
1021 xfs_qm_dqget_next(
1022 struct xfs_mount *mp,
1023 xfs_dqid_t id,
1024 xfs_dqtype_t type,
1025 struct xfs_dquot **dqpp)
1026 {
1027 struct xfs_dquot *dqp;
1028 int error = 0;
1029
1030 *dqpp = NULL;
1031 for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
1032 error = xfs_qm_dqget(mp, id, type, false, &dqp);
1033 if (error == -ENOENT)
1034 continue;
1035 else if (error != 0)
1036 break;
1037
1038 if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
1039 *dqpp = dqp;
1040 return 0;
1041 }
1042
1043 xfs_qm_dqput(dqp);
1044 }
1045
1046 return error;
1047 }
1048
1049
1050
1051
1052
1053
1054
/*
 * Release a reference to the dquot (decrement the ref-count) and unlock it.
 *
 * If this drops the last reference, put the dquot on the freelist (LRU)
 * so it can be reclaimed.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		/* list_lru_add() returns true only if it wasn't on the LRU. */
		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}
1073
1074
1075
1076
1077
/*
 * Drop a reference on a dquot.  Safe to call with a NULL dquot.
 */
void
xfs_qm_dqrele(
	struct xfs_dquot	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);

	/*
	 * No flush is attempted here even if the dquot is dirty; writeback
	 * presumably happens later via the reclaim/flush paths rather than
	 * on every release -- avoids stalling callers on I/O.
	 */
	xfs_qm_dqput(dqp);
}
1096
1097
1098
1099
1100
1101
1102
1103
/*
 * Flush-completion callback for a dquot log item: if the item's position
 * in the log is still the LSN we flushed at (or the item previously
 * failed), pull it out of the AIL, then drop the dquot's flush lock.
 */
static void
xfs_qm_dqflush_done(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qip = (struct xfs_dq_logitem *)lip;
	struct xfs_dquot	*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;
	xfs_lsn_t		tail_lsn;

	/*
	 * Only remove the item from the AIL if its li_lsn matches the LSN
	 * recorded at flush time (qli_flush_lsn) -- if it moved, the dquot
	 * was relogged after the flush and must stay in the AIL.  A FAILED
	 * item is always cleared here.  Checked unlocked first, then
	 * re-validated under the AIL lock.
	 */
	if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
	    ((lip->li_lsn == qip->qli_flush_lsn) ||
	     test_bit(XFS_LI_FAILED, &lip->li_flags))) {

		spin_lock(&ailp->ail_lock);
		xfs_clear_li_failed(lip);
		if (lip->li_lsn == qip->qli_flush_lsn) {
			/* xfs_ail_update_finish() drops the AIL lock */
			tail_lsn = xfs_ail_delete_one(ailp, lip);
			xfs_ail_update_finish(ailp, tail_lsn);
		} else {
			spin_unlock(&ailp->ail_lock);
		}
	}

	/*
	 * Release the dquot's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}
1141
1142 void
1143 xfs_buf_dquot_iodone(
1144 struct xfs_buf *bp)
1145 {
1146 struct xfs_log_item *lip, *n;
1147
1148 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
1149 list_del_init(&lip->li_bio_list);
1150 xfs_qm_dqflush_done(lip);
1151 }
1152 }
1153
1154 void
1155 xfs_buf_dquot_io_fail(
1156 struct xfs_buf *bp)
1157 {
1158 struct xfs_log_item *lip;
1159
1160 spin_lock(&bp->b_mount->m_ail->ail_lock);
1161 list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
1162 xfs_set_li_failed(lip, bp);
1163 spin_unlock(&bp->b_mount->m_ail->ail_lock);
1164 }
1165
1166
/*
 * Sanity-check an incore dquot before flushing it to disk.  Returns the
 * code address of the failed check, or NULL if everything is ok.
 */
static xfs_failaddr_t
xfs_qm_dqflush_check(
	struct xfs_dquot	*dqp)
{
	xfs_dqtype_t		type = xfs_dquot_type(dqp);

	if (type != XFS_DQTYPE_USER &&
	    type != XFS_DQTYPE_GROUP &&
	    type != XFS_DQTYPE_PROJ)
		return __this_address;

	/* id 0 (presumably the defaults dquot) skips the timer checks. */
	if (dqp->q_id == 0)
		return NULL;

	/* A dquot over its soft limit must have a grace timer running. */
	if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
	    !dqp->q_blk.timer)
		return __this_address;

	if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
	    !dqp->q_ino.timer)
		return __this_address;

	if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
	    !dqp->q_rtb.timer)
		return __this_address;

	/* The bigtime flag is only valid on bigtime filesystems. */
	if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
		if (!xfs_has_bigtime(dqp->q_mount))
			return __this_address;
		/*
		 * NOTE(review): unreachable -- q_id == 0 already returned
		 * NULL above; kept as defensive belt-and-braces.
		 */
		if (dqp->q_id == 0)
			return __this_address;
	}

	return NULL;
}
1203
1204
1205
1206
1207
1208
1209
1210
1211
/*
 * Write a modified dquot into its ondisk buffer.  The dquot must be locked
 * and its flush lock held by the caller; the flush lock is released only
 * when the buffer I/O completes (xfs_qm_dqflush_done) or on error here.
 * On success the locked buffer is returned in *bpp for the caller to
 * submit; the dquot is still locked on return.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
	struct xfs_buf		*bp;
	struct xfs_dqblk	*dqblk;
	xfs_failaddr_t		fa;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	/* Wait for any log pins on the dquot to drain first. */
	xfs_qm_dqunpin_wait(dqp);

	/*
	 * Get the buffer containing the on-disk dquot.  TRYLOCK: back out
	 * with -EAGAIN rather than blocking on a busy buffer.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
				   &bp, &xfs_dquot_buf_ops);
	if (error == -EAGAIN)
		goto out_unlock;
	if (error)
		goto out_abort;

	/* Sanity-check the incore dquot before writing it out. */
	fa = xfs_qm_dqflush_check(dqp);
	if (fa) {
		xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
				dqp->q_id, fa);
		xfs_buf_relse(bp);
		error = -EFSCORRUPTED;
		goto out_abort;
	}

	/* Flush the incore dquot into the ondisk buffer. */
	dqblk = bp->b_addr + dqp->q_bufoffset;
	xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);

	/*
	 * Clear the dirty flag and remember the flush lsn for later use.
	 */
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Copy the lsn into the on-disk dquot now while we have the in-
	 * memory dquot here -- the write verifier has no access to the log
	 * item later.  The CRC must be recomputed after stamping the LSN.
	 */
	if (xfs_has_crc(mp)) {
		dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach the dquot log item to the buffer so that completion can
	 * remove it from the AIL and release the flush lock once the
	 * dquot reaches disk.
	 */
	bp->b_flags |= _XBF_DQUOTS;
	list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_abort:
	/* Corruption or read failure: take the dquot out of the AIL and
	 * shut the filesystem down. */
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
	xfs_trans_ail_delete(lip, 0);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
	xfs_dqfunlock(dqp);
	return error;
}
1308
1309
1310
1311
1312
1313
1314
1315 void
1316 xfs_dqlock2(
1317 struct xfs_dquot *d1,
1318 struct xfs_dquot *d2)
1319 {
1320 if (d1 && d2) {
1321 ASSERT(d1 != d2);
1322 if (d1->q_id > d2->q_id) {
1323 mutex_lock(&d2->q_qlock);
1324 mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1325 } else {
1326 mutex_lock(&d1->q_qlock);
1327 mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1328 }
1329 } else if (d1) {
1330 mutex_lock(&d1->q_qlock);
1331 } else if (d2) {
1332 mutex_lock(&d2->q_qlock);
1333 }
1334 }
1335
1336 int __init
1337 xfs_qm_init(void)
1338 {
1339 xfs_dquot_cache = kmem_cache_create("xfs_dquot",
1340 sizeof(struct xfs_dquot),
1341 0, 0, NULL);
1342 if (!xfs_dquot_cache)
1343 goto out;
1344
1345 xfs_dqtrx_cache = kmem_cache_create("xfs_dqtrx",
1346 sizeof(struct xfs_dquot_acct),
1347 0, 0, NULL);
1348 if (!xfs_dqtrx_cache)
1349 goto out_free_dquot_cache;
1350
1351 return 0;
1352
1353 out_free_dquot_cache:
1354 kmem_cache_destroy(xfs_dquot_cache);
1355 out:
1356 return -ENOMEM;
1357 }
1358
/* Module exit: destroy the dquot slab caches (reverse of xfs_qm_init). */
void
xfs_qm_exit(void)
{
	kmem_cache_destroy(xfs_dqtrx_cache);
	kmem_cache_destroy(xfs_dquot_cache);
}
1365
1366
1367
1368
1369
1370
1371 int
1372 xfs_qm_dqiterate(
1373 struct xfs_mount *mp,
1374 xfs_dqtype_t type,
1375 xfs_qm_dqiterate_fn iter_fn,
1376 void *priv)
1377 {
1378 struct xfs_dquot *dq;
1379 xfs_dqid_t id = 0;
1380 int error;
1381
1382 do {
1383 error = xfs_qm_dqget_next(mp, id, type, &dq);
1384 if (error == -ENOENT)
1385 return 0;
1386 if (error)
1387 return error;
1388
1389 error = iter_fn(dq, type, priv);
1390 id = dq->q_id;
1391 xfs_qm_dqput(dq);
1392 } while (error == 0 && id != 0);
1393
1394 return error;
1395 }