// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT      12
#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

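/**
 * gfs2_qd_hash - compute the hash bucket for a quota ID
 * @sdp: the filesystem the ID belongs to
 * @qid: the quota ID
 *
 * The superblock pointer is hashed along with the ID so that the same
 * numeric ID on different filesystems lands in different buckets.
 */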
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
                 const struct kqid qid)
{
    unsigned int h;

    h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
    h = jhash(&qid, sizeof(struct kqid), h);

    return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
    hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
    hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
    struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
    kmem_cache_free(gfs2_quotad_cachep, qd);
}

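/**
 * gfs2_qd_dispose - free a list of unused quota data objects
 * @list: list of gfs2_quota_data objects isolated from the LRU
 *
 * Unlinks each object from its per-filesystem list and from the hash
 * table, drops its glock reference, and frees it after an RCU grace
 * period.
 */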
static void gfs2_qd_dispose(struct list_head *list)
{
    struct gfs2_quota_data *qd;
    struct gfs2_sbd *sdp;

    while (!list_empty(list)) {
        qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
        sdp = qd->qd_gl->gl_name.ln_sbd;

        list_del(&qd->qd_lru);

        /* Free from the filesystem-specific list */
        spin_lock(&qd_lock);
        list_del(&qd->qd_list);
        spin_unlock(&qd_lock);

        spin_lock_bucket(qd->qd_hash);
        hlist_bl_del_rcu(&qd->qd_hlist);
        spin_unlock_bucket(qd->qd_hash);

        gfs2_assert_warn(sdp, !qd->qd_change);
        gfs2_assert_warn(sdp, !qd->qd_slot_count);
        gfs2_assert_warn(sdp, !qd->qd_bh_count);

        gfs2_glock_put(qd->qd_gl);
        atomic_dec(&sdp->sd_quota_count);

        /* Delete it from the common reclaim list */
        call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
    }
}

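/**
 * gfs2_qd_isolate - move an unreferenced quota data object to a dispose list
 * @item: the qd_lru list head of the object being walked
 * @lru: the LRU list the object is on
 * @lru_lock: spinlock protecting the LRU (held by the caller)
 * @arg: the dispose list passed in by gfs2_qd_shrink_scan()
 *
 * Called by list_lru_shrink_walk().  Objects whose reference count has
 * dropped to zero are marked dead and moved to the dispose list.
 */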
static enum lru_status gfs2_qd_isolate(struct list_head *item,
        struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
    struct list_head *dispose = arg;
    struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

    if (!spin_trylock(&qd->qd_lockref.lock))
        return LRU_SKIP;

    if (qd->qd_lockref.count == 0) {
        lockref_mark_dead(&qd->qd_lockref);
        list_lru_isolate_move(lru, &qd->qd_lru, dispose);
    }

    spin_unlock(&qd->qd_lockref.lock);
    return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
                     struct shrink_control *sc)
{
    LIST_HEAD(dispose);
    unsigned long freed;

    if (!(sc->gfp_mask & __GFP_FS))
        return SHRINK_STOP;

    freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
                     gfs2_qd_isolate, &dispose);

    gfs2_qd_dispose(&dispose);

    return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
                      struct shrink_control *sc)
{
    return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
    .count_objects = gfs2_qd_shrink_count,
    .scan_objects = gfs2_qd_shrink_scan,
    .seeks = DEFAULT_SEEKS,
    .flags = SHRINKER_NUMA_AWARE,
};

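/**
 * qd2index - compute the index of a quota entry in the quota file
 * @qd: the quota data
 *
 * User and group quotas for the same numeric ID are interleaved:
 * user IDs occupy even indexes, group IDs odd indexes.
 */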
static u64 qd2index(struct gfs2_quota_data *qd)
{
    struct kqid qid = qd->qd_id;
    return (2 * (u64)from_kqid(&init_user_ns, qid)) +
        ((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
    u64 offset;

    offset = qd2index(qd);
    offset *= sizeof(struct gfs2_quota);

    return offset;
}

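/**
 * qd_alloc - allocate and initialize a quota data object
 * @hash: the hash bucket the new object will be inserted into
 * @sdp: the filesystem
 * @qid: the quota ID
 *
 * The object starts with one reference, no change slot, and a glock
 * keyed on the quota file index of @qid.
 *
 * Returns: the new object, or NULL on allocation failure
 */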
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
    struct gfs2_quota_data *qd;
    int error;

    qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
    if (!qd)
        return NULL;

    qd->qd_sbd = sdp;
    qd->qd_lockref.count = 1;
    spin_lock_init(&qd->qd_lockref.lock);
    qd->qd_id = qid;
    qd->qd_slot = -1;
    INIT_LIST_HEAD(&qd->qd_lru);
    qd->qd_hash = hash;

    error = gfs2_glock_get(sdp, qd2index(qd),
                  &gfs2_quota_glops, CREATE, &qd->qd_gl);
    if (error)
        goto fail;

    return qd;

fail:
    kmem_cache_free(gfs2_quotad_cachep, qd);
    return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
                             const struct gfs2_sbd *sdp,
                             struct kqid qid)
{
    struct gfs2_quota_data *qd;
    struct hlist_bl_node *h;

    hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
        if (!qid_eq(qd->qd_id, qid))
            continue;
        if (qd->qd_sbd != sdp)
            continue;
        if (lockref_get_not_dead(&qd->qd_lockref)) {
            list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
            return qd;
        }
    }

    return NULL;
}

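/**
 * qd_get - find or create the quota data object for an ID
 * @sdp: the filesystem
 * @qid: the quota ID
 * @qdp: used to return the object found or created
 *
 * First searches the hash table under RCU.  On a miss, a new object is
 * allocated and inserted, unless another thread raced us and inserted
 * one first, in which case the new allocation is discarded.
 *
 * Returns: 0 on success, -ENOMEM on allocation failure
 */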
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
          struct gfs2_quota_data **qdp)
{
    struct gfs2_quota_data *qd, *new_qd;
    unsigned int hash = gfs2_qd_hash(sdp, qid);

    rcu_read_lock();
    *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
    rcu_read_unlock();

    if (qd)
        return 0;

    new_qd = qd_alloc(hash, sdp, qid);
    if (!new_qd)
        return -ENOMEM;

    spin_lock(&qd_lock);
    spin_lock_bucket(hash);
    *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
    if (qd == NULL) {
        *qdp = new_qd;
        list_add(&new_qd->qd_list, &sdp->sd_quota_list);
        hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
        atomic_inc(&sdp->sd_quota_count);
    }
    spin_unlock_bucket(hash);
    spin_unlock(&qd_lock);

    if (qd) {
        gfs2_glock_put(new_qd->qd_gl);
        kmem_cache_free(gfs2_quotad_cachep, new_qd);
    }

    return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
    struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
    gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
    lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
    if (lockref_put_or_lock(&qd->qd_lockref))
        return;

    qd->qd_lockref.count = 0;
    list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
    spin_unlock(&qd->qd_lockref.lock);
}

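/**
 * slot_get - assign a quota change slot to a quota data object
 * @qd: the quota data
 *
 * Each object with pending changes needs a slot in the quota change
 * file.  If @qd already has a slot, its slot count is simply bumped;
 * otherwise the first free bit in the slot bitmap is claimed.
 *
 * Returns: 0 on success, -ENOSPC if no free slot is available
 */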
static int slot_get(struct gfs2_quota_data *qd)
{
    struct gfs2_sbd *sdp = qd->qd_sbd;
    unsigned int bit;
    int error = 0;

    spin_lock(&sdp->sd_bitmap_lock);
    if (qd->qd_slot_count != 0)
        goto out;

    error = -ENOSPC;
    bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
    if (bit < sdp->sd_quota_slots) {
        set_bit(bit, sdp->sd_quota_bitmap);
        qd->qd_slot = bit;
        error = 0;
out:
        qd->qd_slot_count++;
    }
    spin_unlock(&sdp->sd_bitmap_lock);

    return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
    struct gfs2_sbd *sdp = qd->qd_sbd;

    spin_lock(&sdp->sd_bitmap_lock);
    gfs2_assert(sdp, qd->qd_slot_count);
    qd->qd_slot_count++;
    spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
    struct gfs2_sbd *sdp = qd->qd_sbd;

    spin_lock(&sdp->sd_bitmap_lock);
    gfs2_assert(sdp, qd->qd_slot_count);
    if (!--qd->qd_slot_count) {
        BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
        qd->qd_slot = -1;
    }
    spin_unlock(&sdp->sd_bitmap_lock);
}

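/**
 * bh_get - read in the quota change block for a quota data object
 * @qd: the quota data
 *
 * Maps the object's slot to a block and offset within the quota change
 * file, reads the block, and points qd_bh_qc at the object's
 * gfs2_quota_change entry within it.  The buffer is reference counted
 * via qd_bh_count, so repeated callers share a single read.
 *
 * Returns: 0 on success or a negative error code
 */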
static int bh_get(struct gfs2_quota_data *qd)
{
    struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
    struct inode *inode = sdp->sd_qc_inode;
    struct gfs2_inode *ip = GFS2_I(inode);
    unsigned int block, offset;
    struct buffer_head *bh;
    struct iomap iomap = { };
    int error;

    mutex_lock(&sdp->sd_quota_mutex);

    if (qd->qd_bh_count++) {
        mutex_unlock(&sdp->sd_quota_mutex);
        return 0;
    }

    block = qd->qd_slot / sdp->sd_qc_per_block;
    offset = qd->qd_slot % sdp->sd_qc_per_block;

    error = gfs2_iomap_get(inode,
                   (loff_t)block << inode->i_blkbits,
                   i_blocksize(inode), &iomap);
    if (error)
        goto fail;
    error = -ENOENT;
    if (iomap.type != IOMAP_MAPPED)
        goto fail;

    error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
                   DIO_WAIT, 0, &bh);
    if (error)
        goto fail;
    error = -EIO;
    if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
        goto fail_brelse;

    qd->qd_bh = bh;
    qd->qd_bh_qc = (struct gfs2_quota_change *)
        (bh->b_data + sizeof(struct gfs2_meta_header) +
         offset * sizeof(struct gfs2_quota_change));

    mutex_unlock(&sdp->sd_quota_mutex);

    return 0;

fail_brelse:
    brelse(bh);
fail:
    qd->qd_bh_count--;
    mutex_unlock(&sdp->sd_quota_mutex);
    return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
    struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

    mutex_lock(&sdp->sd_quota_mutex);
    gfs2_assert(sdp, qd->qd_bh_count);
    if (!--qd->qd_bh_count) {
        brelse(qd->qd_bh);
        qd->qd_bh = NULL;
        qd->qd_bh_qc = NULL;
    }
    mutex_unlock(&sdp->sd_quota_mutex);
}

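/**
 * qd_check_sync - check whether a quota data object needs syncing
 * @sdp: the filesystem
 * @qd: the quota data
 * @sync_gen: the current sync generation, or NULL to ignore it
 *
 * Skips objects that are already being synced, have no pending change,
 * or were already synced in this generation.  On success, takes a
 * reference and a slot hold, marks the object locked for sync, and
 * snapshots the pending change into qd_change_sync.
 *
 * Returns: 1 if the object was claimed for syncing, 0 otherwise
 */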
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
             u64 *sync_gen)
{
    if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
        !test_bit(QDF_CHANGE, &qd->qd_flags) ||
        (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
        return 0;

    if (!lockref_get_not_dead(&qd->qd_lockref))
        return 0;

    list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
    set_bit(QDF_LOCKED, &qd->qd_flags);
    qd->qd_change_sync = qd->qd_change;
    slot_hold(qd);
    return 1;
}

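/**
 * qd_fish - find the next quota data object that needs syncing
 * @sdp: the filesystem
 * @qdp: used to return the object, or NULL if none was found
 *
 * Walks the per-filesystem quota list looking for an object that
 * qd_check_sync() will claim, then reads in its quota change block.
 *
 * Returns: 0 on success (whether or not an object was found) or a
 *          negative error code
 */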
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
    struct gfs2_quota_data *qd = NULL, *iter;
    int error;

    *qdp = NULL;

    if (sb_rdonly(sdp->sd_vfs))
        return 0;

    spin_lock(&qd_lock);

    list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
        if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
            qd = iter;
            break;
        }
    }

    spin_unlock(&qd_lock);

    if (qd) {
        gfs2_assert_warn(sdp, qd->qd_change_sync);
        error = bh_get(qd);
        if (error) {
            clear_bit(QDF_LOCKED, &qd->qd_flags);
            slot_put(qd);
            qd_put(qd);
            return error;
        }
    }

    *qdp = qd;

    return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
    gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
             test_bit(QDF_LOCKED, &qd->qd_flags));
    clear_bit(QDF_LOCKED, &qd->qd_flags);
    bh_put(qd);
    slot_put(qd);
    qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
            struct gfs2_quota_data **qdp)
{
    int error;

    error = qd_get(sdp, qid, qdp);
    if (error)
        return error;

    error = slot_get(*qdp);
    if (error)
        goto fail;

    error = bh_get(*qdp);
    if (error)
        goto fail_slot;

    return 0;

fail_slot:
    slot_put(*qdp);
fail:
    qd_put(*qdp);
    return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
    bh_put(qd);
    slot_put(qd);
    qd_put(qd);
}

/**
 * gfs2_qa_get - make sure we have a quota allocation data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_get(struct gfs2_inode *ip)
{
    struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
    struct inode *inode = &ip->i_inode;

    if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
        return 0;

    spin_lock(&inode->i_lock);
    if (ip->i_qadata == NULL) {
        struct gfs2_qadata *tmp;

        spin_unlock(&inode->i_lock);
        tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
        if (!tmp)
            return -ENOMEM;

        spin_lock(&inode->i_lock);
        if (ip->i_qadata == NULL)
            ip->i_qadata = tmp;
        else
            kmem_cache_free(gfs2_qadata_cachep, tmp);
    }
    ip->i_qadata->qa_ref++;
    spin_unlock(&inode->i_lock);
    return 0;
}

void gfs2_qa_put(struct gfs2_inode *ip)
{
    struct inode *inode = &ip->i_inode;

    spin_lock(&inode->i_lock);
    if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
        kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
        ip->i_qadata = NULL;
    }
    spin_unlock(&inode->i_lock);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
    struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
    struct gfs2_quota_data **qd;
    int error;

    if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
        return 0;

    error = gfs2_qa_get(ip);
    if (error)
        return error;

    qd = ip->i_qadata->qa_qd;

    if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
        error = -EIO;
        goto out;
    }

    error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
    if (error)
        goto out_unhold;
    ip->i_qadata->qa_qd_num++;
    qd++;

    error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
    if (error)
        goto out_unhold;
    ip->i_qadata->qa_qd_num++;
    qd++;

    if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
        !uid_eq(uid, ip->i_inode.i_uid)) {
        error = qdsb_get(sdp, make_kqid_uid(uid), qd);
        if (error)
            goto out_unhold;
        ip->i_qadata->qa_qd_num++;
        qd++;
    }

    if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
        !gid_eq(gid, ip->i_inode.i_gid)) {
        error = qdsb_get(sdp, make_kqid_gid(gid), qd);
        if (error)
            goto out_unhold;
        ip->i_qadata->qa_qd_num++;
        qd++;
    }

out_unhold:
    if (error)
        gfs2_quota_unhold(ip);
out:
    return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
    struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
    u32 x;

    if (ip->i_qadata == NULL)
        return;

    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

    for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
        qdsb_put(ip->i_qadata->qa_qd[x]);
        ip->i_qadata->qa_qd[x] = NULL;
    }
    ip->i_qadata->qa_qd_num = 0;
    gfs2_qa_put(ip);
}

static int sort_qd(const void *a, const void *b)
{
    const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
    const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

    if (qid_lt(qd_a->qd_id, qd_b->qd_id))
        return -1;
    if (qid_lt(qd_b->qd_id, qd_a->qd_id))
        return 1;
    return 0;
}

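/**
 * do_qc - record a local quota change in the quota change file
 * @qd: the quota data
 * @change: the signed number of blocks to add to the pending change
 *
 * Updates the on-disk gfs2_quota_change entry and the in-core
 * qd_change counter.  The first change for an ID takes slot and
 * reference holds; when the pending change returns to zero, the entry
 * is cleared and the holds are dropped.
 */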
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
    struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
    struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
    struct gfs2_quota_change *qc = qd->qd_bh_qc;
    s64 x;

    mutex_lock(&sdp->sd_quota_mutex);
    gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

    if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
        qc->qc_change = 0;
        qc->qc_flags = 0;
        if (qd->qd_id.type == USRQUOTA)
            qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
        qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
    }

    x = be64_to_cpu(qc->qc_change) + change;
    qc->qc_change = cpu_to_be64(x);

    spin_lock(&qd_lock);
    qd->qd_change = x;
    spin_unlock(&qd_lock);

    if (!x) {
        gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
        clear_bit(QDF_CHANGE, &qd->qd_flags);
        qc->qc_flags = 0;
        qc->qc_id = 0;
        slot_put(qd);
        qd_put(qd);
    } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
        qd_hold(qd);
        slot_hold(qd);
    }

    if (change < 0) /* Reset quiet flag if we freed some blocks */
        clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
    mutex_unlock(&sdp->sd_quota_mutex);
}

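/**
 * gfs2_write_buf_to_page - write a buffer into a page of the quota file
 * @ip: the quota inode
 * @index: the index of the page to write to
 * @off: the byte offset within the page
 * @buf: the data to write
 * @bytes: the number of bytes to write
 *
 * Maps and, if necessary, reads in the buffer heads backing the byte
 * range, adds them to the current transaction (or to the ordered list
 * for non-journaled data), and then copies @buf into the page.
 *
 * Returns: 0 on success, -ENOMEM or -EIO on failure
 */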
static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
                  unsigned off, void *buf, unsigned bytes)
{
    struct inode *inode = &ip->i_inode;
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    struct address_space *mapping = inode->i_mapping;
    struct page *page;
    struct buffer_head *bh;
    void *kaddr;
    u64 blk;
    unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
    unsigned to_write = bytes, pg_off = off;
    int done = 0;

    blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
    boff = off % bsize;

    page = find_or_create_page(mapping, index, GFP_NOFS);
    if (!page)
        return -ENOMEM;
    if (!page_has_buffers(page))
        create_empty_buffers(page, bsize, 0);

    bh = page_buffers(page);
    while (!done) {
        /* Find the beginning block within the page */
        if (pg_off >= ((bnum * bsize) + bsize)) {
            bh = bh->b_this_page;
            bnum++;
            blk++;
            continue;
        }
        if (!buffer_mapped(bh)) {
            gfs2_block_map(inode, blk, bh, 1);
            if (!buffer_mapped(bh))
                goto unlock_out;
            /* If it's a newly allocated disk block, zero it */
            if (buffer_new(bh))
                zero_user(page, bnum * bsize, bh->b_size);
        }
        if (PageUptodate(page))
            set_buffer_uptodate(bh);
        if (!buffer_uptodate(bh)) {
            ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &bh);
            wait_on_buffer(bh);
            if (!buffer_uptodate(bh))
                goto unlock_out;
        }
        if (gfs2_is_jdata(ip))
            gfs2_trans_add_data(ip->i_gl, bh);
        else
            gfs2_ordered_add_inode(ip);

        /* If we need to write to the next block as well */
        if (to_write > (bsize - boff)) {
            pg_off += (bsize - boff);
            to_write -= (bsize - boff);
            boff = pg_off % bsize;
            continue;
        }
        done = 1;
    }

    /* Write to the page, now that we have setup the buffer(s) */
    kaddr = kmap_atomic(page);
    memcpy(kaddr + off, buf, bytes);
    flush_dcache_page(page);
    kunmap_atomic(kaddr);
    unlock_page(page);
    put_page(page);

    return 0;

unlock_out:
    unlock_page(page);
    put_page(page);
    return -EIO;
}

static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
                 loff_t loc)
{
    unsigned long pg_beg;
    unsigned pg_off, nbytes, overflow = 0;
    int pg_oflow = 0, error;
    void *ptr;

    nbytes = sizeof(struct gfs2_quota);

    pg_beg = loc >> PAGE_SHIFT;
    pg_off = offset_in_page(loc);

    /* If the quota straddles a page boundary, split the write in two */
    if ((pg_off + nbytes) > PAGE_SIZE) {
        pg_oflow = 1;
        overflow = (pg_off + nbytes) - PAGE_SIZE;
    }

    ptr = qp;
    error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
                       nbytes - overflow);
    /* If there's an overflow, write the remaining bytes to the next page */
    if (!error && pg_oflow)
        error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
                           ptr + nbytes - overflow,
                           overflow);
    return error;
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                 s64 change, struct gfs2_quota_data *qd,
                 struct qc_dqblk *fdq)
{
    struct inode *inode = &ip->i_inode;
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    struct gfs2_quota q;
    int err;
    u64 size;

    if (gfs2_is_stuffed(ip)) {
        err = gfs2_unstuff_dinode(ip);
        if (err)
            return err;
    }

    memset(&q, 0, sizeof(struct gfs2_quota));
    err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
    if (err < 0)
        return err;

    loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
    err = -EIO;
    be64_add_cpu(&q.qu_value, change);
    if (((s64)be64_to_cpu(q.qu_value)) < 0)
        q.qu_value = 0; /* Never go negative on quota usage */
    qd->qd_qb.qb_value = q.qu_value;
    if (fdq) {
        if (fdq->d_fieldmask & QC_SPC_SOFT) {
            q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
            qd->qd_qb.qb_warn = q.qu_warn;
        }
        if (fdq->d_fieldmask & QC_SPC_HARD) {
            q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
            qd->qd_qb.qb_limit = q.qu_limit;
        }
        if (fdq->d_fieldmask & QC_SPACE) {
            q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
            qd->qd_qb.qb_value = q.qu_value;
        }
    }

    err = gfs2_write_disk_quota(ip, &q, loc);
    if (!err) {
        size = loc + sizeof(struct gfs2_quota);
        if (size > inode->i_size)
            i_size_write(inode, size);
        inode->i_mtime = inode->i_atime = current_time(inode);
        mark_inode_dirty(inode);
        set_bit(QDF_REFRESH, &qd->qd_flags);
    }

    return err;
}

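/**
 * do_sync - sync a batch of quota changes to the quota file
 * @num_qd: the number of quota data objects in @qda
 * @qda: the quota data objects to sync
 *
 * Takes the quota glocks exclusively, reserves enough blocks for the
 * writes, folds each object's pending change into the quota file with
 * gfs2_adjust_quota(), and then reverses the local change records with
 * do_qc().
 *
 * Returns: 0 on success or a negative error code
 */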
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
    struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
    struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
    struct gfs2_alloc_parms ap = { .aflags = 0, };
    unsigned int data_blocks, ind_blocks;
    struct gfs2_holder *ghs, i_gh;
    unsigned int qx, x;
    struct gfs2_quota_data *qd;
    unsigned reserved;
    loff_t offset;
    unsigned int nalloc = 0, blocks;
    int error;

    error = gfs2_qa_get(ip);
    if (error)
        return error;

    gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                  &data_blocks, &ind_blocks);

    ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
    if (!ghs) {
        error = -ENOMEM;
        goto out;
    }

    sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
    inode_lock(&ip->i_inode);
    for (qx = 0; qx < num_qd; qx++) {
        error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                       GL_NOCACHE, &ghs[qx]);
        if (error)
            goto out_dq;
    }

    error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
    if (error)
        goto out_dq;

    for (x = 0; x < num_qd; x++) {
        offset = qd2offset(qda[x]);
        if (gfs2_write_alloc_required(ip, offset,
                          sizeof(struct gfs2_quota)))
            nalloc++;
    }

    /*
     * 1 blk for unstuffing inode if stuffed. We add this extra
     * block to the reservation unconditionally. If the inode
     * doesn't need unstuffing, the block will be released to the
     * rgrp since it won't be allocated during the transaction
     */
    /* +3 in the end for unstuffing block, inode size update block
     * and another block in case quota straddles page boundary and
     * two blocks need to be updated instead of 1 */
    blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

    reserved = 1 + (nalloc * (data_blocks + ind_blocks));
    ap.target = reserved;
    error = gfs2_inplace_reserve(ip, &ap);
    if (error)
        goto out_alloc;

    if (nalloc)
        blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

    error = gfs2_trans_begin(sdp, blocks, 0);
    if (error)
        goto out_ipres;

    for (x = 0; x < num_qd; x++) {
        qd = qda[x];
        offset = qd2offset(qd);
        error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
        if (error)
            goto out_end_trans;

        do_qc(qd, -qd->qd_change_sync);
        set_bit(QDF_REFRESH, &qd->qd_flags);
    }

    error = 0;

out_end_trans:
    gfs2_trans_end(sdp);
out_ipres:
    gfs2_inplace_release(ip);
out_alloc:
    gfs2_glock_dq_uninit(&i_gh);
out_dq:
    while (qx--)
        gfs2_glock_dq_uninit(&ghs[qx]);
    inode_unlock(&ip->i_inode);
    kfree(ghs);
    gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
               GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
out:
    gfs2_qa_put(ip);
    return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
    struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
    struct gfs2_quota q;
    struct gfs2_quota_lvb *qlvb;
    loff_t pos;
    int error;

    memset(&q, 0, sizeof(struct gfs2_quota));
    pos = qd2offset(qd);
    error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
    if (error < 0)
        return error;

    qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
    qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
    qlvb->__pad = 0;
    qlvb->qb_limit = q.qu_limit;
    qlvb->qb_warn = q.qu_warn;
    qlvb->qb_value = q.qu_value;
    qd->qd_qb = *qlvb;

    return 0;
}

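/**
 * do_glock - acquire a quota glock and make sure its LVB is current
 * @qd: the quota data
 * @force_refresh: FORCE to reread the quota from disk unconditionally
 * @q_gh: the holder to acquire
 *
 * Normally a shared glock is enough, since the limits and usage are
 * cached in the lock value block.  If the LVB is uninitialized or a
 * refresh is forced, the lock is upgraded to exclusive and the LVB is
 * repopulated from the quota file via update_qd().
 *
 * Returns: 0 on success or a negative error code
 */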
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
            struct gfs2_holder *q_gh)
{
    struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
    struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
    struct gfs2_holder i_gh;
    int error;

restart:
    error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
    if (error)
        return error;

    if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
        force_refresh = FORCE;

    qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

    if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
        gfs2_glock_dq_uninit(q_gh);
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
                       GL_NOCACHE, q_gh);
        if (error)
            return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
        if (error)
            goto fail;

        error = update_qd(sdp, qd);
        if (error)
            goto fail_gunlock;

        gfs2_glock_dq_uninit(&i_gh);
        gfs2_glock_dq_uninit(q_gh);
        force_refresh = 0;
        goto restart;
    }

    return 0;

fail_gunlock:
    gfs2_glock_dq_uninit(&i_gh);
fail:
    gfs2_glock_dq_uninit(q_gh);
    return error;
}

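/**
 * gfs2_quota_lock - acquire the quota glocks for an inode's quota IDs
 * @ip: the inode
 * @uid: an extra uid to check, or NO_UID_QUOTA_CHANGE
 * @gid: an extra gid to check, or NO_GID_QUOTA_CHANGE
 *
 * Holds the quota data objects for the relevant IDs and takes their
 * glocks in sorted order, so that all nodes acquire them in a
 * consistent order.
 *
 * Returns: 0 on success or a negative error code
 */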
int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
    struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
    struct gfs2_quota_data *qd;
    u32 x;
    int error = 0;

    if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
        return 0;

    error = gfs2_quota_hold(ip, uid, gid);
    if (error)
        return error;

    sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
         sizeof(struct gfs2_quota_data *), sort_qd, NULL);

    for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
        qd = ip->i_qadata->qa_qd[x];
        error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
        if (error)
            break;
    }

    if (!error)
        set_bit(GIF_QD_LOCKED, &ip->i_flags);
    else {
        while (x--)
            gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
        gfs2_quota_unhold(ip);
    }

    return error;
}

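/**
 * need_sync - decide whether a local quota change warrants a sync
 * @qd: the quota data
 *
 * Negative changes (frees) never force a sync, and neither do changes
 * for IDs with no limit or already at their limit.  Otherwise the
 * local change is scaled by the number of journals and the quota_scale
 * tunable, and a sync is requested if adding the result to the
 * cluster-wide value would cross the hard limit.  This is what makes
 * syncs more frequent as a user approaches their limit.
 *
 * Returns: 1 if the change should be synced, 0 otherwise
 */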
static int need_sync(struct gfs2_quota_data *qd)
{
    struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
    struct gfs2_tune *gt = &sdp->sd_tune;
    s64 value;
    unsigned int num, den;
    int do_sync = 1;

    if (!qd->qd_qb.qb_limit)
        return 0;

    spin_lock(&qd_lock);
    value = qd->qd_change;
    spin_unlock(&qd_lock);

    spin_lock(&gt->gt_spin);
    num = gt->gt_quota_scale_num;
    den = gt->gt_quota_scale_den;
    spin_unlock(&gt->gt_spin);

    if (value < 0)
        do_sync = 0;
    else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
         (s64)be64_to_cpu(qd->qd_qb.qb_limit))
        do_sync = 0;
    else {
        value *= gfs2_jindex_size(sdp) * num;
        value = div_s64(value, den);
        value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
        if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
            do_sync = 0;
    }

    return do_sync;
}

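/**
 * gfs2_quota_unlock - release the quota glocks held by gfs2_quota_lock()
 * @ip: the inode
 *
 * While dropping the glocks, checks each quota data object with
 * need_sync(); any whose pending change has grown close enough to the
 * limit are synced to the quota file before being released.
 */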
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
    struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
    struct gfs2_quota_data *qda[4];
    unsigned int count = 0;
    u32 x;
    int found;

    if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
        return;

    for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
        struct gfs2_quota_data *qd;
        int sync;

        qd = ip->i_qadata->qa_qd[x];
        sync = need_sync(qd);

        gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
        if (!sync)
            continue;

        spin_lock(&qd_lock);
        found = qd_check_sync(sdp, qd, NULL);
        spin_unlock(&qd_lock);

        if (!found)
            continue;

        gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (bh_get(qd)) {
            clear_bit(QDF_LOCKED, &qd->qd_flags);
            slot_put(qd);
            qd_put(qd);
            continue;
        }

        qda[count++] = qd;
    }

    if (count) {
        do_sync(count, qda);
        for (x = 0; x < count; x++)
            qd_unlock(qda[x]);
    }

    gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
    struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

    fs_info(sdp, "quota %s for %s %u\n",
        type,
        (qd->qd_id.type == USRQUOTA) ? "user" : "group",
        from_kqid(&init_user_ns, qd->qd_id));

    return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
             struct gfs2_alloc_parms *ap)
{
    struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
    struct gfs2_quota_data *qd;
    s64 value, warn, limit;
    u32 x;
    int error = 0;

    ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
    if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
        return 0;

    for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
        qd = ip->i_qadata->qa_qd[x];

        if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
              qid_eq(qd->qd_id, make_kqid_gid(gid))))
            continue;

        warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
        limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
        value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
        spin_lock(&qd_lock);
        value += qd->qd_change;
        spin_unlock(&qd_lock);

        if (limit > 0 && (limit - value) < ap->allowed)
            ap->allowed = limit - value;
        /* If we can't meet the target */
        if (limit && limit < (value + (s64)ap->target)) {
            /* If no min_target specified or we don't meet
             * min_target, return -EDQUOT */
            if (!ap->min_target || ap->min_target > ap->allowed) {
                if (!test_and_set_bit(QDF_QMSG_QUIET,
                              &qd->qd_flags)) {
                    print_message(qd, "exceeded");
                    quota_send_warning(qd->qd_id,
                               sdp->sd_vfs->s_dev,
                               QUOTA_NL_BHARDWARN);
                }
                error = -EDQUOT;
                break;
            }
        } else if (warn && warn < value &&
               time_after_eq(jiffies, qd->qd_last_warn +
                     gfs2_tune_get(sdp, gt_quota_warn_period)
                     * HZ)) {
            quota_send_warning(qd->qd_id,
                       sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
            error = print_message(qd, "warning");
            qd->qd_last_warn = jiffies;
        }
    }
    return error;
}

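/**
 * gfs2_quota_change - record a block allocation or deallocation
 * @ip: the inode the blocks were allocated to or freed from
 * @change: the signed number of blocks
 * @uid: the uid to charge
 * @gid: the gid to charge
 *
 * Applies the change, via do_qc(), to each held quota data object
 * whose ID matches @uid or @gid.  System files are exempt.
 */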
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
               kuid_t uid, kgid_t gid)
{
    struct gfs2_quota_data *qd;
    u32 x;
    struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

    if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
        gfs2_assert_warn(sdp, change))
        return;
    if (ip->i_diskflags & GFS2_DIF_SYSTEM)
        return;

    if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
                 ip->i_qadata->qa_ref > 0))
        return;
    for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
        qd = ip->i_qadata->qa_qd[x];

        if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
            qid_eq(qd->qd_id, make_kqid_gid(gid))) {
            do_qc(qd, change);
        }
    }
}

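/**
 * gfs2_quota_sync - sync all pending quota changes to the quota file
 * @sb: the superblock
 * @type: unused; all quota types are synced
 *
 * Repeatedly fishes batches of changed quota data objects off the
 * per-filesystem list and writes them out with do_sync(), bumping the
 * sync generation so each object is only synced once per pass.
 *
 * Returns: 0 on success or a negative error code
 */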
int gfs2_quota_sync(struct super_block *sb, int type)
{
    struct gfs2_sbd *sdp = sb->s_fs_info;
    struct gfs2_quota_data **qda;
    unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
    unsigned int num_qd;
    unsigned int x;
    int error = 0;

    qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
    if (!qda)
        return -ENOMEM;

    mutex_lock(&sdp->sd_quota_sync_mutex);
    sdp->sd_quota_sync_gen++;

    do {
        num_qd = 0;

        for (;;) {
            error = qd_fish(sdp, qda + num_qd);
            if (error || !qda[num_qd])
                break;
            if (++num_qd == max_qd)
                break;
        }

        if (num_qd) {
            if (!error)
                error = do_sync(num_qd, qda);
            if (!error)
                for (x = 0; x < num_qd; x++)
                    qda[x]->qd_sync_gen =
                        sdp->sd_quota_sync_gen;

            for (x = 0; x < num_qd; x++)
                qd_unlock(qda[x]);
        }
    } while (!error && num_qd == max_qd);

    mutex_unlock(&sdp->sd_quota_sync_mutex);
    kfree(qda);

    return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
    struct gfs2_quota_data *qd;
    struct gfs2_holder q_gh;
    int error;

    error = qd_get(sdp, qid, &qd);
    if (error)
        return error;

    error = do_glock(qd, FORCE, &q_gh);
    if (!error)
        gfs2_glock_dq_uninit(&q_gh);

    qd_put(qd);
    return error;
}

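/**
 * gfs2_quota_init - build in-core quota state from the quota change file
 * @sdp: the filesystem
 *
 * Called at mount time.  Allocates the slot bitmap, then scans every
 * gfs2_quota_change entry in the quota change file; for each nonzero
 * entry (a change left over from a previous mount), an in-core quota
 * data object is created so the change can be resynced.
 *
 * Returns: 0 on success or a negative error code
 */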
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
    struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
    u64 size = i_size_read(sdp->sd_qc_inode);
    unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
    unsigned int x, slot = 0;
    unsigned int found = 0;
    unsigned int hash;
    unsigned int bm_size;
    u64 dblock;
    u32 extlen = 0;
    int error;

    if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
        return -EIO;

    sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
    bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
    bm_size *= sizeof(unsigned long);
    error = -ENOMEM;
    sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
    if (sdp->sd_quota_bitmap == NULL)
        sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
                         __GFP_ZERO);
    if (!sdp->sd_quota_bitmap)
        return error;

    for (x = 0; x < blocks; x++) {
        struct buffer_head *bh;
        const struct gfs2_quota_change *qc;
        unsigned int y;

        if (!extlen) {
            extlen = 32;
            error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
            if (error)
                goto fail;
        }
        error = -EIO;
        bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
        if (!bh)
            goto fail;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
            brelse(bh);
            goto fail;
        }

        qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
        for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
             y++, slot++) {
            struct gfs2_quota_data *qd;
            s64 qc_change = be64_to_cpu(qc->qc_change);
            u32 qc_flags = be32_to_cpu(qc->qc_flags);
            enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
                        USRQUOTA : GRPQUOTA;
            struct kqid qc_id = make_kqid(&init_user_ns, qtype,
                              be32_to_cpu(qc->qc_id));
            qc++;
            if (!qc_change)
                continue;

            hash = gfs2_qd_hash(sdp, qc_id);
            qd = qd_alloc(hash, sdp, qc_id);
            if (qd == NULL) {
                brelse(bh);
                goto fail;
            }

            set_bit(QDF_CHANGE, &qd->qd_flags);
            qd->qd_change = qc_change;
            qd->qd_slot = slot;
            qd->qd_slot_count = 1;

            spin_lock(&qd_lock);
            BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
            list_add(&qd->qd_list, &sdp->sd_quota_list);
            atomic_inc(&sdp->sd_quota_count);
            spin_unlock(&qd_lock);

            spin_lock_bucket(hash);
            hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
            spin_unlock_bucket(hash);

            found++;
        }

        brelse(bh);
        dblock++;
        extlen--;
    }

    if (found)
        fs_info(sdp, "found %u quota changes\n", found);

    return 0;

fail:
    gfs2_quota_cleanup(sdp);
    return error;
}

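/**
 * gfs2_quota_cleanup - tear down all in-core quota state
 * @sdp: the filesystem
 *
 * Removes every quota data object from the per-filesystem list, the
 * LRU, and the hash table, drops the glock references, and frees the
 * slot bitmap.
 */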
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
    struct list_head *head = &sdp->sd_quota_list;
    struct gfs2_quota_data *qd;

    spin_lock(&qd_lock);
    while (!list_empty(head)) {
        qd = list_last_entry(head, struct gfs2_quota_data, qd_list);

        list_del(&qd->qd_list);

        /* Also remove if this qd exists in the reclaim list */
        list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
        atomic_dec(&sdp->sd_quota_count);
        spin_unlock(&qd_lock);

        spin_lock_bucket(qd->qd_hash);
        hlist_bl_del_rcu(&qd->qd_hlist);
        spin_unlock_bucket(qd->qd_hash);

        gfs2_assert_warn(sdp, !qd->qd_change);
        gfs2_assert_warn(sdp, !qd->qd_slot_count);
        gfs2_assert_warn(sdp, !qd->qd_bh_count);

        gfs2_glock_put(qd->qd_gl);
        call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

        spin_lock(&qd_lock);
    }
    spin_unlock(&qd_lock);

    gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

    kvfree(sdp->sd_quota_bitmap);
    sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
    if (error == 0 || error == -EROFS)
        return;
    if (!gfs2_withdrawn(sdp)) {
        if (!cmpxchg(&sdp->sd_log_error, 0, error))
            fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
        wake_up(&sdp->sd_logd_waitq);
    }
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
                   int (*fxn)(struct super_block *sb, int type),
                   unsigned long t, unsigned long *timeo,
                   unsigned int *new_timeo)
{
    if (t >= *timeo) {
        int error = fxn(sdp->sd_vfs, 0);
        quotad_error(sdp, msg, error);
        *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
    } else {
        *timeo -= t;
    }
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
    if (!sdp->sd_statfs_force_sync) {
        sdp->sd_statfs_force_sync = 1;
        wake_up(&sdp->sd_quota_wait);
    }
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 */

int gfs2_quotad(void *data)
{
    struct gfs2_sbd *sdp = data;
    struct gfs2_tune *tune = &sdp->sd_tune;
    unsigned long statfs_timeo = 0;
    unsigned long quotad_timeo = 0;
    unsigned long t = 0;
    DEFINE_WAIT(wait);

    while (!kthread_should_stop()) {
        if (gfs2_withdrawn(sdp))
            goto bypass;
        /* Update the master statfs file */
        if (sdp->sd_statfs_force_sync) {
            int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
            quotad_error(sdp, "statfs", error);
            statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
        } else
            quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
                       &statfs_timeo,
                       &tune->gt_statfs_quantum);

        /* Update quota file */
        quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                   &quotad_timeo, &tune->gt_quota_quantum);

        try_to_freeze();

bypass:
        t = min(quotad_timeo, statfs_timeo);

        prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
        if (!sdp->sd_statfs_force_sync)
            t -= schedule_timeout(t);
        else
            t = 0;
        finish_wait(&sdp->sd_quota_wait, &wait);
    }

    return 0;
}

static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
    struct gfs2_sbd *sdp = sb->s_fs_info;

    memset(state, 0, sizeof(*state));

    switch (sdp->sd_args.ar_quota) {
    case GFS2_QUOTA_ON:
        state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
        state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
        fallthrough;
    case GFS2_QUOTA_ACCOUNT:
        state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
                          QCI_SYSFILE;
        state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
                          QCI_SYSFILE;
        break;
    case GFS2_QUOTA_OFF:
        break;
    }
    if (sdp->sd_quota_inode) {
        state->s_state[USRQUOTA].ino =
                    GFS2_I(sdp->sd_quota_inode)->i_no_addr;
        state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
    }
    state->s_state[USRQUOTA].nextents = 1;  /* unsupported */
    state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
    state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
    return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
              struct qc_dqblk *fdq)
{
    struct gfs2_sbd *sdp = sb->s_fs_info;
    struct gfs2_quota_lvb *qlvb;
    struct gfs2_quota_data *qd;
    struct gfs2_holder q_gh;
    int error;

    memset(fdq, 0, sizeof(*fdq));

    if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
        return -ESRCH; /* Crazy XFS error code */

    if ((qid.type != USRQUOTA) &&
        (qid.type != GRPQUOTA))
        return -EINVAL;

    error = qd_get(sdp, qid, &qd);
    if (error)
        return error;
    error = do_glock(qd, FORCE, &q_gh);
    if (error)
        goto out;

    qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
    fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
    fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
    fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

    gfs2_glock_dq_uninit(&q_gh);
out:
    qd_put(qd);
    return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
              struct qc_dqblk *fdq)
{
    struct gfs2_sbd *sdp = sb->s_fs_info;
    struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
    struct gfs2_quota_data *qd;
    struct gfs2_holder q_gh, i_gh;
    unsigned int data_blocks, ind_blocks;
    unsigned int blocks = 0;
    int alloc_required;
    loff_t offset;
    int error;

    if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
        return -ESRCH; /* Crazy XFS error code */

    if ((qid.type != USRQUOTA) &&
        (qid.type != GRPQUOTA))
        return -EINVAL;

    if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
        return -EINVAL;

    error = qd_get(sdp, qid, &qd);
    if (error)
        return error;

    error = gfs2_qa_get(ip);
    if (error)
        goto out_put;

    inode_lock(&ip->i_inode);
    error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
    if (error)
        goto out_unlockput;
    error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
    if (error)
        goto out_q;

    /* Check for existing entry, if none then alloc new blocks */
    error = update_qd(sdp, qd);
    if (error)
        goto out_i;

    /* If nothing has changed, this is a no-op */
    if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
        ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
        fdq->d_fieldmask ^= QC_SPC_SOFT;

    if ((fdq->d_fieldmask & QC_SPC_HARD) &&
        ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
        fdq->d_fieldmask ^= QC_SPC_HARD;

    if ((fdq->d_fieldmask & QC_SPACE) &&
        ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
        fdq->d_fieldmask ^= QC_SPACE;

    if (fdq->d_fieldmask == 0)
        goto out_i;

    offset = qd2offset(qd);
    alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
    if (gfs2_is_stuffed(ip))
        alloc_required = 1;
    if (alloc_required) {
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                       &data_blocks, &ind_blocks);
        blocks = 1 + data_blocks + ind_blocks;
        ap.target = blocks;
        error = gfs2_inplace_reserve(ip, &ap);
        if (error)
            goto out_i;
        blocks += gfs2_rg_blocks(ip, blocks);
    }

    /* Some quotas span block boundaries and can update two blocks,
       adding an extra block to the transaction to handle such quotas */
    error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
    if (error)
        goto out_release;

    /* Apply changes */
    error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
    if (!error)
        clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);

    gfs2_trans_end(sdp);
out_release:
    if (alloc_required)
        gfs2_inplace_release(ip);
out_i:
    gfs2_glock_dq_uninit(&i_gh);
out_q:
    gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
    gfs2_qa_put(ip);
    inode_unlock(&ip->i_inode);
out_put:
    qd_put(qd);
    return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
    .quota_sync     = gfs2_quota_sync,
    .get_state      = gfs2_quota_get_state,
    .get_dqblk      = gfs2_get_dqblk,
    .set_dqblk      = gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
    unsigned i;

    for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
        INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}