cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xfs_trans_dquot.c (20868B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
      4 * All Rights Reserved.
      5 */
      6#include "xfs.h"
      7#include "xfs_fs.h"
      8#include "xfs_shared.h"
      9#include "xfs_format.h"
     10#include "xfs_log_format.h"
     11#include "xfs_trans_resv.h"
     12#include "xfs_mount.h"
     13#include "xfs_inode.h"
     14#include "xfs_trans.h"
     15#include "xfs_trans_priv.h"
     16#include "xfs_quota.h"
     17#include "xfs_qm.h"
     18#include "xfs_trace.h"
     19#include "xfs_error.h"
     20
     21STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
     22
     23/*
     24 * Add the locked dquot to the transaction.
     25 * The dquot must be locked, and it cannot be associated with any
     26 * transaction.
     27 */
     28void
     29xfs_trans_dqjoin(
     30	struct xfs_trans	*tp,
     31	struct xfs_dquot	*dqp)
     32{
     33	ASSERT(XFS_DQ_IS_LOCKED(dqp));
     34	ASSERT(dqp->q_logitem.qli_dquot == dqp);
     35
     36	/*
     37	 * Get a log_item_desc to point at the new item.
     38	 */
     39	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
     40}
     41
     42/*
     43 * This is called to mark the dquot as needing
     44 * to be logged when the transaction is committed.  The dquot must
     45 * already be associated with the given transaction.
     46 * Note that it marks the entire transaction as dirty. In the ordinary
     47 * case, this gets called via xfs_trans_commit, after the transaction
     48 * is already dirty. However, there's nothing to stop this from getting
     49 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
     50 * flag.
     51 */
     52void
     53xfs_trans_log_dquot(
     54	struct xfs_trans	*tp,
     55	struct xfs_dquot	*dqp)
     56{
     57	ASSERT(XFS_DQ_IS_LOCKED(dqp));
     58
     59	/* Upgrade the dquot to bigtime format if possible. */
     60	if (dqp->q_id != 0 &&
     61	    xfs_has_bigtime(tp->t_mountp) &&
     62	    !(dqp->q_type & XFS_DQTYPE_BIGTIME))
     63		dqp->q_type |= XFS_DQTYPE_BIGTIME;
     64
     65	tp->t_flags |= XFS_TRANS_DIRTY;
     66	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
     67}
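
/*
 * A minimal sketch of the calling pattern these two helpers are built for,
 * as in a direct caller like xfs_qm_scall_setqlim: lock the dquot, attach
 * it to the transaction, modify it, then mark it dirty so it is logged at
 * commit:
 *
 *	xfs_dqlock(dqp);
 *	xfs_trans_dqjoin(tp, dqp);
 *	(update the dquot's limits or counters)
 *	xfs_trans_log_dquot(tp, dqp);
 */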
     68
     69/*
     70 * Carry forward whatever is left of the quota blk reservation to
     71 * the new transaction.
     72 */
     73void
     74xfs_trans_dup_dqinfo(
     75	struct xfs_trans	*otp,
     76	struct xfs_trans	*ntp)
     77{
     78	struct xfs_dqtrx	*oq, *nq;
     79	int			i, j;
     80	struct xfs_dqtrx	*oqa, *nqa;
     81	uint64_t		blk_res_used;
     82
     83	if (!otp->t_dqinfo)
     84		return;
     85
     86	xfs_trans_alloc_dqinfo(ntp);
     87
     88	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
     89		oqa = otp->t_dqinfo->dqs[j];
     90		nqa = ntp->t_dqinfo->dqs[j];
     91		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
     92			blk_res_used = 0;
     93
     94			if (oqa[i].qt_dquot == NULL)
     95				break;
     96			oq = &oqa[i];
     97			nq = &nqa[i];
     98
     99			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
    100				blk_res_used = oq->qt_bcount_delta;
    101
    102			nq->qt_dquot = oq->qt_dquot;
    103			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
    104			nq->qt_rtbcount_delta = 0;
    105
    106			/*
    107			 * Transfer whatever is left of the reservations.
    108			 */
    109			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
    110			oq->qt_blk_res = blk_res_used;
    111
    112			nq->qt_rtblk_res = oq->qt_rtblk_res -
    113				oq->qt_rtblk_res_used;
    114			oq->qt_rtblk_res = oq->qt_rtblk_res_used;
    115
    116			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
    117			oq->qt_ino_res = oq->qt_ino_res_used;
    118
    119		}
    120	}
    121}
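
/*
 * A worked example of the carry-forward above: if the old transaction had
 * reserved 20 blocks (qt_blk_res == 20) and has so far accounted 6 of them
 * as used (qt_bcount_delta == 6), then after duplication the old dqtrx
 * keeps qt_blk_res == 6, just covering what it already used, and the new
 * transaction starts with qt_blk_res == 14, the unused remainder.
 */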
    122
    123/*
    124 * Wrapper around xfs_trans_mod_dquot() to cover user, group and project quotas.
    125 */
    126void
    127xfs_trans_mod_dquot_byino(
    128	xfs_trans_t	*tp,
    129	xfs_inode_t	*ip,
    130	uint		field,
    131	int64_t		delta)
    132{
    133	xfs_mount_t	*mp = tp->t_mountp;
    134
    135	if (!XFS_IS_QUOTA_ON(mp) ||
    136	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
    137		return;
    138
    139	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
    140		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
    141	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
    142		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
    143	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
    144		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
    145}
    146
    147STATIC struct xfs_dqtrx *
    148xfs_trans_get_dqtrx(
    149	struct xfs_trans	*tp,
    150	struct xfs_dquot	*dqp)
    151{
    152	int			i;
    153	struct xfs_dqtrx	*qa;
    154
    155	switch (xfs_dquot_type(dqp)) {
    156	case XFS_DQTYPE_USER:
    157		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
    158		break;
    159	case XFS_DQTYPE_GROUP:
    160		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
    161		break;
    162	case XFS_DQTYPE_PROJ:
    163		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
    164		break;
    165	default:
    166		return NULL;
    167	}
    168
    169	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
    170		if (qa[i].qt_dquot == NULL ||
    171		    qa[i].qt_dquot == dqp)
    172			return &qa[i];
    173	}
    174
    175	return NULL;
    176}
    177
    178/*
    179 * Make the changes in the transaction structure.
    180 * The moral equivalent to xfs_trans_mod_sb().
    181 * We don't touch any fields in the dquot, so we don't care
    182 * if it's locked or not (most of the time it won't be).
    183 */
    184void
    185xfs_trans_mod_dquot(
    186	struct xfs_trans	*tp,
    187	struct xfs_dquot	*dqp,
    188	uint			field,
    189	int64_t			delta)
    190{
    191	struct xfs_dqtrx	*qtrx;
    192
    193	ASSERT(tp);
    194	ASSERT(XFS_IS_QUOTA_ON(tp->t_mountp));
    195	qtrx = NULL;
    196
    197	if (!delta)
    198		return;
    199
    200	if (tp->t_dqinfo == NULL)
    201		xfs_trans_alloc_dqinfo(tp);
    202	/*
    203	 * Find either the first free slot or the slot that belongs
    204	 * to this dquot.
    205	 */
    206	qtrx = xfs_trans_get_dqtrx(tp, dqp);
    207	ASSERT(qtrx);
    208	if (qtrx->qt_dquot == NULL)
    209		qtrx->qt_dquot = dqp;
    210
    211	trace_xfs_trans_mod_dquot_before(qtrx);
    212	trace_xfs_trans_mod_dquot(tp, dqp, field, delta);
    213
    214	switch (field) {
    215	/* regular disk blk reservation */
    216	case XFS_TRANS_DQ_RES_BLKS:
    217		qtrx->qt_blk_res += delta;
    218		break;
    219
    220	/* inode reservation */
    221	case XFS_TRANS_DQ_RES_INOS:
    222		qtrx->qt_ino_res += delta;
    223		break;
    224
    225	/* disk blocks used. */
    226	case XFS_TRANS_DQ_BCOUNT:
    227		qtrx->qt_bcount_delta += delta;
    228		break;
    229
    230	case XFS_TRANS_DQ_DELBCOUNT:
    231		qtrx->qt_delbcnt_delta += delta;
    232		break;
    233
    234	/* Inode Count */
    235	case XFS_TRANS_DQ_ICOUNT:
    236		if (qtrx->qt_ino_res && delta > 0) {
    237			qtrx->qt_ino_res_used += delta;
    238			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
    239		}
    240		qtrx->qt_icount_delta += delta;
    241		break;
    242
    243	/* rtblk reservation */
    244	case XFS_TRANS_DQ_RES_RTBLKS:
    245		qtrx->qt_rtblk_res += delta;
    246		break;
    247
    248	/* rtblk count */
    249	case XFS_TRANS_DQ_RTBCOUNT:
    250		if (qtrx->qt_rtblk_res && delta > 0) {
    251			qtrx->qt_rtblk_res_used += delta;
    252			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
    253		}
    254		qtrx->qt_rtbcount_delta += delta;
    255		break;
    256
    257	case XFS_TRANS_DQ_DELRTBCOUNT:
    258		qtrx->qt_delrtb_delta += delta;
    259		break;
    260
    261	default:
    262		ASSERT(0);
    263	}
    264
    265	trace_xfs_trans_mod_dquot_after(qtrx);
    266}
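
/*
 * A small example of how the deltas above accumulate: a transaction that
 * reserved 8 data blocks and then allocated 3 of them plus one inode would
 * make calls like
 *
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 8);
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 3);
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_ICOUNT, 1);
 *
 * leaving qt_blk_res == 8, qt_bcount_delta == 3 and qt_icount_delta == 1
 * in the dqtrx, which xfs_trans_apply_dquot_deltas() folds into the dquot
 * at commit time.
 */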
    267
    268
    269/*
    270 * Given an array of dqtrx structures, lock all the dquots associated and join
    271 * them to the transaction, provided they have been modified.  We know that the
    272 * highest number of dquots of one type - usr, grp and prj - involved in a
    273 * transaction is 3 so we don't need to make this very generic.
    274 */
    275STATIC void
    276xfs_trans_dqlockedjoin(
    277	struct xfs_trans	*tp,
    278	struct xfs_dqtrx	*q)
    279{
    280	ASSERT(q[0].qt_dquot != NULL);
    281	if (q[1].qt_dquot == NULL) {
    282		xfs_dqlock(q[0].qt_dquot);
    283		xfs_trans_dqjoin(tp, q[0].qt_dquot);
    284	} else {
    285		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
    286		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
    287		xfs_trans_dqjoin(tp, q[0].qt_dquot);
    288		xfs_trans_dqjoin(tp, q[1].qt_dquot);
    289	}
    290}
    291
    292/* Apply dqtrx changes to the quota reservation counters. */
    293static inline void
    294xfs_apply_quota_reservation_deltas(
    295	struct xfs_dquot_res	*res,
    296	uint64_t		reserved,
    297	int64_t			res_used,
    298	int64_t			count_delta)
    299{
    300	if (reserved != 0) {
    301		/*
    302		 * Subtle math here: If reserved > res_used (the normal case),
    303		 * we're simply subtracting the unused transaction quota
    304		 * reservation from the dquot reservation.
    305		 *
    306		 * If, however, res_used > reserved, then we have allocated
    307		 * more quota blocks than were reserved for the transaction.
    308		 * We must add that excess to the dquot reservation since it
    309		 * tracks (usage + resv) and by definition we didn't reserve
    310		 * that excess.
    311		 */
    312		res->reserved -= abs(reserved - res_used);
    313	} else if (count_delta != 0) {
    314		/*
    315		 * These blks were never reserved, either inside a transaction
    316		 * or outside one (in a delayed allocation). Also, this isn't
    317		 * always a negative number since we sometimes deliberately
    318		 * skip quota reservations.
    319		 */
    320		res->reserved += count_delta;
    321	}
    322}
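
/*
 * A worked example for the common case above: a transaction reserved 10
 * blocks (reserved == 10) but only allocated 4 (res_used == 4), so the 6
 * unused blocks are released from res->reserved; the 4 used blocks stay
 * reserved because res->reserved tracks usage plus reservation and the
 * caller has already added them to res->count.  With no transaction
 * reservation at all (reserved == 0), e.g. for delayed allocations or a
 * deliberately skipped reservation, count_delta is applied to
 * res->reserved directly.
 */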
    323
    324/*
    325 * Called by xfs_trans_commit() and similar in spirit to
    326 * xfs_trans_apply_sb_deltas().
    327 * Go through all the dquots belonging to this transaction and modify the
    328 * INCORE dquot to reflect the actual usage.
    329 * Unreserve just the reservations done by this transaction.
    330 * dquot is still left locked at exit.
    331 */
    332void
    333xfs_trans_apply_dquot_deltas(
    334	struct xfs_trans	*tp)
    335{
    336	int			i, j;
    337	struct xfs_dquot	*dqp;
    338	struct xfs_dqtrx	*qtrx, *qa;
    339	int64_t			totalbdelta;
    340	int64_t			totalrtbdelta;
    341
    342	if (!tp->t_dqinfo)
    343		return;
    344
    345	ASSERT(tp->t_dqinfo);
    346	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
    347		qa = tp->t_dqinfo->dqs[j];
    348		if (qa[0].qt_dquot == NULL)
    349			continue;
    350
    351		/*
    352		 * Lock all of the dquots and join them to the transaction.
    353		 */
    354		xfs_trans_dqlockedjoin(tp, qa);
    355
    356		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
    357			uint64_t	blk_res_used;
    358
    359			qtrx = &qa[i];
    360			/*
    361			 * The array of dquots is filled
    362			 * sequentially, not sparsely.
    363			 */
    364			if ((dqp = qtrx->qt_dquot) == NULL)
    365				break;
    366
    367			ASSERT(XFS_DQ_IS_LOCKED(dqp));
    368
    369			/*
    370			 * adjust the actual number of blocks used
    371			 */
    372
    373			/*
     374			 * The issue here is that sometimes we intentionally skip the
     375			 * blkquota reservation, to be fair to users (when the amount
     376			 * is small). On the other hand,
    377			 * delayed allocs do make reservations, but that's
    378			 * outside of a transaction, so we have no
    379			 * idea how much was really reserved.
    380			 * So, here we've accumulated delayed allocation blks and
    381			 * non-delay blks. The assumption is that the
    382			 * delayed ones are always reserved (outside of a
    383			 * transaction), and the others may or may not have
    384			 * quota reservations.
    385			 */
    386			totalbdelta = qtrx->qt_bcount_delta +
    387				qtrx->qt_delbcnt_delta;
    388			totalrtbdelta = qtrx->qt_rtbcount_delta +
    389				qtrx->qt_delrtb_delta;
    390
    391			if (totalbdelta != 0 || totalrtbdelta != 0 ||
    392			    qtrx->qt_icount_delta != 0) {
    393				trace_xfs_trans_apply_dquot_deltas_before(dqp);
    394				trace_xfs_trans_apply_dquot_deltas(qtrx);
    395			}
    396
    397#ifdef DEBUG
    398			if (totalbdelta < 0)
    399				ASSERT(dqp->q_blk.count >= -totalbdelta);
    400
    401			if (totalrtbdelta < 0)
    402				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);
    403
    404			if (qtrx->qt_icount_delta < 0)
    405				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
    406#endif
    407			if (totalbdelta)
    408				dqp->q_blk.count += totalbdelta;
    409
    410			if (qtrx->qt_icount_delta)
    411				dqp->q_ino.count += qtrx->qt_icount_delta;
    412
    413			if (totalrtbdelta)
    414				dqp->q_rtb.count += totalrtbdelta;
    415
    416			if (totalbdelta != 0 || totalrtbdelta != 0 ||
    417			    qtrx->qt_icount_delta != 0)
    418				trace_xfs_trans_apply_dquot_deltas_after(dqp);
    419
    420			/*
    421			 * Get any default limits in use.
    422			 * Start/reset the timer(s) if needed.
    423			 */
    424			if (dqp->q_id) {
    425				xfs_qm_adjust_dqlimits(dqp);
    426				xfs_qm_adjust_dqtimers(dqp);
    427			}
    428
    429			dqp->q_flags |= XFS_DQFLAG_DIRTY;
    430			/*
    431			 * add this to the list of items to get logged
    432			 */
    433			xfs_trans_log_dquot(tp, dqp);
    434			/*
    435			 * Take off what's left of the original reservation.
    436			 * In case of delayed allocations, there's no
    437			 * reservation that a transaction structure knows of.
    438			 */
    439			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
    440			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
    441					qtrx->qt_blk_res, blk_res_used,
    442					qtrx->qt_bcount_delta);
    443
    444			/*
    445			 * Adjust the RT reservation.
    446			 */
    447			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
    448					qtrx->qt_rtblk_res,
    449					qtrx->qt_rtblk_res_used,
    450					qtrx->qt_rtbcount_delta);
    451
    452			/*
    453			 * Adjust the inode reservation.
    454			 */
    455			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
    456			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
    457					qtrx->qt_ino_res,
    458					qtrx->qt_ino_res_used,
    459					qtrx->qt_icount_delta);
    460
    461			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
    462			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
    463			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
    464		}
    465	}
    466}
    467
    468/*
    469 * Release the reservations, and adjust the dquots accordingly.
    470 * This is called only when the transaction is being aborted. If by
    471 * any chance we have done dquot modifications incore (ie. deltas) already,
    472 * we simply throw those away, since that's the expected behavior
    473 * when a transaction is curtailed without a commit.
    474 */
    475void
    476xfs_trans_unreserve_and_mod_dquots(
    477	struct xfs_trans	*tp)
    478{
    479	int			i, j;
    480	struct xfs_dquot	*dqp;
    481	struct xfs_dqtrx	*qtrx, *qa;
    482	bool			locked;
    483
    484	if (!tp->t_dqinfo)
    485		return;
    486
    487	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
    488		qa = tp->t_dqinfo->dqs[j];
    489
    490		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
    491			qtrx = &qa[i];
    492			/*
    493			 * We assume that the array of dquots is filled
    494			 * sequentially, not sparsely.
    495			 */
    496			if ((dqp = qtrx->qt_dquot) == NULL)
    497				break;
    498			/*
    499			 * Unreserve the original reservation. We don't care
    500			 * about the number of blocks used field, or deltas.
    501			 * Also we don't bother to zero the fields.
    502			 */
    503			locked = false;
    504			if (qtrx->qt_blk_res) {
    505				xfs_dqlock(dqp);
    506				locked = true;
    507				dqp->q_blk.reserved -=
    508					(xfs_qcnt_t)qtrx->qt_blk_res;
    509			}
    510			if (qtrx->qt_ino_res) {
    511				if (!locked) {
    512					xfs_dqlock(dqp);
    513					locked = true;
    514				}
    515				dqp->q_ino.reserved -=
    516					(xfs_qcnt_t)qtrx->qt_ino_res;
    517			}
    518
    519			if (qtrx->qt_rtblk_res) {
    520				if (!locked) {
    521					xfs_dqlock(dqp);
    522					locked = true;
    523				}
    524				dqp->q_rtb.reserved -=
    525					(xfs_qcnt_t)qtrx->qt_rtblk_res;
    526			}
    527			if (locked)
    528				xfs_dqunlock(dqp);
    529
    530		}
    531	}
    532}
    533
    534STATIC void
    535xfs_quota_warn(
    536	struct xfs_mount	*mp,
    537	struct xfs_dquot	*dqp,
    538	int			type)
    539{
    540	enum quota_type		qtype;
    541
    542	switch (xfs_dquot_type(dqp)) {
    543	case XFS_DQTYPE_PROJ:
    544		qtype = PRJQUOTA;
    545		break;
    546	case XFS_DQTYPE_USER:
    547		qtype = USRQUOTA;
    548		break;
    549	case XFS_DQTYPE_GROUP:
    550		qtype = GRPQUOTA;
    551		break;
    552	default:
    553		return;
    554	}
    555
    556	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
    557			   mp->m_super->s_dev, type);
    558}
    559
    560/*
    561 * Decide if we can make an additional reservation against a quota resource.
    562 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
    563 *
    564 * Note that we assume the numeric difference between the inode and block
    565 * warning codes will always be 3, since that is now userspace ABI.  This check
    566 * never decreases the quota reservation, so the *BELOW messages are irrelevant.
    567 */
    568static inline int
    569xfs_dqresv_check(
    570	struct xfs_dquot_res	*res,
    571	struct xfs_quota_limits	*qlim,
    572	int64_t			delta,
    573	bool			*fatal)
    574{
    575	xfs_qcnt_t		hardlimit = res->hardlimit;
    576	xfs_qcnt_t		softlimit = res->softlimit;
    577	xfs_qcnt_t		total_count = res->reserved + delta;
    578
    579	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN + 3);
    580	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
    581	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN + 3);
    582
    583	*fatal = false;
    584	if (delta <= 0)
    585		return QUOTA_NL_NOWARN;
    586
    587	if (!hardlimit)
    588		hardlimit = qlim->hard;
    589	if (!softlimit)
    590		softlimit = qlim->soft;
    591
    592	if (hardlimit && total_count > hardlimit) {
    593		*fatal = true;
    594		return QUOTA_NL_IHARDWARN;
    595	}
    596
    597	if (softlimit && total_count > softlimit) {
    598		time64_t	now = ktime_get_real_seconds();
    599
    600		if (res->timer != 0 && now > res->timer) {
    601			*fatal = true;
    602			return QUOTA_NL_ISOFTLONGWARN;
    603		}
    604
    605		return QUOTA_NL_ISOFTWARN;
    606	}
    607
    608	return QUOTA_NL_NOWARN;
    609}
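
/*
 * A worked example of the checks above: with softlimit == 90 and
 * hardlimit == 100, a dquot with res->reserved == 95 asking for delta == 10
 * would hit 105 > hardlimit and get a fatal QUOTA_NL_IHARDWARN; asking for
 * delta == 3 would hit 98 > softlimit and get a non-fatal QUOTA_NL_ISOFTWARN,
 * unless the grace timer has already expired, in which case the fatal
 * QUOTA_NL_ISOFTLONGWARN is returned instead.  Callers checking block limits
 * add 3 to translate these inode codes into the block warning codes.
 */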
    610
    611/*
    612 * This reserves disk blocks and inodes against a dquot.
    613 * Flags indicate if the dquot is to be locked here and also
    614 * if the blk reservation is for RT or regular blocks.
    615 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
    616 */
    617STATIC int
    618xfs_trans_dqresv(
    619	struct xfs_trans	*tp,
    620	struct xfs_mount	*mp,
    621	struct xfs_dquot	*dqp,
    622	int64_t			nblks,
    623	long			ninos,
    624	uint			flags)
    625{
    626	struct xfs_quotainfo	*q = mp->m_quotainfo;
    627	struct xfs_def_quota	*defq;
    628	struct xfs_dquot_res	*blkres;
    629	struct xfs_quota_limits	*qlim;
    630
    631	xfs_dqlock(dqp);
    632
    633	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
    634
    635	if (flags & XFS_TRANS_DQ_RES_BLKS) {
    636		blkres = &dqp->q_blk;
    637		qlim = &defq->blk;
    638	} else {
    639		blkres = &dqp->q_rtb;
    640		qlim = &defq->rtb;
    641	}
    642
    643	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
    644	    xfs_dquot_is_enforced(dqp)) {
    645		int		quota_nl;
    646		bool		fatal;
    647
    648		/*
    649		 * dquot is locked already. See if we'd go over the hardlimit
    650		 * or exceed the timelimit if we'd reserve resources.
    651		 */
    652		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
    653		if (quota_nl != QUOTA_NL_NOWARN) {
    654			/*
    655			 * Quota block warning codes are 3 more than the inode
    656			 * codes, which we check above.
    657			 */
    658			xfs_quota_warn(mp, dqp, quota_nl + 3);
    659			if (fatal)
    660				goto error_return;
    661		}
    662
    663		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
    664				&fatal);
    665		if (quota_nl != QUOTA_NL_NOWARN) {
    666			xfs_quota_warn(mp, dqp, quota_nl);
    667			if (fatal)
    668				goto error_return;
    669		}
    670	}
    671
    672	/*
    673	 * Change the reservation, but not the actual usage.
    674	 * Note that q_blk.reserved = q_blk.count + resv
    675	 */
    676	blkres->reserved += (xfs_qcnt_t)nblks;
    677	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;
    678
    679	/*
    680	 * note the reservation amt in the trans struct too,
    681	 * so that the transaction knows how much was reserved by
    682	 * it against this particular dquot.
    683	 * We don't do this when we are reserving for a delayed allocation,
    684	 * because we don't have the luxury of a transaction envelope then.
    685	 */
    686	if (tp) {
    687		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
    688		xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
    689				    nblks);
    690		xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
    691	}
    692
    693	if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
    694	    XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
    695	    XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
    696		goto error_corrupt;
    697
    698	xfs_dqunlock(dqp);
    699	return 0;
    700
    701error_return:
    702	xfs_dqunlock(dqp);
    703	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
    704		return -ENOSPC;
    705	return -EDQUOT;
    706error_corrupt:
    707	xfs_dqunlock(dqp);
    708	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
    709	return -EFSCORRUPTED;
    710}
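
/*
 * A short numeric illustration of the invariant noted above
 * (q_blk.reserved == q_blk.count + resv): if a dquot starts with
 * count == 50 and reserved == 50, reserving nblks == 8 here bumps
 * reserved to 58 while count stays at 50.  When the transaction commits
 * and the blocks are actually allocated, xfs_trans_apply_dquot_deltas()
 * adds 8 to count and consumes the transaction's reservation, leaving
 * count == 58 and reserved == 58.
 */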
    711
    712
    713/*
    714 * Given dquot(s), make disk block and/or inode reservations against them.
    715 * The fact that this does the reservation against user, group and
    716 * project quotas is important, because this follows an all-or-nothing
    717 * approach.
    718 *
    719 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
    720 *	   For project quotas, failures return -ENOSPC instead of -EDQUOT.
    721 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
    722 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
    723 * dquots are unlocked on return, if they were not locked by caller.
    724 */
    725int
    726xfs_trans_reserve_quota_bydquots(
    727	struct xfs_trans	*tp,
    728	struct xfs_mount	*mp,
    729	struct xfs_dquot	*udqp,
    730	struct xfs_dquot	*gdqp,
    731	struct xfs_dquot	*pdqp,
    732	int64_t			nblks,
    733	long			ninos,
    734	uint			flags)
    735{
    736	int		error;
    737
    738	if (!XFS_IS_QUOTA_ON(mp))
    739		return 0;
    740
    741	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
    742
    743	if (udqp) {
    744		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
    745		if (error)
    746			return error;
    747	}
    748
    749	if (gdqp) {
    750		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
    751		if (error)
    752			goto unwind_usr;
    753	}
    754
    755	if (pdqp) {
    756		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
    757		if (error)
    758			goto unwind_grp;
    759	}
    760
    761	/*
    762	 * Didn't change anything critical, so no need to log.
    763	 */
    764	return 0;
    765
    766unwind_grp:
    767	flags |= XFS_QMOPT_FORCE_RES;
    768	if (gdqp)
    769		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
    770unwind_usr:
    771	flags |= XFS_QMOPT_FORCE_RES;
    772	if (udqp)
    773		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
    774	return error;
    775}
    776
    777
    778/*
    779 * Lock the dquot and change the reservation if we can.
    780 * This doesn't change the actual usage, just the reservation.
    781 * The inode sent in is locked.
    782 */
    783int
    784xfs_trans_reserve_quota_nblks(
    785	struct xfs_trans	*tp,
    786	struct xfs_inode	*ip,
    787	int64_t			dblocks,
    788	int64_t			rblocks,
    789	bool			force)
    790{
    791	struct xfs_mount	*mp = ip->i_mount;
    792	unsigned int		qflags = 0;
    793	int			error;
    794
    795	if (!XFS_IS_QUOTA_ON(mp))
    796		return 0;
    797
    798	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
    799	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
    800
    801	if (force)
    802		qflags |= XFS_QMOPT_FORCE_RES;
    803
    804	/* Reserve data device quota against the inode's dquots. */
    805	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
    806			ip->i_gdquot, ip->i_pdquot, dblocks, 0,
    807			XFS_QMOPT_RES_REGBLKS | qflags);
    808	if (error)
    809		return error;
    810
    811	/* Do the same but for realtime blocks. */
    812	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
    813			ip->i_gdquot, ip->i_pdquot, rblocks, 0,
    814			XFS_QMOPT_RES_RTBLKS | qflags);
    815	if (error) {
    816		xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
    817				ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
    818				XFS_QMOPT_RES_REGBLKS);
    819		return error;
    820	}
    821
    822	return 0;
    823}
    824
    825/* Change the quota reservations for an inode creation activity. */
    826int
    827xfs_trans_reserve_quota_icreate(
    828	struct xfs_trans	*tp,
    829	struct xfs_dquot	*udqp,
    830	struct xfs_dquot	*gdqp,
    831	struct xfs_dquot	*pdqp,
    832	int64_t			dblocks)
    833{
    834	struct xfs_mount	*mp = tp->t_mountp;
    835
    836	if (!XFS_IS_QUOTA_ON(mp))
    837		return 0;
    838
    839	return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
    840			dblocks, 1, XFS_QMOPT_RES_REGBLKS);
    841}
    842
    843STATIC void
    844xfs_trans_alloc_dqinfo(
    845	xfs_trans_t	*tp)
    846{
    847	tp->t_dqinfo = kmem_cache_zalloc(xfs_dqtrx_cache,
    848					 GFP_KERNEL | __GFP_NOFAIL);
    849}
    850
    851void
    852xfs_trans_free_dqinfo(
    853	xfs_trans_t	*tp)
    854{
    855	if (!tp->t_dqinfo)
    856		return;
    857	kmem_cache_free(xfs_dqtrx_cache, tp->t_dqinfo);
    858	tp->t_dqinfo = NULL;
    859}