Diffstat (limited to 'fs/xfs/quota')
-rw-r--r--  fs/xfs/quota/xfs_dquot.c       | 193
-rw-r--r--  fs/xfs/quota/xfs_dquot.h       |  35
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c  |  30
-rw-r--r--  fs/xfs/quota/xfs_qm.c          | 609
-rw-r--r--  fs/xfs/quota/xfs_qm.h          |  23
-rw-r--r--  fs/xfs/quota/xfs_qm_stats.c    |   2
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 165
-rw-r--r--  fs/xfs/quota/xfs_quota_priv.h  | 102
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c |  29
9 files changed, 431 insertions, 757 deletions
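
The bulk of the patch below is a mechanical conversion: the open-coded xfs_dqmarker/xfs_dqlink pointer chains (dq_flnext/dq_flprev, HL_NEXT/HL_PREVP, MPL_NEXT/MPL_PREVP) and their XQM_*_INSERT/REMOVE macros are replaced by standard struct list_head members (q_freelist, q_mplist, q_hashlist) guarded by plain mutexes, with explicit counters (qm_dqfrlist_cnt, qi_dquots) taking over from qh_nelems. The standalone sketch that follows shows that before/after pattern on a toy structure; the dq/dqfrlist names and helpers are illustrative stand-ins built on a minimal userspace list_head, not the XFS identifiers or <linux/list.h> itself.

/*
 * Minimal userspace sketch of the list conversion pattern in this patch.
 * Names below (dq, dqfrlist, dq_free, dq_reclaim_one) are illustrative
 * only; the list helpers are a simplified stand-in for <linux/list.h>.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev; e->next = h;
	h->prev->next = e; h->prev = e;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next; e->next->prev = e->prev;
	INIT_LIST_HEAD(e);
}

#define list_entry(p, type, member) \
	((type *)((char *)(p) - offsetof(type, member)))

/* Toy dquot: one embedded list_head per list it can sit on, mirroring the
 * new q_freelist/q_mplist/q_hashlist members of struct xfs_dquot. */
struct dq {
	int			id;
	struct list_head	freelist;	/* was dq_flnext/dq_flprev */
};

static struct list_head	dqfrlist;	/* was xfs_frlist_t qh_next/qh_prev */
static int		dqfrlist_cnt;	/* was qh_nelems */

static void dq_free(struct dq *d)		/* was XQM_FREELIST_INSERT() */
{
	list_add_tail(&d->freelist, &dqfrlist);
	dqfrlist_cnt++;
}

static struct dq *dq_reclaim_one(void)		/* was XQM_FREELIST_REMOVE() */
{
	struct dq *d;

	if (list_empty(&dqfrlist))
		return NULL;
	d = list_entry(dqfrlist.next, struct dq, freelist);	/* LRU entry */
	list_del_init(&d->freelist);
	dqfrlist_cnt--;
	return d;
}

int main(void)
{
	struct dq a = { .id = 1 }, b = { .id = 2 };

	INIT_LIST_HEAD(&dqfrlist);
	INIT_LIST_HEAD(&a.freelist);
	INIT_LIST_HEAD(&b.freelist);

	dq_free(&a);
	dq_free(&b);
	assert(dqfrlist_cnt == 2);

	printf("reclaimed dquot %d\n", dq_reclaim_one()->id);	/* prints 1 */
	assert(list_empty(&a.freelist));	/* off the list again */
	assert(!list_empty(&b.freelist));	/* still queued */
	return 0;
}

Because list_del_init() re-initializes the unlinked node, list_empty(&dqp->q_freelist) can stand in for the old XFS_DQ_IS_ON_FREELIST() pointer test, which is how the ASSERT()s in xfs_dquot.c are rewritten in the diff below.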
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 5f79dd78626b..b89ec5df0129 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -101,7 +101,7 @@ xfs_qm_dqinit( * No need to re-initialize these if this is a reclaimed dquot. */ if (brandnewdquot) { - dqp->dq_flnext = dqp->dq_flprev = dqp; + INIT_LIST_HEAD(&dqp->q_freelist); mutex_init(&dqp->q_qlock); init_waitqueue_head(&dqp->q_pinwait); @@ -119,20 +119,20 @@ xfs_qm_dqinit( * Only the q_core portion was zeroed in dqreclaim_one(). * So, we need to reset others. */ - dqp->q_nrefs = 0; - dqp->q_blkno = 0; - dqp->MPL_NEXT = dqp->HL_NEXT = NULL; - dqp->HL_PREVP = dqp->MPL_PREVP = NULL; - dqp->q_bufoffset = 0; - dqp->q_fileoffset = 0; - dqp->q_transp = NULL; - dqp->q_gdquot = NULL; - dqp->q_res_bcount = 0; - dqp->q_res_icount = 0; - dqp->q_res_rtbcount = 0; - atomic_set(&dqp->q_pincount, 0); - dqp->q_hash = NULL; - ASSERT(dqp->dq_flnext == dqp->dq_flprev); + dqp->q_nrefs = 0; + dqp->q_blkno = 0; + INIT_LIST_HEAD(&dqp->q_mplist); + INIT_LIST_HEAD(&dqp->q_hashlist); + dqp->q_bufoffset = 0; + dqp->q_fileoffset = 0; + dqp->q_transp = NULL; + dqp->q_gdquot = NULL; + dqp->q_res_bcount = 0; + dqp->q_res_icount = 0; + dqp->q_res_rtbcount = 0; + atomic_set(&dqp->q_pincount, 0); + dqp->q_hash = NULL; + ASSERT(list_empty(&dqp->q_freelist)); trace_xfs_dqreuse(dqp); } @@ -158,7 +158,7 @@ void xfs_qm_dqdestroy( xfs_dquot_t *dqp) { - ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp)); + ASSERT(list_empty(&dqp->q_freelist)); mutex_destroy(&dqp->q_qlock); sv_destroy(&dqp->q_pinwait); @@ -252,7 +252,7 @@ xfs_qm_adjust_dqtimers( (be64_to_cpu(d->d_bcount) >= be64_to_cpu(d->d_blk_hardlimit)))) { d->d_btimer = cpu_to_be32(get_seconds() + - XFS_QI_BTIMELIMIT(mp)); + mp->m_quotainfo->qi_btimelimit); } else { d->d_bwarns = 0; } @@ -275,7 +275,7 @@ xfs_qm_adjust_dqtimers( (be64_to_cpu(d->d_icount) >= be64_to_cpu(d->d_ino_hardlimit)))) { d->d_itimer = cpu_to_be32(get_seconds() + - XFS_QI_ITIMELIMIT(mp)); + mp->m_quotainfo->qi_itimelimit); } else { d->d_iwarns = 0; } @@ -298,7 +298,7 @@ xfs_qm_adjust_dqtimers( (be64_to_cpu(d->d_rtbcount) >= be64_to_cpu(d->d_rtb_hardlimit)))) { d->d_rtbtimer = cpu_to_be32(get_seconds() + - XFS_QI_RTBTIMELIMIT(mp)); + mp->m_quotainfo->qi_rtbtimelimit); } else { d->d_rtbwarns = 0; } @@ -325,6 +325,7 @@ xfs_qm_init_dquot_blk( uint type, xfs_buf_t *bp) { + struct xfs_quotainfo *q = mp->m_quotainfo; xfs_dqblk_t *d; int curid, i; @@ -337,16 +338,16 @@ xfs_qm_init_dquot_blk( /* * ID of the first dquot in the block - id's are zero based. */ - curid = id - (id % XFS_QM_DQPERBLK(mp)); + curid = id - (id % q->qi_dqperchunk); ASSERT(curid >= 0); - memset(d, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp))); - for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++) + memset(d, 0, BBTOB(q->qi_dqchunklen)); + for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) xfs_qm_dqinit_core(curid, type, d); xfs_trans_dquot_buf(tp, bp, (type & XFS_DQ_USER ? XFS_BLI_UDQUOT_BUF : ((type & XFS_DQ_PROJ) ? 
XFS_BLI_PDQUOT_BUF : XFS_BLI_GDQUOT_BUF))); - xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1); + xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); } @@ -419,7 +420,7 @@ xfs_qm_dqalloc( /* now we can just get the buffer (there's nothing to read yet) */ bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno, - XFS_QI_DQCHUNKLEN(mp), + mp->m_quotainfo->qi_dqchunklen, 0); if (!bp || (error = XFS_BUF_GETERROR(bp))) goto error1; @@ -500,7 +501,8 @@ xfs_qm_dqtobp( */ if (dqp->q_blkno == (xfs_daddr_t) 0) { /* We use the id as an index */ - dqp->q_fileoffset = (xfs_fileoff_t)id / XFS_QM_DQPERBLK(mp); + dqp->q_fileoffset = (xfs_fileoff_t)id / + mp->m_quotainfo->qi_dqperchunk; nmaps = 1; quotip = XFS_DQ_TO_QIP(dqp); xfs_ilock(quotip, XFS_ILOCK_SHARED); @@ -529,7 +531,7 @@ xfs_qm_dqtobp( /* * offset of dquot in the (fixed sized) dquot chunk. */ - dqp->q_bufoffset = (id % XFS_QM_DQPERBLK(mp)) * + dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * sizeof(xfs_dqblk_t); if (map.br_startblock == HOLESTARTBLOCK) { /* @@ -559,15 +561,13 @@ xfs_qm_dqtobp( * Read in the buffer, unless we've just done the allocation * (in which case we already have the buf). */ - if (! newdquot) { + if (!newdquot) { trace_xfs_dqtobp_read(dqp); - if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, - dqp->q_blkno, - XFS_QI_DQCHUNKLEN(mp), - 0, &bp))) { - return (error); - } + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + dqp->q_blkno, + mp->m_quotainfo->qi_dqchunklen, + 0, &bp); if (error || !bp) return XFS_ERROR(error); } @@ -689,14 +689,14 @@ xfs_qm_idtodq( tp = NULL; if (flags & XFS_QMOPT_DQALLOC) { tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC); - if ((error = xfs_trans_reserve(tp, - XFS_QM_DQALLOC_SPACE_RES(mp), - XFS_WRITE_LOG_RES(mp) + - BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1 + - 128, - 0, - XFS_TRANS_PERM_LOG_RES, - XFS_WRITE_LOG_COUNT))) { + error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp), + XFS_WRITE_LOG_RES(mp) + + BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + + 128, + 0, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + if (error) { cancelflags = 0; goto error0; } @@ -751,7 +751,6 @@ xfs_qm_dqlookup( { xfs_dquot_t *dqp; uint flist_locked; - xfs_dquot_t *d; ASSERT(mutex_is_locked(&qh->qh_lock)); @@ -760,7 +759,7 @@ xfs_qm_dqlookup( /* * Traverse the hashchain looking for a match */ - for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) { + list_for_each_entry(dqp, &qh->qh_list, q_hashlist) { /* * We already have the hashlock. We don't need the * dqlock to look at the id field of the dquot, since the @@ -772,12 +771,12 @@ xfs_qm_dqlookup( /* * All in core dquots must be on the dqlist of mp */ - ASSERT(dqp->MPL_PREVP != NULL); + ASSERT(!list_empty(&dqp->q_mplist)); xfs_dqlock(dqp); if (dqp->q_nrefs == 0) { - ASSERT (XFS_DQ_IS_ON_FREELIST(dqp)); - if (! 
xfs_qm_freelist_lock_nowait(xfs_Gqm)) { + ASSERT(!list_empty(&dqp->q_freelist)); + if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) { trace_xfs_dqlookup_want(dqp); /* @@ -787,7 +786,7 @@ xfs_qm_dqlookup( */ dqp->dq_flags |= XFS_DQ_WANT; xfs_dqunlock(dqp); - xfs_qm_freelist_lock(xfs_Gqm); + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); xfs_dqlock(dqp); dqp->dq_flags &= ~(XFS_DQ_WANT); } @@ -802,46 +801,28 @@ xfs_qm_dqlookup( if (flist_locked) { if (dqp->q_nrefs != 0) { - xfs_qm_freelist_unlock(xfs_Gqm); + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); flist_locked = B_FALSE; } else { - /* - * take it off the freelist - */ + /* take it off the freelist */ trace_xfs_dqlookup_freelist(dqp); - XQM_FREELIST_REMOVE(dqp); - /* xfs_qm_freelist_print(&(xfs_Gqm-> - qm_dqfreelist), - "after removal"); */ + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; } } - /* - * grab a reference - */ XFS_DQHOLD(dqp); if (flist_locked) - xfs_qm_freelist_unlock(xfs_Gqm); + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); /* * move the dquot to the front of the hashchain */ ASSERT(mutex_is_locked(&qh->qh_lock)); - if (dqp->HL_PREVP != &qh->qh_next) { - trace_xfs_dqlookup_move(dqp); - if ((d = dqp->HL_NEXT)) - d->HL_PREVP = dqp->HL_PREVP; - *(dqp->HL_PREVP) = d; - d = qh->qh_next; - d->HL_PREVP = &dqp->HL_NEXT; - dqp->HL_NEXT = d; - dqp->HL_PREVP = &qh->qh_next; - qh->qh_next = dqp; - } + list_move(&dqp->q_hashlist, &qh->qh_list); trace_xfs_dqlookup_done(dqp); *O_dqpp = dqp; - ASSERT(mutex_is_locked(&qh->qh_lock)); - return (0); + return 0; } } @@ -975,16 +956,17 @@ xfs_qm_dqget( */ if (ip) { xfs_ilock(ip, XFS_ILOCK_EXCL); - if (! XFS_IS_DQTYPE_ON(mp, type)) { - /* inode stays locked on return */ - xfs_qm_dqdestroy(dqp); - return XFS_ERROR(ESRCH); - } + /* * A dquot could be attached to this inode by now, since * we had dropped the ilock. */ if (type == XFS_DQ_USER) { + if (!XFS_IS_UQUOTA_ON(mp)) { + /* inode stays locked on return */ + xfs_qm_dqdestroy(dqp); + return XFS_ERROR(ESRCH); + } if (ip->i_udquot) { xfs_qm_dqdestroy(dqp); dqp = ip->i_udquot; @@ -992,6 +974,11 @@ xfs_qm_dqget( goto dqret; } } else { + if (!XFS_IS_OQUOTA_ON(mp)) { + /* inode stays locked on return */ + xfs_qm_dqdestroy(dqp); + return XFS_ERROR(ESRCH); + } if (ip->i_gdquot) { xfs_qm_dqdestroy(dqp); dqp = ip->i_gdquot; @@ -1033,13 +1020,14 @@ xfs_qm_dqget( */ ASSERT(mutex_is_locked(&h->qh_lock)); dqp->q_hash = h; - XQM_HASHLIST_INSERT(h, dqp); + list_add(&dqp->q_hashlist, &h->qh_list); + h->qh_version++; /* * Attach this dquot to this filesystem's list of all dquots, * kept inside the mount structure in m_quotainfo field */ - xfs_qm_mplist_lock(mp); + mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); /* * We return a locked dquot to the caller, with a reference taken @@ -1047,9 +1035,9 @@ xfs_qm_dqget( xfs_dqlock(dqp); dqp->q_nrefs = 1; - XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp); - - xfs_qm_mplist_unlock(mp); + list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist); + mp->m_quotainfo->qi_dquots++; + mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); mutex_unlock(&h->qh_lock); dqret: ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); @@ -1086,10 +1074,10 @@ xfs_qm_dqput( * drop the dqlock and acquire the freelist and dqlock * in the right order; but try to get it out-of-order first */ - if (! 
xfs_qm_freelist_lock_nowait(xfs_Gqm)) { + if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) { trace_xfs_dqput_wait(dqp); xfs_dqunlock(dqp); - xfs_qm_freelist_lock(xfs_Gqm); + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); xfs_dqlock(dqp); } @@ -1100,10 +1088,8 @@ xfs_qm_dqput( if (--dqp->q_nrefs == 0) { trace_xfs_dqput_free(dqp); - /* - * insert at end of the freelist. - */ - XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp); + list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); + xfs_Gqm->qm_dqfrlist_cnt++; /* * If we just added a udquot to the freelist, then @@ -1118,10 +1104,6 @@ xfs_qm_dqput( xfs_dqlock(gdqp); dqp->q_gdquot = NULL; } - - /* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist), - "@@@@@++ Free list (after append) @@@@@+"); - */ } xfs_dqunlock(dqp); @@ -1133,7 +1115,7 @@ xfs_qm_dqput( break; dqp = gdqp; } - xfs_qm_freelist_unlock(xfs_Gqm); + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); } /* @@ -1386,10 +1368,10 @@ int xfs_qm_dqpurge( xfs_dquot_t *dqp) { - xfs_dqhash_t *thishash; + xfs_dqhash_t *qh = dqp->q_hash; xfs_mount_t *mp = dqp->q_mount; - ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); + ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock)); ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock)); xfs_dqlock(dqp); @@ -1407,7 +1389,7 @@ xfs_qm_dqpurge( return (1); } - ASSERT(XFS_DQ_IS_ON_FREELIST(dqp)); + ASSERT(!list_empty(&dqp->q_freelist)); /* * If we're turning off quotas, we have to make sure that, for @@ -1452,14 +1434,16 @@ xfs_qm_dqpurge( ASSERT(XFS_FORCED_SHUTDOWN(mp) || !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); - thishash = dqp->q_hash; - XQM_HASHLIST_REMOVE(thishash, dqp); - XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp); + list_del_init(&dqp->q_hashlist); + qh->qh_version++; + list_del_init(&dqp->q_mplist); + mp->m_quotainfo->qi_dqreclaims++; + mp->m_quotainfo->qi_dquots--; /* * XXX Move this to the front of the freelist, if we can get the * freelist lock. */ - ASSERT(XFS_DQ_IS_ON_FREELIST(dqp)); + ASSERT(!list_empty(&dqp->q_freelist)); dqp->q_mount = NULL; dqp->q_hash = NULL; @@ -1467,7 +1451,7 @@ xfs_qm_dqpurge( memset(&dqp->q_core, 0, sizeof(dqp->q_core)); xfs_dqfunlock(dqp); xfs_dqunlock(dqp); - mutex_unlock(&thishash->qh_lock); + mutex_unlock(&qh->qh_lock); return (0); } @@ -1517,6 +1501,7 @@ void xfs_qm_dqflock_pushbuf_wait( xfs_dquot_t *dqp) { + xfs_mount_t *mp = dqp->q_mount; xfs_buf_t *bp; /* @@ -1525,14 +1510,14 @@ xfs_qm_dqflock_pushbuf_wait( * out immediately. We'll be able to acquire * the flush lock when the I/O completes. 
*/ - bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno, - XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK); + bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno, + mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); if (!bp) goto out_lock; if (XFS_BUF_ISDELAYWRITE(bp)) { if (XFS_BUF_ISPINNED(bp)) - xfs_log_force(dqp->q_mount, 0); + xfs_log_force(mp, 0); xfs_buf_delwri_promote(bp); wake_up_process(bp->b_target->bt_task); } diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index a0f7da586d1b..5da3a23b820d 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h @@ -33,40 +33,23 @@ * The hash chain headers (hash buckets) */ typedef struct xfs_dqhash { - struct xfs_dquot *qh_next; + struct list_head qh_list; struct mutex qh_lock; uint qh_version; /* ever increasing version */ uint qh_nelems; /* number of dquots on the list */ } xfs_dqhash_t; -typedef struct xfs_dqlink { - struct xfs_dquot *ql_next; /* forward link */ - struct xfs_dquot **ql_prevp; /* pointer to prev ql_next */ -} xfs_dqlink_t; - struct xfs_mount; struct xfs_trans; /* - * This is the marker which is designed to occupy the first few - * bytes of the xfs_dquot_t structure. Even inside this, the freelist pointers - * must come first. - * This serves as the marker ("sentinel") when we have to restart list - * iterations because of locking considerations. - */ -typedef struct xfs_dqmarker { - struct xfs_dquot*dqm_flnext; /* link to freelist: must be first */ - struct xfs_dquot*dqm_flprev; - xfs_dqlink_t dqm_mplist; /* link to mount's list of dquots */ - xfs_dqlink_t dqm_hashlist; /* link to the hash chain */ - uint dqm_flags; /* various flags (XFS_DQ_*) */ -} xfs_dqmarker_t; - -/* * The incore dquot structure */ typedef struct xfs_dquot { - xfs_dqmarker_t q_lists; /* list ptrs, q_flags (marker) */ + uint dq_flags; /* various flags (XFS_DQ_*) */ + struct list_head q_freelist; /* global free list of dquots */ + struct list_head q_mplist; /* mount's list of dquots */ + struct list_head q_hashlist; /* gloabl hash list of dquots */ xfs_dqhash_t *q_hash; /* the hashchain header */ struct xfs_mount*q_mount; /* filesystem this relates to */ struct xfs_trans*q_transp; /* trans this belongs to currently */ @@ -87,13 +70,6 @@ typedef struct xfs_dquot { wait_queue_head_t q_pinwait; /* dquot pinning wait queue */ } xfs_dquot_t; - -#define dq_flnext q_lists.dqm_flnext -#define dq_flprev q_lists.dqm_flprev -#define dq_mplist q_lists.dqm_mplist -#define dq_hashlist q_lists.dqm_hashlist -#define dq_flags q_lists.dqm_flags - /* * Lock hierarchy for q_qlock: * XFS_QLOCK_NORMAL is the implicit default, @@ -127,7 +103,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp) } #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) -#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) #define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ) diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 4e4ee9a57194..8d89a24ae324 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c @@ -107,8 +107,7 @@ xfs_qm_dquot_logitem_pin( /* ARGSUSED */ STATIC void xfs_qm_dquot_logitem_unpin( - xfs_dq_logitem_t *logitem, - int stale) + xfs_dq_logitem_t *logitem) { xfs_dquot_t *dqp = logitem->qli_dquot; @@ -123,7 +122,7 @@ xfs_qm_dquot_logitem_unpin_remove( xfs_dq_logitem_t *logitem, xfs_trans_t *tp) { - xfs_qm_dquot_logitem_unpin(logitem, 0); + 
xfs_qm_dquot_logitem_unpin(logitem); } /* @@ -228,7 +227,7 @@ xfs_qm_dquot_logitem_pushbuf( } mp = dqp->q_mount; bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno, - XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK); + mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); xfs_dqunlock(dqp); if (!bp) return; @@ -329,8 +328,7 @@ static struct xfs_item_ops xfs_dquot_item_ops = { .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) xfs_qm_dquot_logitem_format, .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin, - .iop_unpin = (void(*)(xfs_log_item_t*, int)) - xfs_qm_dquot_logitem_unpin, + .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin, .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) xfs_qm_dquot_logitem_unpin_remove, .iop_trylock = (uint(*)(xfs_log_item_t*)) @@ -357,9 +355,8 @@ xfs_qm_dquot_logitem_init( xfs_dq_logitem_t *lp; lp = &dqp->q_logitem; - lp->qli_item.li_type = XFS_LI_DQUOT; - lp->qli_item.li_ops = &xfs_dquot_item_ops; - lp->qli_item.li_mountp = dqp->q_mount; + xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT, + &xfs_dquot_item_ops); lp->qli_dquot = dqp; lp->qli_format.qlf_type = XFS_LI_DQUOT; lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id); @@ -426,7 +423,7 @@ xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf) */ /*ARGSUSED*/ STATIC void -xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale) +xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf) { return; } @@ -537,8 +534,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) xfs_qm_qoff_logitem_format, .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, - .iop_unpin = (void(*)(xfs_log_item_t* ,int)) - xfs_qm_qoff_logitem_unpin, + .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin, .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) xfs_qm_qoff_logitem_unpin_remove, .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, @@ -559,8 +555,7 @@ static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) xfs_qm_qoff_logitem_format, .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, - .iop_unpin = (void(*)(xfs_log_item_t*, int)) - xfs_qm_qoff_logitem_unpin, + .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin, .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) xfs_qm_qoff_logitem_unpin_remove, .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, @@ -586,11 +581,8 @@ xfs_qm_qoff_logitem_init( qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP); - qf->qql_item.li_type = XFS_LI_QUOTAOFF; - if (start) - qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops; - else - qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops; + xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? 
+ &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops); qf->qql_item.li_mountp = mp; qf->qql_format.qf_type = XFS_LI_QUOTAOFF; qf->qql_format.qf_flags = flags; diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 417e61e3d9dd..38e764146644 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -67,9 +67,6 @@ static cred_t xfs_zerocr; STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); -STATIC void xfs_qm_freelist_init(xfs_frlist_t *); -STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *); - STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); STATIC int xfs_qm_shake(int, gfp_t); @@ -84,21 +81,25 @@ extern struct mutex qcheck_lock; #endif #ifdef QUOTADEBUG -#define XQM_LIST_PRINT(l, NXT, title) \ -{ \ - xfs_dquot_t *dqp; int i = 0; \ - cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \ - for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \ - cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " \ - "bcnt = %d, icnt = %d, refs = %d", \ - ++i, (int) be32_to_cpu(dqp->q_core.d_id), \ - DQFLAGTO_TYPESTR(dqp), \ - (int) be64_to_cpu(dqp->q_core.d_bcount), \ - (int) be64_to_cpu(dqp->q_core.d_icount), \ - (int) dqp->q_nrefs); } \ +static void +xfs_qm_dquot_list_print( + struct xfs_mount *mp) +{ + xfs_dquot_t *dqp; + int i = 0; + + list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist_lock, qi_mplist) { + cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " + "bcnt = %lld, icnt = %lld, refs = %d", + i++, be32_to_cpu(dqp->q_core.d_id), + DQFLAGTO_TYPESTR(dqp), + (long long)be64_to_cpu(dqp->q_core.d_bcount), + (long long)be64_to_cpu(dqp->q_core.d_icount), + dqp->q_nrefs); + } } #else -#define XQM_LIST_PRINT(l, NXT, title) do { } while (0) +static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { } #endif /* @@ -144,7 +145,9 @@ xfs_Gqm_init(void) /* * Freelist of all dquots of all file systems */ - xfs_qm_freelist_init(&(xqm->qm_dqfreelist)); + INIT_LIST_HEAD(&xqm->qm_dqfrlist); + xqm->qm_dqfrlist_cnt = 0; + mutex_init(&xqm->qm_dqfrlist_lock); /* * dquot zone. we register our own low-memory callback. @@ -189,6 +192,7 @@ STATIC void xfs_qm_destroy( struct xfs_qm *xqm) { + struct xfs_dquot *dqp, *n; int hsize, i; ASSERT(xqm != NULL); @@ -204,7 +208,21 @@ xfs_qm_destroy( xqm->qm_usr_dqhtable = NULL; xqm->qm_grp_dqhtable = NULL; xqm->qm_dqhashmask = 0; - xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist)); + + /* frlist cleanup */ + mutex_lock(&xqm->qm_dqfrlist_lock); + list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) { + xfs_dqlock(dqp); +#ifdef QUOTADEBUG + cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp); +#endif + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; + xfs_dqunlock(dqp); + xfs_qm_dqdestroy(dqp); + } + mutex_unlock(&xqm->qm_dqfrlist_lock); + mutex_destroy(&xqm->qm_dqfrlist_lock); #ifdef DEBUG mutex_destroy(&qcheck_lock); #endif @@ -256,7 +274,7 @@ STATIC void xfs_qm_rele_quotafs_ref( struct xfs_mount *mp) { - xfs_dquot_t *dqp, *nextdqp; + xfs_dquot_t *dqp, *n; ASSERT(xfs_Gqm); ASSERT(xfs_Gqm->qm_nrefs > 0); @@ -264,26 +282,24 @@ xfs_qm_rele_quotafs_ref( /* * Go thru the freelist and destroy all inactive dquots. 
*/ - xfs_qm_freelist_lock(xfs_Gqm); + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); - for (dqp = xfs_Gqm->qm_dqfreelist.qh_next; - dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) { + list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) { xfs_dqlock(dqp); - nextdqp = dqp->dq_flnext; if (dqp->dq_flags & XFS_DQ_INACTIVE) { ASSERT(dqp->q_mount == NULL); ASSERT(! XFS_DQ_IS_DIRTY(dqp)); - ASSERT(dqp->HL_PREVP == NULL); - ASSERT(dqp->MPL_PREVP == NULL); - XQM_FREELIST_REMOVE(dqp); + ASSERT(list_empty(&dqp->q_hashlist)); + ASSERT(list_empty(&dqp->q_mplist)); + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; xfs_dqunlock(dqp); xfs_qm_dqdestroy(dqp); } else { xfs_dqunlock(dqp); } - dqp = nextdqp; } - xfs_qm_freelist_unlock(xfs_Gqm); + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); /* * Destroy the entire XQM. If somebody mounts with quotaon, this'll @@ -305,7 +321,7 @@ xfs_qm_unmount( struct xfs_mount *mp) { if (mp->m_quotainfo) { - xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); + xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); xfs_qm_destroy_quotainfo(mp); } } @@ -449,20 +465,21 @@ xfs_qm_unmount_quotas( */ STATIC int xfs_qm_dqflush_all( - xfs_mount_t *mp, - int sync_mode) + struct xfs_mount *mp, + int sync_mode) { - int recl; - xfs_dquot_t *dqp; - int niters; - int error; + struct xfs_quotainfo *q = mp->m_quotainfo; + int recl; + struct xfs_dquot *dqp; + int niters; + int error; - if (mp->m_quotainfo == NULL) + if (!q) return 0; niters = 0; again: - xfs_qm_mplist_lock(mp); - FOREACH_DQUOT_IN_MP(dqp, mp) { + mutex_lock(&q->qi_dqlist_lock); + list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { xfs_dqlock(dqp); if (! XFS_DQ_IS_DIRTY(dqp)) { xfs_dqunlock(dqp); @@ -470,7 +487,7 @@ again: } /* XXX a sentinel would be better */ - recl = XFS_QI_MPLRECLAIMS(mp); + recl = q->qi_dqreclaims; if (!xfs_dqflock_nowait(dqp)) { /* * If we can't grab the flush lock then check @@ -485,21 +502,21 @@ again: * Let go of the mplist lock. We don't want to hold it * across a disk write. */ - xfs_qm_mplist_unlock(mp); + mutex_unlock(&q->qi_dqlist_lock); error = xfs_qm_dqflush(dqp, sync_mode); xfs_dqunlock(dqp); if (error) return error; - xfs_qm_mplist_lock(mp); - if (recl != XFS_QI_MPLRECLAIMS(mp)) { - xfs_qm_mplist_unlock(mp); + mutex_lock(&q->qi_dqlist_lock); + if (recl != q->qi_dqreclaims) { + mutex_unlock(&q->qi_dqlist_lock); /* XXX restart limit */ goto again; } } - xfs_qm_mplist_unlock(mp); + mutex_unlock(&q->qi_dqlist_lock); /* return ! busy */ return 0; } @@ -509,15 +526,15 @@ again: */ STATIC void xfs_qm_detach_gdquots( - xfs_mount_t *mp) + struct xfs_mount *mp) { - xfs_dquot_t *dqp, *gdqp; - int nrecl; + struct xfs_quotainfo *q = mp->m_quotainfo; + struct xfs_dquot *dqp, *gdqp; + int nrecl; again: - ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); - dqp = XFS_QI_MPLNEXT(mp); - while (dqp) { + ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); + list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { xfs_dqlock(dqp); if ((gdqp = dqp->q_gdquot)) { xfs_dqlock(gdqp); @@ -530,15 +547,14 @@ xfs_qm_detach_gdquots( * Can't hold the mplist lock across a dqput. * XXXmust convert to marker based iterations here. 
*/ - nrecl = XFS_QI_MPLRECLAIMS(mp); - xfs_qm_mplist_unlock(mp); + nrecl = q->qi_dqreclaims; + mutex_unlock(&q->qi_dqlist_lock); xfs_qm_dqput(gdqp); - xfs_qm_mplist_lock(mp); - if (nrecl != XFS_QI_MPLRECLAIMS(mp)) + mutex_lock(&q->qi_dqlist_lock); + if (nrecl != q->qi_dqreclaims) goto again; } - dqp = dqp->MPL_NEXT; } } @@ -550,23 +566,23 @@ xfs_qm_detach_gdquots( */ STATIC int xfs_qm_dqpurge_int( - xfs_mount_t *mp, - uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */ + struct xfs_mount *mp, + uint flags) { - xfs_dquot_t *dqp; - uint dqtype; - int nrecl; - xfs_dquot_t *nextdqp; - int nmisses; + struct xfs_quotainfo *q = mp->m_quotainfo; + struct xfs_dquot *dqp, *n; + uint dqtype; + int nrecl; + int nmisses; - if (mp->m_quotainfo == NULL) + if (!q) return 0; dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0; dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0; - xfs_qm_mplist_lock(mp); + mutex_lock(&q->qi_dqlist_lock); /* * In the first pass through all incore dquots of this filesystem, @@ -578,28 +594,25 @@ xfs_qm_dqpurge_int( again: nmisses = 0; - ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp)); + ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); /* * Try to get rid of all of the unwanted dquots. The idea is to * get them off mplist and hashlist, but leave them on freelist. */ - dqp = XFS_QI_MPLNEXT(mp); - while (dqp) { + list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) { /* * It's OK to look at the type without taking dqlock here. * We're holding the mplist lock here, and that's needed for * a dqreclaim. */ - if ((dqp->dq_flags & dqtype) == 0) { - dqp = dqp->MPL_NEXT; + if ((dqp->dq_flags & dqtype) == 0) continue; - } if (!mutex_trylock(&dqp->q_hash->qh_lock)) { - nrecl = XFS_QI_MPLRECLAIMS(mp); - xfs_qm_mplist_unlock(mp); + nrecl = q->qi_dqreclaims; + mutex_unlock(&q->qi_dqlist_lock); mutex_lock(&dqp->q_hash->qh_lock); - xfs_qm_mplist_lock(mp); + mutex_lock(&q->qi_dqlist_lock); /* * XXXTheoretically, we can get into a very long @@ -607,7 +620,7 @@ xfs_qm_dqpurge_int( * No one can be adding dquots to the mplist at * this point, but somebody might be taking things off. */ - if (nrecl != XFS_QI_MPLRECLAIMS(mp)) { + if (nrecl != q->qi_dqreclaims) { mutex_unlock(&dqp->q_hash->qh_lock); goto again; } @@ -617,11 +630,9 @@ xfs_qm_dqpurge_int( * Take the dquot off the mplist and hashlist. It may remain on * freelist in INACTIVE state. */ - nextdqp = dqp->MPL_NEXT; nmisses += xfs_qm_dqpurge(dqp); - dqp = nextdqp; } - xfs_qm_mplist_unlock(mp); + mutex_unlock(&q->qi_dqlist_lock); return nmisses; } @@ -921,12 +932,13 @@ xfs_qm_dqdetach( int xfs_qm_sync( - xfs_mount_t *mp, - int flags) + struct xfs_mount *mp, + int flags) { - int recl, restarts; - xfs_dquot_t *dqp; - int error; + struct xfs_quotainfo *q = mp->m_quotainfo; + int recl, restarts; + struct xfs_dquot *dqp; + int error; if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) return 0; @@ -934,18 +946,19 @@ xfs_qm_sync( restarts = 0; again: - xfs_qm_mplist_lock(mp); + mutex_lock(&q->qi_dqlist_lock); /* * dqpurge_all() also takes the mplist lock and iterate thru all dquots * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared * when we have the mplist lock, we know that dquots will be consistent * as long as we have it locked. */ - if (! 
XFS_IS_QUOTA_ON(mp)) { - xfs_qm_mplist_unlock(mp); + if (!XFS_IS_QUOTA_ON(mp)) { + mutex_unlock(&q->qi_dqlist_lock); return 0; } - FOREACH_DQUOT_IN_MP(dqp, mp) { + ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); + list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { /* * If this is vfs_sync calling, then skip the dquots that * don't 'seem' to be dirty. ie. don't acquire dqlock. @@ -969,7 +982,7 @@ xfs_qm_sync( } /* XXX a sentinel would be better */ - recl = XFS_QI_MPLRECLAIMS(mp); + recl = q->qi_dqreclaims; if (!xfs_dqflock_nowait(dqp)) { if (flags & SYNC_TRYLOCK) { xfs_dqunlock(dqp); @@ -989,7 +1002,7 @@ xfs_qm_sync( * Let go of the mplist lock. We don't want to hold it * across a disk write */ - xfs_qm_mplist_unlock(mp); + mutex_unlock(&q->qi_dqlist_lock); error = xfs_qm_dqflush(dqp, flags); xfs_dqunlock(dqp); if (error && XFS_FORCED_SHUTDOWN(mp)) @@ -997,17 +1010,17 @@ xfs_qm_sync( else if (error) return error; - xfs_qm_mplist_lock(mp); - if (recl != XFS_QI_MPLRECLAIMS(mp)) { + mutex_lock(&q->qi_dqlist_lock); + if (recl != q->qi_dqreclaims) { if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS) break; - xfs_qm_mplist_unlock(mp); + mutex_unlock(&q->qi_dqlist_lock); goto again; } } - xfs_qm_mplist_unlock(mp); + mutex_unlock(&q->qi_dqlist_lock); return 0; } @@ -1052,8 +1065,9 @@ xfs_qm_init_quotainfo( return error; } - xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); - lockdep_set_class(&qinf->qi_dqlist.qh_lock, &xfs_quota_mplist_class); + INIT_LIST_HEAD(&qinf->qi_dqlist); + mutex_init(&qinf->qi_dqlist_lock); + lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class); qinf->qi_dqreclaims = 0; @@ -1150,7 +1164,8 @@ xfs_qm_destroy_quotainfo( */ xfs_qm_rele_quotafs_ref(mp); - xfs_qm_list_destroy(&qi->qi_dqlist); + ASSERT(list_empty(&qi->qi_dqlist)); + mutex_destroy(&qi->qi_dqlist_lock); if (qi->qi_uquotaip) { IRELE(qi->qi_uquotaip); @@ -1177,7 +1192,7 @@ xfs_qm_list_init( int n) { mutex_init(&list->qh_lock); - list->qh_next = NULL; + INIT_LIST_HEAD(&list->qh_list); list->qh_version = 0; list->qh_nelems = 0; } @@ -1316,9 +1331,6 @@ xfs_qm_qino_alloc( */ spin_lock(&mp->m_sb_lock); if (flags & XFS_QMOPT_SBVERSION) { -#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) - unsigned oldv = mp->m_sb.sb_versionnum; -#endif ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) == @@ -1331,11 +1343,6 @@ xfs_qm_qino_alloc( /* qflags will get updated _after_ quotacheck */ mp->m_sb.sb_qflags = 0; -#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) - cmn_err(CE_NOTE, - "Old superblock version %x, converting to %x.", - oldv, mp->m_sb.sb_versionnum); -#endif } if (flags & XFS_QMOPT_UQUOTA) mp->m_sb.sb_uquotino = (*ip)->i_ino; @@ -1371,10 +1378,10 @@ xfs_qm_reset_dqcounts( #ifdef DEBUG j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); do_div(j, sizeof(xfs_dqblk_t)); - ASSERT(XFS_QM_DQPERBLK(mp) == j); + ASSERT(mp->m_quotainfo->qi_dqperchunk == j); #endif ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp); - for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) { + for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { /* * Do a sanity check, and if needed, repair the dqblk. Don't * output any warnings because it's perfectly possible to @@ -1429,7 +1436,7 @@ xfs_qm_dqiter_bufs( while (blkcnt--) { error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, bno), - (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp); + mp->m_quotainfo->qi_dqchunklen, 0, &bp); if (error) break; @@ -1439,7 +1446,7 @@ xfs_qm_dqiter_bufs( * goto the next block. 
*/ bno++; - firstid += XFS_QM_DQPERBLK(mp); + firstid += mp->m_quotainfo->qi_dqperchunk; } return error; } @@ -1505,7 +1512,7 @@ xfs_qm_dqiterate( continue; firstid = (xfs_dqid_t) map[i].br_startoff * - XFS_QM_DQPERBLK(mp); + mp->m_quotainfo->qi_dqperchunk; /* * Do a read-ahead on the next extent. */ @@ -1516,7 +1523,7 @@ xfs_qm_dqiterate( while (rablkcnt--) { xfs_baread(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, rablkno), - (int)XFS_QI_DQCHUNKLEN(mp)); + mp->m_quotainfo->qi_dqchunklen); rablkno++; } } @@ -1576,8 +1583,10 @@ xfs_qm_quotacheck_dqadjust( /* * Set default limits, adjust timers (since we changed usages) + * + * There are no timers for the default values set in the root dquot. */ - if (! XFS_IS_SUSER_DQUOT(dqp)) { + if (dqp->q_core.d_id) { xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core); xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core); } @@ -1747,14 +1756,14 @@ xfs_qm_quotacheck( lastino = 0; flags = 0; - ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp)); + ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip); ASSERT(XFS_IS_QUOTA_RUNNING(mp)); /* * There should be no cached dquots. The (simplistic) quotacheck * algorithm doesn't like that. */ - ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0); + ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist)); cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname); @@ -1763,15 +1772,19 @@ xfs_qm_quotacheck( * their counters to zero. We need a clean slate. * We don't log our changes till later. */ - if ((uip = XFS_QI_UQIP(mp))) { - if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA))) + uip = mp->m_quotainfo->qi_uquotaip; + if (uip) { + error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA); + if (error) goto error_return; flags |= XFS_UQUOTA_CHKD; } - if ((gip = XFS_QI_GQIP(mp))) { - if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ? - XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA))) + gip = mp->m_quotainfo->qi_gquotaip; + if (gip) { + error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ? + XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); + if (error) goto error_return; flags |= XFS_OQUOTA_CHKD; } @@ -1804,7 +1817,7 @@ xfs_qm_quotacheck( * at this point (because we intentionally didn't in dqget_noattach). */ if (error) { - xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF); + xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); goto error_return; } @@ -1825,7 +1838,7 @@ xfs_qm_quotacheck( mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD); mp->m_qflags |= flags; - XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++"); + xfs_qm_dquot_list_print(mp); error_return: if (error) { @@ -1920,59 +1933,53 @@ xfs_qm_init_quotainos( } } - XFS_QI_UQIP(mp) = uip; - XFS_QI_GQIP(mp) = gip; + mp->m_quotainfo->qi_uquotaip = uip; + mp->m_quotainfo->qi_gquotaip = gip; return 0; } + /* - * Traverse the freelist of dquots and attempt to reclaim a maximum of - * 'howmany' dquots. This operation races with dqlookup(), and attempts to - * favor the lookup function ... - * XXXsup merge this with qm_reclaim_one(). + * Just pop the least recently used dquot off the freelist and + * recycle it. The returned dquot is locked. 
*/ -STATIC int -xfs_qm_shake_freelist( - int howmany) +STATIC xfs_dquot_t * +xfs_qm_dqreclaim_one(void) { - int nreclaimed; - xfs_dqhash_t *hash; - xfs_dquot_t *dqp, *nextdqp; + xfs_dquot_t *dqpout; + xfs_dquot_t *dqp; int restarts; - int nflushes; - - if (howmany <= 0) - return 0; - nreclaimed = 0; restarts = 0; - nflushes = 0; + dqpout = NULL; -#ifdef QUOTADEBUG - cmn_err(CE_DEBUG, "Shake free 0x%x", howmany); -#endif - /* lock order is : hashchainlock, freelistlock, mplistlock */ - tryagain: - xfs_qm_freelist_lock(xfs_Gqm); + /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */ +startagain: + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); - for (dqp = xfs_Gqm->qm_dqfreelist.qh_next; - ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) && - nreclaimed < howmany); ) { + list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) { + struct xfs_mount *mp = dqp->q_mount; xfs_dqlock(dqp); /* * We are racing with dqlookup here. Naturally we don't - * want to reclaim a dquot that lookup wants. + * want to reclaim a dquot that lookup wants. We release the + * freelist lock and start over, so that lookup will grab + * both the dquot and the freelistlock. */ if (dqp->dq_flags & XFS_DQ_WANT) { + ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); + + trace_xfs_dqreclaim_want(dqp); + xfs_dqunlock(dqp); - xfs_qm_freelist_unlock(xfs_Gqm); + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) - return nreclaimed; + return NULL; XQM_STATS_INC(xqmstats.xs_qm_dqwants); - goto tryagain; + goto startagain; } /* @@ -1981,23 +1988,27 @@ xfs_qm_shake_freelist( * life easier. */ if (dqp->dq_flags & XFS_DQ_INACTIVE) { - ASSERT(dqp->q_mount == NULL); + ASSERT(mp == NULL); ASSERT(! XFS_DQ_IS_DIRTY(dqp)); - ASSERT(dqp->HL_PREVP == NULL); - ASSERT(dqp->MPL_PREVP == NULL); + ASSERT(list_empty(&dqp->q_hashlist)); + ASSERT(list_empty(&dqp->q_mplist)); + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; + xfs_dqunlock(dqp); + dqpout = dqp; XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); - nextdqp = dqp->dq_flnext; - goto off_freelist; + break; } - ASSERT(dqp->MPL_PREVP); + ASSERT(dqp->q_hash); + ASSERT(!list_empty(&dqp->q_mplist)); + /* * Try to grab the flush lock. If this dquot is in the process of * getting flushed to disk, we don't want to reclaim it. */ if (!xfs_dqflock_nowait(dqp)) { xfs_dqunlock(dqp); - dqp = dqp->dq_flnext; continue; } @@ -2010,21 +2021,21 @@ xfs_qm_shake_freelist( if (XFS_DQ_IS_DIRTY(dqp)) { int error; - trace_xfs_dqshake_dirty(dqp); + trace_xfs_dqreclaim_dirty(dqp); /* * We flush it delayed write, so don't bother - * releasing the mplock. + * releasing the freelist lock. */ error = xfs_qm_dqflush(dqp, 0); if (error) { - xfs_fs_cmn_err(CE_WARN, dqp->q_mount, - "xfs_qm_dqflush_all: dquot %p flush failed", dqp); + xfs_fs_cmn_err(CE_WARN, mp, + "xfs_qm_dqreclaim: dquot %p flush failed", dqp); } xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ - dqp = dqp->dq_flnext; continue; } + /* * We're trying to get the hashlock out of order. This races * with dqlookup; so, we giveup and goto the next dquot if @@ -2033,56 +2044,74 @@ xfs_qm_shake_freelist( * waiting for the freelist lock. */ if (!mutex_trylock(&dqp->q_hash->qh_lock)) { - xfs_dqfunlock(dqp); - xfs_dqunlock(dqp); - dqp = dqp->dq_flnext; - continue; + restarts++; + goto dqfunlock; } + /* * This races with dquot allocation code as well as dqflush_all * and reclaim code. So, if we failed to grab the mplist lock, * giveup everything and start over. 
*/ - hash = dqp->q_hash; - ASSERT(hash); - if (! xfs_qm_mplist_nowait(dqp->q_mount)) { - /* XXX put a sentinel so that we can come back here */ + if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) { + restarts++; + mutex_unlock(&dqp->q_hash->qh_lock); xfs_dqfunlock(dqp); xfs_dqunlock(dqp); - mutex_unlock(&hash->qh_lock); - xfs_qm_freelist_unlock(xfs_Gqm); - if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) - return nreclaimed; - goto tryagain; + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS) + return NULL; + goto startagain; } - trace_xfs_dqshake_unlink(dqp); - -#ifdef QUOTADEBUG - cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n", - dqp, be32_to_cpu(dqp->q_core.d_id)); -#endif ASSERT(dqp->q_nrefs == 0); - nextdqp = dqp->dq_flnext; - XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); - XQM_HASHLIST_REMOVE(hash, dqp); + list_del_init(&dqp->q_mplist); + mp->m_quotainfo->qi_dquots--; + mp->m_quotainfo->qi_dqreclaims++; + list_del_init(&dqp->q_hashlist); + dqp->q_hash->qh_version++; + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; + dqpout = dqp; + mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); + mutex_unlock(&dqp->q_hash->qh_lock); +dqfunlock: xfs_dqfunlock(dqp); - xfs_qm_mplist_unlock(dqp->q_mount); - mutex_unlock(&hash->qh_lock); - - off_freelist: - XQM_FREELIST_REMOVE(dqp); xfs_dqunlock(dqp); - nreclaimed++; - XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims); + if (dqpout) + break; + if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) + return NULL; + } + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + return dqpout; +} + +/* + * Traverse the freelist of dquots and attempt to reclaim a maximum of + * 'howmany' dquots. This operation races with dqlookup(), and attempts to + * favor the lookup function ... + */ +STATIC int +xfs_qm_shake_freelist( + int howmany) +{ + int nreclaimed = 0; + xfs_dquot_t *dqp; + + if (howmany <= 0) + return 0; + + while (nreclaimed < howmany) { + dqp = xfs_qm_dqreclaim_one(); + if (!dqp) + return nreclaimed; xfs_qm_dqdestroy(dqp); - dqp = nextdqp; + nreclaimed++; } - xfs_qm_freelist_unlock(xfs_Gqm); return nreclaimed; } - /* * The kmem_shake interface is invoked when memory is running low. */ @@ -2097,7 +2126,7 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask) if (!xfs_Gqm) return 0; - nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */ + nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */ /* incore dquots in all f/s's */ ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; @@ -2113,131 +2142,6 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask) } -/* - * Just pop the least recently used dquot off the freelist and - * recycle it. The returned dquot is locked. - */ -STATIC xfs_dquot_t * -xfs_qm_dqreclaim_one(void) -{ - xfs_dquot_t *dqpout; - xfs_dquot_t *dqp; - int restarts; - int nflushes; - - restarts = 0; - dqpout = NULL; - nflushes = 0; - - /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */ - startagain: - xfs_qm_freelist_lock(xfs_Gqm); - - FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) { - xfs_dqlock(dqp); - - /* - * We are racing with dqlookup here. Naturally we don't - * want to reclaim a dquot that lookup wants. We release the - * freelist lock and start over, so that lookup will grab - * both the dquot and the freelistlock. - */ - if (dqp->dq_flags & XFS_DQ_WANT) { - ASSERT(! 
(dqp->dq_flags & XFS_DQ_INACTIVE)); - - trace_xfs_dqreclaim_want(dqp); - - xfs_dqunlock(dqp); - xfs_qm_freelist_unlock(xfs_Gqm); - if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) - return NULL; - XQM_STATS_INC(xqmstats.xs_qm_dqwants); - goto startagain; - } - - /* - * If the dquot is inactive, we are assured that it is - * not on the mplist or the hashlist, and that makes our - * life easier. - */ - if (dqp->dq_flags & XFS_DQ_INACTIVE) { - ASSERT(dqp->q_mount == NULL); - ASSERT(! XFS_DQ_IS_DIRTY(dqp)); - ASSERT(dqp->HL_PREVP == NULL); - ASSERT(dqp->MPL_PREVP == NULL); - XQM_FREELIST_REMOVE(dqp); - xfs_dqunlock(dqp); - dqpout = dqp; - XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); - break; - } - - ASSERT(dqp->q_hash); - ASSERT(dqp->MPL_PREVP); - - /* - * Try to grab the flush lock. If this dquot is in the process of - * getting flushed to disk, we don't want to reclaim it. - */ - if (!xfs_dqflock_nowait(dqp)) { - xfs_dqunlock(dqp); - continue; - } - - /* - * We have the flush lock so we know that this is not in the - * process of being flushed. So, if this is dirty, flush it - * DELWRI so that we don't get a freelist infested with - * dirty dquots. - */ - if (XFS_DQ_IS_DIRTY(dqp)) { - int error; - - trace_xfs_dqreclaim_dirty(dqp); - - /* - * We flush it delayed write, so don't bother - * releasing the freelist lock. - */ - error = xfs_qm_dqflush(dqp, 0); - if (error) { - xfs_fs_cmn_err(CE_WARN, dqp->q_mount, - "xfs_qm_dqreclaim: dquot %p flush failed", dqp); - } - xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ - continue; - } - - if (! xfs_qm_mplist_nowait(dqp->q_mount)) { - xfs_dqfunlock(dqp); - xfs_dqunlock(dqp); - continue; - } - - if (!mutex_trylock(&dqp->q_hash->qh_lock)) - goto mplistunlock; - - trace_xfs_dqreclaim_unlink(dqp); - - ASSERT(dqp->q_nrefs == 0); - XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); - XQM_HASHLIST_REMOVE(dqp->q_hash, dqp); - XQM_FREELIST_REMOVE(dqp); - dqpout = dqp; - mutex_unlock(&dqp->q_hash->qh_lock); - mplistunlock: - xfs_qm_mplist_unlock(dqp->q_mount); - xfs_dqfunlock(dqp); - xfs_dqunlock(dqp); - if (dqpout) - break; - } - - xfs_qm_freelist_unlock(xfs_Gqm); - return dqpout; -} - - /*------------------------------------------------------------------*/ /* @@ -2662,66 +2566,3 @@ xfs_qm_vop_create_dqattach( } } -/* ------------- list stuff -----------------*/ -STATIC void -xfs_qm_freelist_init(xfs_frlist_t *ql) -{ - ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql; - mutex_init(&ql->qh_lock); - ql->qh_version = 0; - ql->qh_nelems = 0; -} - -STATIC void -xfs_qm_freelist_destroy(xfs_frlist_t *ql) -{ - xfs_dquot_t *dqp, *nextdqp; - - mutex_lock(&ql->qh_lock); - for (dqp = ql->qh_next; - dqp != (xfs_dquot_t *)ql; ) { - xfs_dqlock(dqp); - nextdqp = dqp->dq_flnext; -#ifdef QUOTADEBUG - cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp); -#endif - XQM_FREELIST_REMOVE(dqp); - xfs_dqunlock(dqp); - xfs_qm_dqdestroy(dqp); - dqp = nextdqp; - } - mutex_unlock(&ql->qh_lock); - mutex_destroy(&ql->qh_lock); - - ASSERT(ql->qh_nelems == 0); -} - -STATIC void -xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq) -{ - dq->dq_flnext = ql->qh_next; - dq->dq_flprev = (xfs_dquot_t *)ql; - ql->qh_next = dq; - dq->dq_flnext->dq_flprev = dq; - xfs_Gqm->qm_dqfreelist.qh_nelems++; - xfs_Gqm->qm_dqfreelist.qh_version++; -} - -void -xfs_qm_freelist_unlink(xfs_dquot_t *dq) -{ - xfs_dquot_t *next = dq->dq_flnext; - xfs_dquot_t *prev = dq->dq_flprev; - - next->dq_flprev = prev; - prev->dq_flnext = next; - dq->dq_flnext = dq->dq_flprev = dq; - xfs_Gqm->qm_dqfreelist.qh_nelems--; - 
xfs_Gqm->qm_dqfreelist.qh_version++; -} - -void -xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq) -{ - xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq); -} diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index 495564b8af38..c9446f1c726d 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h @@ -72,17 +72,6 @@ extern kmem_zone_t *qm_dqtrxzone; #define XFS_QM_MAX_DQCLUSTER_LOGSZ 3 typedef xfs_dqhash_t xfs_dqlist_t; -/* - * The freelist head. The first two fields match the first two in the - * xfs_dquot_t structure (in xfs_dqmarker_t) - */ -typedef struct xfs_frlist { - struct xfs_dquot *qh_next; - struct xfs_dquot *qh_prev; - struct mutex qh_lock; - uint qh_version; - uint qh_nelems; -} xfs_frlist_t; /* * Quota Manager (global) structure. Lives only in core. @@ -91,7 +80,9 @@ typedef struct xfs_qm { xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */ xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */ uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */ - xfs_frlist_t qm_dqfreelist; /* freelist of dquots */ + struct list_head qm_dqfrlist; /* freelist of dquots */ + struct mutex qm_dqfrlist_lock; + int qm_dqfrlist_cnt; atomic_t qm_totaldquots; /* total incore dquots */ uint qm_nrefs; /* file systems with quota on */ int qm_dqfree_ratio;/* ratio of free to inuse dquots */ @@ -106,7 +97,9 @@ typedef struct xfs_qm { typedef struct xfs_quotainfo { xfs_inode_t *qi_uquotaip; /* user quota inode */ xfs_inode_t *qi_gquotaip; /* group quota inode */ - xfs_dqlist_t qi_dqlist; /* all dquots in filesys */ + struct list_head qi_dqlist; /* all dquots in filesys */ + struct mutex qi_dqlist_lock; + int qi_dquots; int qi_dqreclaims; /* a change here indicates a removal in the dqlist */ time_t qi_btimelimit; /* limit for blks timer */ @@ -175,10 +168,6 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *); extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint); extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint); -/* list stuff */ -extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *); -extern void xfs_qm_freelist_unlink(xfs_dquot_t *); - #ifdef DEBUG extern int xfs_qm_internalqcheck(xfs_mount_t *); #else diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c index 83e7ea3e25fa..3d1fc79532e2 100644 --- a/fs/xfs/quota/xfs_qm_stats.c +++ b/fs/xfs/quota/xfs_qm_stats.c @@ -55,7 +55,7 @@ static int xqm_proc_show(struct seq_file *m, void *v) ndquot, xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0, - xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0); + xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0); return 0; } diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 5d0ee8d492db..92b002f1805f 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -79,6 +79,7 @@ xfs_qm_scall_quotaoff( xfs_mount_t *mp, uint flags) { + struct xfs_quotainfo *q = mp->m_quotainfo; uint dqtype; int error; uint inactivate_flags; @@ -102,11 +103,8 @@ xfs_qm_scall_quotaoff( * critical thing. * If quotaoff, then we must be dealing with the root filesystem. */ - ASSERT(mp->m_quotainfo); - if (mp->m_quotainfo) - mutex_lock(&(XFS_QI_QOFFLOCK(mp))); - - ASSERT(mp->m_quotainfo); + ASSERT(q); + mutex_lock(&q->qi_quotaofflock); /* * If we're just turning off quota enforcement, change mp and go. 
@@ -117,7 +115,7 @@ xfs_qm_scall_quotaoff( spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = mp->m_qflags; spin_unlock(&mp->m_sb_lock); - mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + mutex_unlock(&q->qi_quotaofflock); /* XXX what to do if error ? Revert back to old vals incore ? */ error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); @@ -150,10 +148,8 @@ xfs_qm_scall_quotaoff( * Nothing to do? Don't complain. This happens when we're just * turning off quota enforcement. */ - if ((mp->m_qflags & flags) == 0) { - mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); - return (0); - } + if ((mp->m_qflags & flags) == 0) + goto out_unlock; /* * Write the LI_QUOTAOFF log record, and do SB changes atomically, @@ -162,7 +158,7 @@ xfs_qm_scall_quotaoff( */ error = xfs_qm_log_quotaoff(mp, &qoffstart, flags); if (error) - goto out_error; + goto out_unlock; /* * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct @@ -204,7 +200,7 @@ xfs_qm_scall_quotaoff( * So, if we couldn't purge all the dquots from the filesystem, * we can't get rid of the incore data structures. */ - while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF))) + while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype))) delay(10 * nculprits); /* @@ -222,7 +218,7 @@ xfs_qm_scall_quotaoff( if (error) { /* We're screwed now. Shutdown is the only option. */ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - goto out_error; + goto out_unlock; } /* @@ -230,27 +226,26 @@ xfs_qm_scall_quotaoff( */ if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) || ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) { - mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + mutex_unlock(&q->qi_quotaofflock); xfs_qm_destroy_quotainfo(mp); return (0); } /* - * Release our quotainode references, and vn_purge them, - * if we don't need them anymore. + * Release our quotainode references if we don't need them anymore. */ - if ((dqtype & XFS_QMOPT_UQUOTA) && XFS_QI_UQIP(mp)) { - IRELE(XFS_QI_UQIP(mp)); - XFS_QI_UQIP(mp) = NULL; + if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) { + IRELE(q->qi_uquotaip); + q->qi_uquotaip = NULL; } - if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && XFS_QI_GQIP(mp)) { - IRELE(XFS_QI_GQIP(mp)); - XFS_QI_GQIP(mp) = NULL; + if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) { + IRELE(q->qi_gquotaip); + q->qi_gquotaip = NULL; } -out_error: - mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); - return (error); +out_unlock: + mutex_unlock(&q->qi_quotaofflock); + return error; } int @@ -379,9 +374,9 @@ xfs_qm_scall_quotaon( /* * Switch on quota enforcement in core. 
*/ - mutex_lock(&(XFS_QI_QOFFLOCK(mp))); + mutex_lock(&mp->m_quotainfo->qi_quotaofflock); mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); - mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); return (0); } @@ -392,11 +387,12 @@ xfs_qm_scall_quotaon( */ int xfs_qm_scall_getqstat( - xfs_mount_t *mp, - fs_quota_stat_t *out) + struct xfs_mount *mp, + struct fs_quota_stat *out) { - xfs_inode_t *uip, *gip; - boolean_t tempuqip, tempgqip; + struct xfs_quotainfo *q = mp->m_quotainfo; + struct xfs_inode *uip, *gip; + boolean_t tempuqip, tempgqip; uip = gip = NULL; tempuqip = tempgqip = B_FALSE; @@ -415,9 +411,9 @@ xfs_qm_scall_getqstat( out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; - if (mp->m_quotainfo) { - uip = mp->m_quotainfo->qi_uquotaip; - gip = mp->m_quotainfo->qi_gquotaip; + if (q) { + uip = q->qi_uquotaip; + gip = q->qi_gquotaip; } if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, @@ -441,17 +437,20 @@ xfs_qm_scall_getqstat( if (tempgqip) IRELE(gip); } - if (mp->m_quotainfo) { - out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp); - out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp); - out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp); - out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp); - out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp); - out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp); + if (q) { + out->qs_incoredqs = q->qi_dquots; + out->qs_btimelimit = q->qi_btimelimit; + out->qs_itimelimit = q->qi_itimelimit; + out->qs_rtbtimelimit = q->qi_rtbtimelimit; + out->qs_bwarnlimit = q->qi_bwarnlimit; + out->qs_iwarnlimit = q->qi_iwarnlimit; } - return (0); + return 0; } +#define XFS_DQ_MASK \ + (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) + /* * Adjust quota limits, and start/stop timers accordingly. */ @@ -462,15 +461,17 @@ xfs_qm_scall_setqlim( uint type, fs_disk_quota_t *newlim) { + struct xfs_quotainfo *q = mp->m_quotainfo; xfs_disk_dquot_t *ddq; xfs_dquot_t *dqp; xfs_trans_t *tp; int error; xfs_qcnt_t hard, soft; - if ((newlim->d_fieldmask & - (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0) - return (0); + if (newlim->d_fieldmask & ~XFS_DQ_MASK) + return EINVAL; + if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) + return 0; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128, @@ -485,7 +486,7 @@ xfs_qm_scall_setqlim( * a quotaoff from happening). (XXXThis doesn't currently happen * because we take the vfslock before calling xfs_qm_sysent). */ - mutex_lock(&(XFS_QI_QOFFLOCK(mp))); + mutex_lock(&q->qi_quotaofflock); /* * Get the dquot (locked), and join it to the transaction. 
@@ -493,9 +494,8 @@ xfs_qm_scall_setqlim( */ if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) { xfs_trans_cancel(tp, XFS_TRANS_ABORT); - mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); ASSERT(error != ENOENT); - return (error); + goto out_unlock; } xfs_trans_dqjoin(tp, dqp); ddq = &dqp->q_core; @@ -513,8 +513,8 @@ xfs_qm_scall_setqlim( ddq->d_blk_hardlimit = cpu_to_be64(hard); ddq->d_blk_softlimit = cpu_to_be64(soft); if (id == 0) { - mp->m_quotainfo->qi_bhardlimit = hard; - mp->m_quotainfo->qi_bsoftlimit = soft; + q->qi_bhardlimit = hard; + q->qi_bsoftlimit = soft; } } else { qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft); @@ -529,8 +529,8 @@ xfs_qm_scall_setqlim( ddq->d_rtb_hardlimit = cpu_to_be64(hard); ddq->d_rtb_softlimit = cpu_to_be64(soft); if (id == 0) { - mp->m_quotainfo->qi_rtbhardlimit = hard; - mp->m_quotainfo->qi_rtbsoftlimit = soft; + q->qi_rtbhardlimit = hard; + q->qi_rtbsoftlimit = soft; } } else { qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft); @@ -546,8 +546,8 @@ xfs_qm_scall_setqlim( ddq->d_ino_hardlimit = cpu_to_be64(hard); ddq->d_ino_softlimit = cpu_to_be64(soft); if (id == 0) { - mp->m_quotainfo->qi_ihardlimit = hard; - mp->m_quotainfo->qi_isoftlimit = soft; + q->qi_ihardlimit = hard; + q->qi_isoftlimit = soft; } } else { qdprintk("ihard %Ld < isoft %Ld\n", hard, soft); @@ -572,23 +572,23 @@ xfs_qm_scall_setqlim( * for warnings. */ if (newlim->d_fieldmask & FS_DQ_BTIMER) { - mp->m_quotainfo->qi_btimelimit = newlim->d_btimer; + q->qi_btimelimit = newlim->d_btimer; ddq->d_btimer = cpu_to_be32(newlim->d_btimer); } if (newlim->d_fieldmask & FS_DQ_ITIMER) { - mp->m_quotainfo->qi_itimelimit = newlim->d_itimer; + q->qi_itimelimit = newlim->d_itimer; ddq->d_itimer = cpu_to_be32(newlim->d_itimer); } if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { - mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer; + q->qi_rtbtimelimit = newlim->d_rtbtimer; ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); } if (newlim->d_fieldmask & FS_DQ_BWARNS) - mp->m_quotainfo->qi_bwarnlimit = newlim->d_bwarns; + q->qi_bwarnlimit = newlim->d_bwarns; if (newlim->d_fieldmask & FS_DQ_IWARNS) - mp->m_quotainfo->qi_iwarnlimit = newlim->d_iwarns; + q->qi_iwarnlimit = newlim->d_iwarns; if (newlim->d_fieldmask & FS_DQ_RTBWARNS) - mp->m_quotainfo->qi_rtbwarnlimit = newlim->d_rtbwarns; + q->qi_rtbwarnlimit = newlim->d_rtbwarns; } else { /* * If the user is now over quota, start the timelimit. 
@@ -605,8 +605,9 @@ xfs_qm_scall_setqlim( error = xfs_trans_commit(tp, 0); xfs_qm_dqprint(dqp); xfs_qm_dqrele(dqp); - mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); + out_unlock: + mutex_unlock(&q->qi_quotaofflock); return error; } @@ -853,7 +854,8 @@ xfs_dqrele_inode( int error; /* skip quota inodes */ - if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) { + if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || + ip == ip->i_mount->m_quotainfo->qi_gquotaip) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_gdquot == NULL); read_unlock(&pag->pag_ici_lock); @@ -891,7 +893,8 @@ xfs_qm_dqrele_all_inodes( uint flags) { ASSERT(mp->m_quotainfo); - xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0); + xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, + XFS_ICI_NO_TAG, 0, NULL); } /*------------------------------------------------------------------------*/ @@ -930,7 +933,8 @@ struct mutex qcheck_lock; } typedef struct dqtest { - xfs_dqmarker_t q_lists; + uint dq_flags; /* various flags (XFS_DQ_*) */ + struct list_head q_hashlist; xfs_dqhash_t *q_hash; /* the hashchain header */ xfs_mount_t *q_mount; /* filesystem this relates to */ xfs_dqid_t d_id; /* user id or group id */ @@ -941,14 +945,9 @@ typedef struct dqtest { STATIC void xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp) { - xfs_dquot_t *d; - if (((d) = (h)->qh_next)) - (d)->HL_PREVP = &((dqp)->HL_NEXT); - (dqp)->HL_NEXT = d; - (dqp)->HL_PREVP = &((h)->qh_next); - (h)->qh_next = (xfs_dquot_t *)dqp; - (h)->qh_version++; - (h)->qh_nelems++; + list_add(&dqp->q_hashlist, &h->qh_list); + h->qh_version++; + h->qh_nelems++; } STATIC void xfs_qm_dqtest_print( @@ -1060,9 +1059,7 @@ xfs_qm_internalqcheck_dqget( xfs_dqhash_t *h; h = DQTEST_HASH(mp, id, type); - for (d = (xfs_dqtest_t *) h->qh_next; d != NULL; - d = (xfs_dqtest_t *) d->HL_NEXT) { - /* DQTEST_LIST_PRINT(h, HL_NEXT, "@@@@@ dqtestlist @@@@@"); */ + list_for_each_entry(d, &h->qh_list, q_hashlist) { if (d->d_id == id && mp == d->q_mount) { *O_dq = d; return (0); @@ -1073,6 +1070,7 @@ xfs_qm_internalqcheck_dqget( d->d_id = id; d->q_mount = mp; d->q_hash = h; + INIT_LIST_HEAD(&d->q_hashlist); xfs_qm_hashinsert(h, d); *O_dq = d; return (0); @@ -1179,8 +1177,6 @@ xfs_qm_internalqcheck( xfs_ino_t lastino; int done, count; int i; - xfs_dqtest_t *d, *e; - xfs_dqhash_t *h1; int error; lastino = 0; @@ -1220,19 +1216,18 @@ xfs_qm_internalqcheck( } cmn_err(CE_DEBUG, "Checking results against system dquots"); for (i = 0; i < qmtest_hashmask; i++) { - h1 = &qmtest_udqtab[i]; - for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { + xfs_dqtest_t *d, *n; + xfs_dqhash_t *h; + + h = &qmtest_udqtab[i]; + list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) { xfs_dqtest_cmp(d); - e = (xfs_dqtest_t *) d->HL_NEXT; kmem_free(d); - d = e; } - h1 = &qmtest_gdqtab[i]; - for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { + h = &qmtest_gdqtab[i]; + list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) { xfs_dqtest_cmp(d); - e = (xfs_dqtest_t *) d->HL_NEXT; kmem_free(d); - d = e; } } diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h index 8286b2842b6b..94a3d927d716 100644 --- a/fs/xfs/quota/xfs_quota_priv.h +++ b/fs/xfs/quota/xfs_quota_priv.h @@ -24,43 +24,6 @@ */ #define XFS_DQITER_MAP_SIZE 10 -/* Number of dquots that fit in to a dquot block */ -#define XFS_QM_DQPERBLK(mp) ((mp)->m_quotainfo->qi_dqperchunk) - -#define XFS_DQ_IS_ADDEDTO_TRX(t, d) ((d)->q_transp == (t)) - -#define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims) -#define 
-#define XFS_QI_UQIP(mp)		((mp)->m_quotainfo->qi_uquotaip)
-#define XFS_QI_GQIP(mp)		((mp)->m_quotainfo->qi_gquotaip)
-#define XFS_QI_DQCHUNKLEN(mp)	((mp)->m_quotainfo->qi_dqchunklen)
-#define XFS_QI_BTIMELIMIT(mp)	((mp)->m_quotainfo->qi_btimelimit)
-#define XFS_QI_RTBTIMELIMIT(mp)	((mp)->m_quotainfo->qi_rtbtimelimit)
-#define XFS_QI_ITIMELIMIT(mp)	((mp)->m_quotainfo->qi_itimelimit)
-#define XFS_QI_BWARNLIMIT(mp)	((mp)->m_quotainfo->qi_bwarnlimit)
-#define XFS_QI_RTBWARNLIMIT(mp)	((mp)->m_quotainfo->qi_rtbwarnlimit)
-#define XFS_QI_IWARNLIMIT(mp)	((mp)->m_quotainfo->qi_iwarnlimit)
-#define XFS_QI_QOFFLOCK(mp)	((mp)->m_quotainfo->qi_quotaofflock)
-
-#define XFS_QI_MPL_LIST(mp)	((mp)->m_quotainfo->qi_dqlist)
-#define XFS_QI_MPLNEXT(mp)	((mp)->m_quotainfo->qi_dqlist.qh_next)
-#define XFS_QI_MPLNDQUOTS(mp)	((mp)->m_quotainfo->qi_dqlist.qh_nelems)
-
-#define xfs_qm_mplist_lock(mp) \
-	mutex_lock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define xfs_qm_mplist_nowait(mp) \
-	mutex_trylock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define xfs_qm_mplist_unlock(mp) \
-	mutex_unlock(&(XFS_QI_MPL_LIST(mp).qh_lock))
-#define XFS_QM_IS_MPLIST_LOCKED(mp) \
-	mutex_is_locked(&(XFS_QI_MPL_LIST(mp).qh_lock))
-
-#define xfs_qm_freelist_lock(qm) \
-	mutex_lock(&((qm)->qm_dqfreelist.qh_lock))
-#define xfs_qm_freelist_lock_nowait(qm) \
-	mutex_trylock(&((qm)->qm_dqfreelist.qh_lock))
-#define xfs_qm_freelist_unlock(qm) \
-	mutex_unlock(&((qm)->qm_dqfreelist.qh_lock))
-
 /*
  * Hash into a bucket in the dquot hash table, based on <mp, id>.
  */
@@ -72,9 +35,6 @@
 				      XFS_DQ_HASHVAL(mp, id)) : \
 				     (xfs_Gqm->qm_grp_dqhtable + \
 				      XFS_DQ_HASHVAL(mp, id)))
-#define XFS_IS_DQTYPE_ON(mp, type)   (type == XFS_DQ_USER ? \
-					XFS_IS_UQUOTA_ON(mp) : \
-					XFS_IS_OQUOTA_ON(mp))
 #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
 	!dqp->q_core.d_blk_hardlimit && \
 	!dqp->q_core.d_blk_softlimit && \
@@ -86,68 +46,6 @@
 	!dqp->q_core.d_rtbcount && \
 	!dqp->q_core.d_icount)
 
-#define HL_PREVP	dq_hashlist.ql_prevp
-#define HL_NEXT		dq_hashlist.ql_next
-#define MPL_PREVP	dq_mplist.ql_prevp
-#define MPL_NEXT	dq_mplist.ql_next
-
-
-#define _LIST_REMOVE(h, dqp, PVP, NXT) \
-	{ \
-		 xfs_dquot_t *d; \
-		 if (((d) = (dqp)->NXT)) \
-			 (d)->PVP = (dqp)->PVP; \
-		 *((dqp)->PVP) = d; \
-		 (dqp)->NXT = NULL; \
-		 (dqp)->PVP = NULL; \
-		 (h)->qh_version++; \
-		 (h)->qh_nelems--; \
-	}
-
-#define _LIST_INSERT(h, dqp, PVP, NXT) \
-	{ \
-		 xfs_dquot_t *d; \
-		 if (((d) = (h)->qh_next)) \
-			 (d)->PVP = &((dqp)->NXT); \
-		 (dqp)->NXT = d; \
-		 (dqp)->PVP = &((h)->qh_next); \
-		 (h)->qh_next = dqp; \
-		 (h)->qh_version++; \
-		 (h)->qh_nelems++; \
-	}
-
-#define FOREACH_DQUOT_IN_MP(dqp, mp) \
-	for ((dqp) = XFS_QI_MPLNEXT(mp); (dqp) != NULL; (dqp) = (dqp)->MPL_NEXT)
-
-#define FOREACH_DQUOT_IN_FREELIST(dqp, qlist)	\
-for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
-     (dqp) = (dqp)->dq_flnext)
-
-#define XQM_HASHLIST_INSERT(h, dqp)	\
-	 _LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT)
-
-#define XQM_FREELIST_INSERT(h, dqp)	\
-	 xfs_qm_freelist_append(h, dqp)
-
-#define XQM_MPLIST_INSERT(h, dqp)	\
-	 _LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT)
-
-#define XQM_HASHLIST_REMOVE(h, dqp)	\
-	 _LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT)
-#define XQM_FREELIST_REMOVE(dqp)	\
-	 xfs_qm_freelist_unlink(dqp)
-#define XQM_MPLIST_REMOVE(h, dqp)	\
-	{ _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \
-	  XFS_QI_MPLRECLAIMS((dqp)->q_mount)++; }
-
-#define XFS_DQ_IS_LOGITEM_INITD(dqp)	((dqp)->q_logitem.qli_dquot == (dqp))
-
-#define XFS_QM_DQP_TO_DQACCT(tp, dqp)	(XFS_QM_ISUDQ(dqp) ? \
-					 (tp)->t_dqinfo->dqa_usrdquots : \
-					 (tp)->t_dqinfo->dqa_grpdquots)
-#define XFS_IS_SUSER_DQUOT(dqp)	\
-	(!((dqp)->q_core.d_id))
-
 #define DQFLAGTO_TYPESTR(d)	(((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
 				 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
 				 (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index c3ab75cb1d9a..061d827da33c 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -59,12 +59,11 @@ xfs_trans_dqjoin(
 	xfs_trans_t	*tp,
 	xfs_dquot_t	*dqp)
 {
-	xfs_dq_logitem_t    *lp;
+	xfs_dq_logitem_t    *lp = &dqp->q_logitem;
 
-	ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+	ASSERT(dqp->q_transp != tp);
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
-	ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));
-	lp = &dqp->q_logitem;
+	ASSERT(lp->qli_dquot == dqp);
 
 	/*
 	 * Get a log_item_desc to point at the new item.
@@ -96,7 +95,7 @@ xfs_trans_log_dquot(
 {
 	xfs_log_item_desc_t	*lidp;
 
-	ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+	ASSERT(dqp->q_transp == tp);
 	ASSERT(XFS_DQ_IS_LOCKED(dqp));
 
 	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
@@ -198,16 +197,16 @@ xfs_trans_get_dqtrx(
 	int		i;
 	xfs_dqtrx_t	*qa;
 
-	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
-		qa = XFS_QM_DQP_TO_DQACCT(tp, dqp);
+	qa = XFS_QM_ISUDQ(dqp) ?
+		tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots;
 
+	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
 		if (qa[i].qt_dquot == NULL ||
-		    qa[i].qt_dquot == dqp) {
-			return (&qa[i]);
-		}
+		    qa[i].qt_dquot == dqp)
+			return &qa[i];
 	}
 
-	return (NULL);
+	return NULL;
 }
@@ -381,7 +380,7 @@ xfs_trans_apply_dquot_deltas(
 			break;
 
 		ASSERT(XFS_DQ_IS_LOCKED(dqp));
-		ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
+		ASSERT(dqp->q_transp == tp);
 
 		/*
 		 * adjust the actual number of blocks used
@@ -639,7 +638,7 @@ xfs_trans_dqresv(
 			softlimit = q->qi_bsoftlimit;
 		timer = be32_to_cpu(dqp->q_core.d_btimer);
 		warns = be16_to_cpu(dqp->q_core.d_bwarns);
-		warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
+		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
 		resbcountp = &dqp->q_res_bcount;
 	} else {
 		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
@@ -651,7 +650,7 @@ xfs_trans_dqresv(
 			softlimit = q->qi_rtbsoftlimit;
 		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
 		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
-		warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
+		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
 		resbcountp = &dqp->q_res_rtbcount;
 	}
 
@@ -691,7 +690,7 @@ xfs_trans_dqresv(
 		count = be64_to_cpu(dqp->q_core.d_icount);
 		timer = be32_to_cpu(dqp->q_core.d_itimer);
 		warns = be16_to_cpu(dqp->q_core.d_iwarns);
-		warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
+		warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
 		hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
 		if (!hardlimit)
 			hardlimit = q->qi_ihardlimit;
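The recurring pattern in the hunks above is the replacement of the open-coded HL_NEXT/HL_PREVP pointer chains and the _LIST_INSERT/_LIST_REMOVE macros with the kernel's standard struct list_head primitives (INIT_LIST_HEAD, list_add, list_for_each_entry, list_for_each_entry_safe). The sketch below is not part of the patch; it is a minimal, self-contained userspace illustration of that idiom, with simplified stand-ins for the <linux/list.h> helpers (the real list_for_each_entry_safe() infers the entry type via typeof and takes no explicit type argument) and a cut-down dqtest-like structure instead of the kernel types.

/*
 * Minimal userspace sketch of the list_head idiom the patch converts to.
 * The list helpers here are simplified re-implementations for illustration
 * only; the kernel provides the real ones in <linux/list.h>.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

/* Insert 'entry' right after 'head', the same way list_add() does. */
static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Deletion-safe walk: 'n' is fetched before the body may free 'pos'. */
#define list_for_each_entry_safe(pos, n, head, type, member)		\
	for (pos = container_of((head)->next, type, member),		\
	     n = container_of(pos->member.next, type, member);		\
	     &pos->member != (head);					\
	     pos = n, n = container_of(n->member.next, type, member))

/* Stand-in for the xfs_dqtest_t entries hashed and freed above. */
struct dqtest {
	unsigned int		d_id;
	struct list_head	q_hashlist;
};

int main(void)
{
	struct list_head bucket;	/* one hash bucket, like qh_list */
	struct dqtest *d, *n;
	unsigned int id;

	INIT_LIST_HEAD(&bucket);
	for (id = 0; id < 4; id++) {
		d = malloc(sizeof(*d));
		d->d_id = id;
		INIT_LIST_HEAD(&d->q_hashlist);
		/* same shape as the converted xfs_qm_hashinsert() body */
		list_add(&d->q_hashlist, &bucket);
	}

	/* same shape as the qmtest teardown loop in xfs_qm_internalqcheck() */
	list_for_each_entry_safe(d, n, &bucket, struct dqtest, q_hashlist) {
		printf("freeing dquot id %u\n", d->d_id);
		free(d);
	}
	return 0;
}

Embedding the list_head inside each entry and walking it with the deletion-safe iterator is what lets the patch drop the per-list prev-pointer bookkeeping and the _LIST_* macros entirely.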