author    | Christoph Hellwig <hch@infradead.org> | 2011-04-24 21:06:17 +0200
committer | Alex Elder <aelder@sgi.com>           | 2011-04-28 20:18:09 +0200
commit    | 8a072a4d4c6a5b6ec32836c467d2996393c76c6f (patch)
tree      | b21dad1310e4351854b9e2e24feb86beed20d1f7 /fs/xfs/xfs_alloc.c
parent    | xfs: exact busy extent tracking (diff)
xfs: reduce the number of pagb_lock roundtrips in xfs_alloc_clear_busy
Instead of looking up the per-AG structure and taking and releasing the pagb_lock
for every single busy extent completed, sort the list of busy extents and
only switch between AGs where necessary. This becomes especially important
with the online discard support, which will hit this lock more often.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_alloc.c')
-rw-r--r-- | fs/xfs/xfs_alloc.c | 56 |
1 file changed, 46 insertions, 10 deletions
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 53157d4d5e8b..44a51a7b4c3a 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2964,24 +2964,60 @@ fail:
 	*rlen = 0;
 }
 
-void
-xfs_alloc_busy_clear(
+static void
+xfs_alloc_busy_clear_one(
 	struct xfs_mount	*mp,
+	struct xfs_perag	*pag,
 	struct xfs_busy_extent	*busyp)
 {
-	struct xfs_perag	*pag;
-
-	list_del_init(&busyp->list);
-
-	pag = xfs_perag_get(mp, busyp->agno);
-	spin_lock(&pag->pagb_lock);
 	if (busyp->length) {
 		trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
 						busyp->length);
 		rb_erase(&busyp->rb_node, &pag->pagb_tree);
 	}
-	spin_unlock(&pag->pagb_lock);
-	xfs_perag_put(pag);
 
+	list_del_init(&busyp->list);
 	kmem_free(busyp);
 }
+
+void
+xfs_alloc_busy_clear(
+	struct xfs_mount	*mp,
+	struct list_head	*list)
+{
+	struct xfs_busy_extent	*busyp, *n;
+	struct xfs_perag	*pag = NULL;
+	xfs_agnumber_t		agno = NULLAGNUMBER;
+
+	list_for_each_entry_safe(busyp, n, list, list) {
+		if (busyp->agno != agno) {
+			if (pag) {
+				spin_unlock(&pag->pagb_lock);
+				xfs_perag_put(pag);
+			}
+			pag = xfs_perag_get(mp, busyp->agno);
+			spin_lock(&pag->pagb_lock);
+			agno = busyp->agno;
+		}
+
+		xfs_alloc_busy_clear_one(mp, pag, busyp);
+	}
+
+	if (pag) {
+		spin_unlock(&pag->pagb_lock);
+		xfs_perag_put(pag);
+	}
+}
+
+/*
+ * Callback for list_sort to sort busy extents by the AG they reside in.
+ */
+int
+xfs_busy_extent_ag_cmp(
+	void			*priv,
+	struct list_head	*a,
+	struct list_head	*b)
+{
+	return container_of(a, struct xfs_busy_extent, list)->agno -
+		container_of(b, struct xfs_busy_extent, list)->agno;
+}
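The new xfs_busy_extent_ag_cmp() comparator only pays off if callers sort the busy extent list before passing it to xfs_alloc_busy_clear(), so that extents from the same AG are adjacent and pagb_lock is only dropped and retaken on AG boundaries. The actual call sites are outside fs/xfs/xfs_alloc.c and are not visible in this file-limited diffstat; the sketch below is illustrative, and example_clear_busy_extents() and busy_list are hypothetical names, not part of this patch:

#include <linux/list_sort.h>

/*
 * Illustrative caller: sort the collected busy extents by AG with the
 * comparator added in this patch, then clear them in a single pass so
 * xfs_alloc_busy_clear() only switches pagb_lock when the AG changes.
 */
static void example_clear_busy_extents(
	struct xfs_mount	*mp,
	struct list_head	*busy_list)
{
	list_sort(NULL, busy_list, xfs_busy_extent_ag_cmp);
	xfs_alloc_busy_clear(mp, busy_list);
}

Sorting once up front bounds the locking cost at one lock/unlock pair per AG represented in the list, instead of one per busy extent as before.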