author	Nick Piggin <npiggin@suse.de>	2009-01-06 04:43:09 +0100
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2009-01-09 07:09:47 +0100
commit	0087167c9d5b1273e7e6bbe39a9ab13bdb9a39bb
tree	d91aa240f9e83b23b77c44d82fe976168ad882e5 /fs/xfs
parent	[XFS] remove old vmap cache
[XFS] use scalable vmap API
Implement XFS's large buffer support with the new vmap APIs. See the vmap rewrite (db64fe02) for some numbers. The biggest improvement that comes from using the new APIs is avoiding the global KVA allocation lock on every call.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
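For context, a minimal sketch of the API swap this patch performs. The map_buffer()/unmap_buffer() wrappers are hypothetical and exist only for illustration; the vmap()/vunmap() and vm_map_ram()/vm_unmap_ram() signatures are the ones used in the diff below.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical wrappers contrasting the old and new mapping calls. */
static void *map_buffer(struct page **pages, unsigned int page_count)
{
	/* Old API: vmap() takes the global KVA allocation lock on every call. */
	/* return vmap(pages, page_count, VM_MAP, PAGE_KERNEL); */

	/* New API: KVA comes from per-CPU vmap blocks; -1 means any NUMA node. */
	return vm_map_ram(pages, page_count, -1, PAGE_KERNEL);
}

static void unmap_buffer(void *addr, unsigned int page_count)
{
	/* vunmap(addr) looked the mapped area up internally; vm_unmap_ram()
	 * instead requires the caller to pass how many pages were mapped. */
	vm_unmap_ram(addr, page_count);
}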
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 0b2177a9fbdc..d71dc44e21ed 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -264,7 +264,7 @@ xfs_buf_free(
 	uint		i;
 
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		vunmap(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -386,8 +386,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
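A note on the two new arguments: vm_unmap_ram() needs the page count because small mappings are carved out of per-CPU vmap blocks and freed by address and size, without a lookup in the global vmap tree that vunmap() performed. The -1 node argument to vm_map_ram() allows its internal allocations to come from any NUMA node.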