Diffstat (limited to 'fs/buffer.c')
-rw-r--r--   fs/buffer.c   33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6cbfceabd95d..5287be18633b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -96,7 +96,7 @@ static void
 __clear_page_buffers(struct page *page)
 {
         ClearPagePrivate(page);
-        page->private = 0;
+        set_page_private(page, 0);
         page_cache_release(page);
 }
@@ -396,7 +396,7 @@ asmlinkage long sys_fdatasync(unsigned int fd)
  * private_lock is contended then so is mapping->tree_lock).
  */
 static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
+__find_get_block_slow(struct block_device *bdev, sector_t block)
 {
         struct inode *bd_inode = bdev->bd_inode;
         struct address_space *bd_mapping = bd_inode->i_mapping;
@@ -502,7 +502,7 @@ static void free_more_memory(void)
         yield();
 
         for_each_pgdat(pgdat) {
-                zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
+                zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
                 if (*zones)
                         try_to_free_pages(zones, GFP_NOFS);
         }
@@ -1438,7 +1438,7 @@ __find_get_block(struct block_device *bdev, sector_t block, int size)
         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
 
         if (bh == NULL) {
-                bh = __find_get_block_slow(bdev, block, size);
+                bh = __find_get_block_slow(bdev, block);
                 if (bh)
                         bh_lru_install(bh);
         }
@@ -1478,8 +1478,10 @@ EXPORT_SYMBOL(__getblk);
 void __breadahead(struct block_device *bdev, sector_t block, int size)
 {
         struct buffer_head *bh = __getblk(bdev, block, size);
-        ll_rw_block(READA, 1, &bh);
-        brelse(bh);
+        if (likely(bh)) {
+                ll_rw_block(READA, 1, &bh);
+                brelse(bh);
+        }
 }
 
 EXPORT_SYMBOL(__breadahead);
@@ -1497,7 +1499,7 @@ __bread(struct block_device *bdev, sector_t block, int size)
 {
         struct buffer_head *bh = __getblk(bdev, block, size);
 
-        if (!buffer_uptodate(bh))
+        if (likely(bh) && !buffer_uptodate(bh))
                 bh = __bread_slow(bh);
         return bh;
 }
@@ -1571,7 +1573,7 @@ static inline void discard_buffer(struct buffer_head * bh)
  *
  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
  */
-int try_to_release_page(struct page *page, int gfp_mask)
+int try_to_release_page(struct page *page, gfp_t gfp_mask)
 {
         struct address_space * const mapping = page->mapping;
 
@@ -1637,6 +1639,15 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
+int do_invalidatepage(struct page *page, unsigned long offset)
+{
+        int (*invalidatepage)(struct page *, unsigned long);
+        invalidatepage = page->mapping->a_ops->invalidatepage;
+        if (invalidatepage == NULL)
+                invalidatepage = block_invalidatepage;
+        return (*invalidatepage)(page, offset);
+}
+
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
@@ -1694,7 +1705,7 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 
         might_sleep();
 
-        old_bh = __find_get_block_slow(bdev, block, 0);
+        old_bh = __find_get_block_slow(bdev, block);
         if (old_bh) {
                 clear_buffer_dirty(old_bh);
                 wait_on_buffer(old_bh);
@@ -2696,7 +2707,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
          * they may have been added in ext3_writepage().  Make them
          * freeable here, so the page does not leak.
          */
-        block_invalidatepage(page, 0);
+        do_invalidatepage(page, 0);
         unlock_page(page);
         return 0; /* don't care */
 }
@@ -3045,7 +3056,7 @@ static void recalc_bh_state(void)
         buffer_heads_over_limit = (tot > max_buffer_heads);
 }
 
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
         struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
         if (ret) {
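The most substantive change above is the new do_invalidatepage() helper: block_write_full_page() previously hardcoded a call to block_invalidatepage(), and now the helper dispatches to the filesystem's own a_ops->invalidatepage hook when one is set, falling back to the generic implementation otherwise. Below is a minimal, compilable userspace sketch of that dispatch pattern; the struct definitions are simplified stand-ins for the kernel's, and ext3_like_invalidatepage() is an invented example hook, not a real kernel function.

#include <stdio.h>

struct page;    /* forward declaration; defined below */

struct address_space_operations {
        int (*invalidatepage)(struct page *, unsigned long);
};

struct address_space {
        const struct address_space_operations *a_ops;
};

struct page {
        struct address_space *mapping;
};

/* Generic fallback, standing in for the kernel's block_invalidatepage(). */
static int block_invalidatepage(struct page *page, unsigned long offset)
{
        (void)page;
        printf("generic block_invalidatepage(), offset %lu\n", offset);
        return 1;
}

/* Invented example of a filesystem-specific hook (not a real function). */
static int ext3_like_invalidatepage(struct page *page, unsigned long offset)
{
        (void)page;
        printf("filesystem hook, offset %lu\n", offset);
        return 1;
}

/* Same dispatch logic as the do_invalidatepage() added in the diff. */
static int do_invalidatepage(struct page *page, unsigned long offset)
{
        int (*invalidatepage)(struct page *, unsigned long);

        invalidatepage = page->mapping->a_ops->invalidatepage;
        if (invalidatepage == NULL)
                invalidatepage = block_invalidatepage;
        return (*invalidatepage)(page, offset);
}

int main(void)
{
        const struct address_space_operations with_hook = { ext3_like_invalidatepage };
        const struct address_space_operations without_hook = { NULL };
        struct address_space m1 = { &with_hook };
        struct address_space m2 = { &without_hook };
        struct page p1 = { &m1 };
        struct page p2 = { &m2 };

        do_invalidatepage(&p1, 0);      /* runs the filesystem hook */
        do_invalidatepage(&p2, 0);      /* falls back to the generic path */
        return 0;
}

The indirection keeps callers such as block_write_full_page() oblivious to which implementation runs, so the filesystem-specific cleanup cited in the hunk comment (buffers added in ext3_writepage()) happens without the caller knowing about it.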
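The __breadahead() and __bread() hunks guard against __getblk() returning NULL, which the old code would have dereferenced. Here is a small sketch of the same guard pattern, assuming nothing beyond GCC/Clang's __builtin_expect(); get_resource(), use_resource(), and put_resource() are hypothetical stand-ins for __getblk(), ll_rw_block(), and brelse():

#include <stdlib.h>

/* Standard kernel-style hint that the branch is usually taken. */
#define likely(x)       __builtin_expect(!!(x), 1)

/* Hypothetical stand-in for __getblk(): an operation that can fail. */
static int *get_resource(void)
{
        return malloc(sizeof(int));
}

static void use_resource(int *r) { *r = 42; }   /* stand-in for ll_rw_block() */
static void put_resource(int *r) { free(r); }   /* stand-in for brelse() */

/* Mirrors the fixed __breadahead(): only touch the result when non-NULL. */
static void breadahead_like(void)
{
        int *r = get_resource();

        if (likely(r)) {
                use_resource(r);
                put_resource(r);
        }
}

int main(void)
{
        breadahead_like();
        return 0;
}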