From 7eaceaccab5f40bbfda044629a6298616aeaed50 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 10 Mar 2011 08:52:07 +0100
Subject: block: remove per-queue plugging

Code has been converted over to the new explicit on-stack plugging, and
delay users have been converted to use the new API for that.  So let's
kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe
---
 mm/shmem.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'mm/shmem.c')

diff --git a/mm/shmem.c b/mm/shmem.c
index 5ee67c990602..24d23f5bedf1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -224,7 +224,6 @@ static const struct vm_operations_struct shmem_vm_ops;
 static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
 	.ra_pages	= 0,	/* No readahead */
 	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
-	.unplug_io_fn	= default_unplug_io_fn,
 };
 
 static LIST_HEAD(shmem_swaplist);
--
cgit v1.2.3

From 4c73b1bc6bb14aab7888ebe6bffe957cf7c07fa0 Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Tue, 22 Mar 2011 16:32:40 -0700
Subject: mm: shmem: change remove_from_page_cache

This patch series changes remove_from_page_cache()'s page refcounting
rule: the page cache reference is now dropped inside
delete_from_page_cache(), so callers no longer need to decrease the page
reference themselves.

Signed-off-by: Minchan Kim
Acked-by: Hugh Dickins
Acked-by: Mel Gorman
Reviewed-by: KAMEZAWA Hiroyuki
Reviewed-by: Johannes Weiner
Reviewed-by: KOSAKI Motohiro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/shmem.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'mm/shmem.c')

diff --git a/mm/shmem.c b/mm/shmem.c
index 048a95a5244d..88593586bdb7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1081,7 +1081,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	shmem_recalc_inode(inode);
 
 	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
-		remove_from_page_cache(page);
+		delete_from_page_cache(page);
 		shmem_swp_set(info, entry, swap.val);
 		shmem_swp_unmap(entry);
 		if (list_empty(&info->swaplist))
@@ -1091,7 +1091,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 		spin_unlock(&info->lock);
 		swap_shmem_alloc(swap);
 		BUG_ON(page_mapped(page));
-		page_cache_release(page);	/* pagecache ref */
 		swap_writepage(page, wbc);
 		if (inode) {
 			mutex_lock(&shmem_swaplist_mutex);
--
cgit v1.2.3

From bee4c36a5cf5c9f63ce1d7372aa62045fbd16d47 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Tue, 22 Mar 2011 16:33:43 -0700
Subject: shmem: let shared anonymous be nonlinear again

Up to 2.6.22, you could use remap_file_pages(2) on a tmpfs file or a
shared mapping of /dev/zero or a shared anonymous mapping.  In 2.6.23 we
disabled it by default, but set VM_CAN_NONLINEAR to enable it on safe
mappings.  We made sure to set it in shmem_mmap() for tmpfs files, but
missed it in shmem_zero_setup() for the others.  Fix that at last.

Reported-by: Kenny Simpson
Signed-off-by: Hugh Dickins
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/shmem.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm/shmem.c')

diff --git a/mm/shmem.c b/mm/shmem.c
index 88593586bdb7..91ce9a1024d7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2793,5 +2793,6 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	fput(vma->vm_file);
 	vma->vm_file = file;
 	vma->vm_ops = &shmem_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
--
cgit v1.2.3

From fc5da22ae35d4720be59af8787a8a6d5e4da9517 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Thu, 14 Apr 2011 15:22:07 -0700
Subject: tmpfs: fix off-by-one in max_blocks checks

If you fill up a tmpfs, df was showing

  tmpfs                 460800         -         -      -  /tmp

because of an off-by-one in the max_blocks checks.  Fix it so df shows

  tmpfs                 460800    460800         0    100%  /tmp

Signed-off-by: Hugh Dickins
Cc: Tim Chen
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/shmem.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'mm/shmem.c')

diff --git a/mm/shmem.c b/mm/shmem.c
index 58da7c150ba6..8fa27e4e582a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -421,7 +421,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 	 * a waste to allocate index if we cannot allocate data.
 	 */
 	if (sbinfo->max_blocks) {
-		if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0)
+		if (percpu_counter_compare(&sbinfo->used_blocks,
+					sbinfo->max_blocks - 1) >= 0)
 			return ERR_PTR(-ENOSPC);
 		percpu_counter_inc(&sbinfo->used_blocks);
 		spin_lock(&inode->i_lock);
@@ -1397,7 +1398,8 @@ repeat:
 		shmem_swp_unmap(entry);
 		sbinfo = SHMEM_SB(inode->i_sb);
 		if (sbinfo->max_blocks) {
-			if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) ||
+			if (percpu_counter_compare(&sbinfo->used_blocks,
+						sbinfo->max_blocks) >= 0 ||
 			    shmem_acct_block(info->flags)) {
 				spin_unlock(&info->lock);
 				error = -ENOSPC;
--
cgit v1.2.3
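
The second hunk of the last patch is the clearest illustration of the
off-by-one: the limit test runs before the block is accounted, so it has
to refuse at the limit, not one past it.  Below is a minimal userspace
sketch of that boundary condition only; the longs are stand-ins for
sbinfo->used_blocks and sbinfo->max_blocks rather than the real percpu
counter (in the kernel, percpu_counter_compare() just reports the sign of
counter - rhs), and none of this is the actual shmem code.

#include <stdio.h>

/* Old form of the check: refuse only once the counter is past the limit. */
static int old_check_refuses(long used_blocks, long max_blocks)
{
	return used_blocks > max_blocks;	/* compare(used, max) > 0 */
}

/* Fixed form: refuse as soon as the limit is reached. */
static int new_check_refuses(long used_blocks, long max_blocks)
{
	return used_blocks >= max_blocks;	/* compare(used, max) >= 0 */
}

int main(void)
{
	long max_blocks = 3;
	long used;

	/*
	 * The check precedes the accounting, so with the strict comparison
	 * an allocation at used == max_blocks still succeeds and the
	 * counter overshoots the limit by one.
	 */
	for (used = 0; !old_check_refuses(used, max_blocks); used++)
		;
	printf("old check first refuses at used=%ld (limit %ld)\n",
	       used, max_blocks);

	for (used = 0; !new_check_refuses(used, max_blocks); used++)
		;
	printf("new check first refuses at used=%ld (limit %ld)\n",
	       used, max_blocks);

	return 0;
}

Run standalone, this prints a first refusal at used=4 for the old
comparison and used=3 for the new one: the one-block overshoot the patch
removes.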