author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-12 03:05:37 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-12 03:05:37 +0100
commit		608ff1a210ab0e8b969399039bf8e18693605910 (patch)
tree		faea7bb1764461c73d0953089bd5439d91733a03 /mm/rmap.c
parent		Merge tag 'usb-3.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/greg... (diff)
parent		memory_hotplug: ensure every online node has NORMAL memory (diff)
Merge branch 'akpm' (Andrew's patchbomb)
Merge misc updates from Andrew Morton:
 "About half of most of MM. Going very early this time due to
  uncertainty over the coreautounifiednumasched things. I'll send the
  other half of most of MM tomorrow. The rest of MM awaits a slab merge
  from Pekka."

* emailed patches from Andrew Morton: (71 commits)
  memory_hotplug: ensure every online node has NORMAL memory
  memory_hotplug: handle empty zone when online_movable/online_kernel
  mm, memory-hotplug: dynamic configure movable memory and portion memory
  drivers/base/node.c: cleanup node_state_attr[]
  bootmem: fix wrong call parameter for free_bootmem()
  avr32, kconfig: remove HAVE_ARCH_BOOTMEM
  mm: cma: remove watermark hacks
  mm: cma: skip watermarks check for already isolated blocks in split_free_page()
  mm, oom: fix race when specifying a thread as the oom origin
  mm, oom: change type of oom_score_adj to short
  mm: cleanup register_node()
  mm, mempolicy: remove duplicate code
  mm/vmscan.c: try_to_freeze() returns boolean
  mm: introduce putback_movable_pages()
  virtio_balloon: introduce migration primitives to balloon pages
  mm: introduce compaction and migration for ballooned pages
  mm: introduce a common interface for balloon pages mobility
  mm: redefine address_space.assoc_mapping
  mm: adjust address_space_operations.migratepage() return code
  arch/sparc/kernel/sys_sparc_64.c: s/COLOUR/COLOR/
  ...
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	56
1 file changed, 30 insertions(+), 26 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 2ee1ef0f317b..cf7e99a87c32 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -562,6 +562,27 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 	return address;
 }
 
+pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd = NULL;
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
+		pmd = NULL;
+out:
+	return pmd;
+}
+
 /*
  * Check that @page is mapped at @address into @mm.
  *
@@ -574,8 +595,6 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 			    unsigned long address, spinlock_t **ptlp, int sync)
 {
-	pgd_t *pgd;
-	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	spinlock_t *ptl;
@@ -586,17 +605,10 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 		goto check;
 	}
 
-	pgd = pgd_offset(mm, address);
-	if (!pgd_present(*pgd))
-		return NULL;
-
-	pud = pud_offset(pgd, address);
-	if (!pud_present(*pud))
+	pmd = mm_find_pmd(mm, address);
+	if (!pmd)
 		return NULL;
 
-	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd))
-		return NULL;
 	if (pmd_trans_huge(*pmd))
 		return NULL;
 
@@ -1139,9 +1151,11 @@ void page_remove_rmap(struct page *page)
 	 * containing the swap entry, but page not yet written to swap.
 	 *
 	 * And we can skip it on file pages, so long as the filesystem
-	 * participates in dirty tracking; but need to catch shm and tmpfs
-	 * and ramfs pages which have been modified since creation by read
-	 * fault.
+	 * participates in dirty tracking (note that this is not only an
+	 * optimization but also solves problems caused by dirty flag in
+	 * storage key getting set by a write from inside kernel); but need to
+	 * catch shm and tmpfs and ramfs pages which have been modified since
+	 * creation by read fault.
 	 *
 	 * Note that mapping must be decided above, before decrementing
 	 * mapcount (which luckily provides a barrier): once page is unmapped,
@@ -1345,8 +1359,6 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		struct vm_area_struct *vma, struct page *check_page)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	pgd_t *pgd;
-	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t pteval;
@@ -1366,16 +1378,8 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	if (end > vma->vm_end)
 		end = vma->vm_end;
 
-	pgd = pgd_offset(mm, address);
-	if (!pgd_present(*pgd))
-		return ret;
-
-	pud = pud_offset(pgd, address);
-	if (!pud_present(*pud))
-		return ret;
-
-	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd))
+	pmd = mm_find_pmd(mm, address);
+	if (!pmd)
 		return ret;
 
 	mmun_start = address;
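
The refactor is easiest to see from the caller's side. Below is a minimal
caller sketch, not part of this commit: the function name example_lookup_pte
is hypothetical, and it assumes mm_find_pmd() is visible to the caller (the
series declares it in mm's internal header). It mirrors what
__page_check_address does after the patch: one helper call replaces the
open-coded pgd/pud/pmd walk.

#include <linux/mm.h>

static pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long address,
				 spinlock_t **ptlp)
{
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	/* One call replaces the pgd_offset/pud_offset/pmd_offset chain;
	 * NULL means some level of the walk was not present. */
	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return NULL;

	/* A transparent huge pmd has no pte level to map. */
	if (pmd_trans_huge(*pmd))
		return NULL;

	/* Map the pte and take the pte lock, as __page_check_address does. */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_present(*pte)) {
		pte_unmap_unlock(pte, ptl);
		return NULL;
	}
	*ptlp = ptl;
	return pte;	/* caller must finish with pte_unmap_unlock(pte, *ptlp) */
}

The point of the helper is visible in the removed hunks: each of the three
call sites used to repeat the same three present-checks, and each copy had to
get its own early-return value right (NULL in __page_check_address, ret in
try_to_unmap_cluster). mm_find_pmd() centralizes the walk and leaves only the
per-caller error handling behind.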