Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  |  28  +++++++++++-----------------
1 file changed, 11 insertions(+), 17 deletions(-)
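
The patch below removes the nr_accounted out-parameter from the unmap path: unmap_single_vma() stops counting VM_ACCOUNT pages, unmap_vmas() loses both the counter and the zap_details argument, and zap_page_range() open-codes its own VMA loop so that it can still pass details through to unmap_single_vma().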
diff --git a/mm/memory.c b/mm/memory.c
index 6105f475fa86..1e77da6d82c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (end <= vma->vm_start)
 		return;
 
-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
-
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);
 
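
After this hunk, unmap_single_vma() no longer totals up VM_ACCOUNT pages while it unmaps, so a caller that still wants the count must derive it from the VMA list itself. A minimal sketch, assuming whole VMAs are being torn down and using the stock vma_pages() helper, which returns (vm_end - vm_start) >> PAGE_SHIFT:

	unsigned long nr_accounted = 0;
	struct vm_area_struct *vma;

	/* Walk the address space and total the accountable pages. */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
	vm_unacct_memory(nr_accounted);	/* give the charge back */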
@@ -1339,8 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *details)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-				 details);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
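
The slimmed-down unmap_vmas() always hands NULL details to unmap_single_vma(), which fits the one kind of caller that needs no filtering: full teardown of an address space. A sketch of such a call site, assuming exit_mmap()-style teardown (the fullmm flag to tlb_gather_mmu() and the 0..-1 range are assumptions about the caller, not part of this hunk):

	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 1);	/* fullmm teardown */
	/* 0..-1 spans every possible user address, so all VMAs go. */
	unmap_vmas(&tlb, mm->mmap, 0, -1);
	tlb_finish_mmu(&tlb, 0, -1);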
@@ -1376,19 +1369,21 @@ void unmap_vmas(struct mmu_gather *tlb,
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
-	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
+	unsigned long end = start + size;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(&tlb, address, end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
+	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+		unmap_single_vma(&tlb, vma, start, end, details);
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_finish_mmu(&tlb, start, end);
 }
 
 /**
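
zap_page_range() can no longer call unmap_vmas(), since it still has a zap_details argument to forward; instead it open-codes the same loop, complete with the mmu_notifier bracketing that unmap_vmas() provides, and passes details to each unmap_single_vma(). The address-to-start rename is cosmetic. Callers are unaffected; an madvise(MADV_DONTNEED)-style site still reads roughly like this (variable names illustrative):

	/* Throw away every user page backing [start, start + len). */
	zap_page_range(vma, start, len, NULL);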
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
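
zap_page_range_single() keeps its explicit mmu_notifier bracketing and merely drops the dead nr_accounted local. Taken together, the hunks leave these signatures (reconstructed from the diff above):

	static void unmap_single_vma(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start_addr,
			unsigned long end_addr,
			struct zap_details *details);

	void unmap_vmas(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start_addr,
			unsigned long end_addr);

	void zap_page_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long size, struct zap_details *details);

Accounting against vm_committed_as is now entirely the caller's business, which is what lets the parameter vanish from every signature above.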