author     Dave Airlie <airlied@redhat.com>    2011-05-16 02:45:40 +0200
committer  Dave Airlie <airlied@redhat.com>    2011-05-16 02:45:40 +0200
commit     69f7876b2ab61e8114675d6092ad0b482e233612 (patch)
tree       a55aefd08d6c5f617d277a99e11b5a707e162585 /mm
parent     drm/mxm: fix Kconfig options for when to build MXM WMI driver. (diff)
parent     drm/i915: split PCH clock gating init (diff)
Merge remote branch 'keithp/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'keithp/drm-intel-next' of /ssd/git/drm-next: (301 commits)
drm/i915: split PCH clock gating init
drm/i915: add Ivybridge clock gating init function
drm/i915: Update the location of the ringbuffers' HWS_PGA registers for IVB.
drm/i915: Add support for fence registers on Ivybridge.
drm/i915: Use existing function instead of open-coding fence reg clear.
drm/i915: split clock gating init into per-chipset functions
drm/i915: set IBX pch type explicitly
drm/i915: add Ivy Bridge PCI IDs and driver feature structs
drm/i915: add PantherPoint PCH ID
agp/intel: add Ivy Bridge support
drm/i915: ring support for Ivy Bridge
drm/i915: page flip support for Ivy Bridge
drm/i915: interrupt & vblank support for Ivy Bridge
drm/i915: treat Ivy Bridge watermarks like Sandy Bridge
drm/i915: manual FDI training for Ivy Bridge
drm/i915: add swizzle/tiling support for Ivy Bridge
drm/i915: Ivy Bridge has split display and pipe control
drm/i915: add IS_IVYBRIDGE macro for checks
drm/i915: add IS_GEN7 macro to cover Ivy Bridge and later
drm/i915: split enable/disable vblank code into chipset specific functions
...
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c | 43
-rw-r--r--  mm/memory.c      |  2
-rw-r--r--  mm/oom_kill.c    |  9
3 files changed, 31 insertions(+), 23 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 470dcda10add..83326ad66d9b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1408,6 +1408,9 @@ out:
 	return ret;
 }
 
+#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
+		   VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+
 int hugepage_madvise(struct vm_area_struct *vma,
 		     unsigned long *vm_flags, int advice)
 {
@@ -1416,11 +1419,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		/*
 		 * Be somewhat over-protective like KSM for now!
 		 */
-		if (*vm_flags & (VM_HUGEPAGE |
-				 VM_SHARED | VM_MAYSHARE |
-				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
-				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
-				 VM_MIXEDMAP | VM_SAO))
+		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
 			return -EINVAL;
 		*vm_flags &= ~VM_NOHUGEPAGE;
 		*vm_flags |= VM_HUGEPAGE;
@@ -1436,11 +1435,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		/*
 		 * Be somewhat over-protective like KSM for now!
 		 */
-		if (*vm_flags & (VM_NOHUGEPAGE |
-				 VM_SHARED | VM_MAYSHARE |
-				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
-				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
-				 VM_MIXEDMAP | VM_SAO))
+		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
 			return -EINVAL;
 		*vm_flags &= ~VM_HUGEPAGE;
 		*vm_flags |= VM_NOHUGEPAGE;
@@ -1574,10 +1569,14 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
 		 * page fault if needed.
 		 */
 		return 0;
-	if (vma->vm_file || vma->vm_ops)
+	if (vma->vm_ops)
 		/* khugepaged not yet working on file or special mappings */
 		return 0;
-	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+	/*
+	 * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
+	 * true too, verify it here.
+	 */
+	VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
@@ -1828,12 +1827,15 @@ static void collapse_huge_page(struct mm_struct *mm,
 	    (vma->vm_flags & VM_NOHUGEPAGE))
 		goto out;
 
-	/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
-	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+	if (!vma->anon_vma || vma->vm_ops)
 		goto out;
 	if (is_vma_temporary_stack(vma))
 		goto out;
-	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+	/*
+	 * If is_pfn_mapping() is true is_linear_pfn_mapping() must be
+	 * true too, verify it here.
+	 */
+	VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
@@ -2066,13 +2068,16 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			progress++;
 			continue;
 		}
-		/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
-		if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+		if (!vma->anon_vma || vma->vm_ops)
 			goto skip;
 		if (is_vma_temporary_stack(vma))
 			goto skip;
-
-		VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+		/*
+		 * If is_pfn_mapping() is true is_linear_pfn_mapping()
+		 * must be true too, verify it here.
+		 */
+		VM_BUG_ON(is_linear_pfn_mapping(vma) ||
+			  vma->vm_flags & VM_NO_THP);
 
 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 		hend = vma->vm_end & HPAGE_PMD_MASK;
diff --git a/mm/memory.c b/mm/memory.c
index ce22a250926f..607098d47e74 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3396,7 +3396,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * run pte_offset_map on the pmd, if an huge pmd could
 	 * materialize from under us from a different thread.
 	 */
-	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+	if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
 		return VM_FAULT_OOM;
 	/* if an huge pmd materialized from under us just retry later */
 	if (unlikely(pmd_trans_huge(*pmd)))
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 83fb72c108b7..f52e85c80e8d 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -172,10 +172,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
 
 	/*
 	 * The baseline for the badness score is the proportion of RAM that each
-	 * task's rss and swap space use.
+	 * task's rss, pagetable and swap space use.
 	 */
-	points = (get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS)) * 1000 /
-			totalpages;
+	points = get_mm_rss(p->mm) + p->mm->nr_ptes;
+	points += get_mm_counter(p->mm, MM_SWAPENTS);
+
+	points *= 1000;
+	points /= totalpages;
 	task_unlock(p);
 
 	/*
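
The VM_NO_THP macro introduced in the mm/huge_memory.c hunk replaces an open-coded flag list that hugepage_madvise() previously repeated in two branches. VM_SPECIAL in kernels of this era expands to VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP, so the new mask should reject exactly the flags the deleted lists did. A minimal userspace sketch of that equivalence, using illustrative stand-in bit values rather than the kernel's real VM_* definitions:

#include <assert.h>

/* Illustrative stand-in bit values; the kernel's real VM_* flags differ. */
#define VM_IO          0x001UL
#define VM_DONTEXPAND  0x002UL
#define VM_RESERVED    0x004UL
#define VM_PFNMAP      0x008UL
#define VM_SHARED      0x010UL
#define VM_MAYSHARE    0x020UL
#define VM_HUGETLB     0x040UL
#define VM_INSERTPAGE  0x080UL
#define VM_MIXEDMAP    0x100UL
#define VM_SAO         0x200UL

/* VM_SPECIAL groups the "special mapping" flags, as in mm.h of this era. */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

/* The consolidated mask added by the patch. */
#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
                   VM_HUGETLB|VM_SHARED|VM_MAYSHARE)

/* The flag list the patch deleted from both hugepage_madvise() branches. */
#define OLD_MADVISE_MASK (VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO | \
                          VM_DONTEXPAND | VM_RESERVED | VM_HUGETLB |    \
                          VM_INSERTPAGE | VM_MIXEDMAP | VM_SAO)

int main(void)
{
    /* The new macro covers exactly the same flags as the old list. */
    assert(VM_NO_THP == OLD_MADVISE_MASK);
    return 0;
}

Centralizing the mask also keeps the two madvise branches and the three khugepaged VM_BUG_ON checks from drifting apart as flags are added or removed.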
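The mm/memory.c change makes handle_mm_fault() call __pte_alloc() only when the pmd is still empty; previously the allocation ran unconditionally, even though a huge pmd could have materialized underneath from another thread. A simplified, single-threaded sketch of this check-then-allocate pattern; pmd_none() and the allocator here are stand-ins for the kernel primitives (the real __pte_alloc() additionally rechecks the pmd under the page-table lock):

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Stand-in for a page-middle-directory entry (hypothetical, not the
 * kernel's pmd_t): NULL means no page table is installed yet. */
struct pmd { void *pte_table; };

static bool pmd_none(const struct pmd *pmd)
{
    return pmd->pte_table == NULL;
}

/* Stand-in for __pte_alloc(): allocate and install a page-table page. */
static int pte_alloc_stub(struct pmd *pmd)
{
    void *pte = calloc(512, sizeof(void *)); /* pretend pte page */
    if (!pte)
        return -1;                           /* maps to VM_FAULT_OOM */
    pmd->pte_table = pte;
    return 0;
}

static int handle_fault(struct pmd *pmd)
{
    /*
     * The fix: only allocate when the pmd is still empty. The old code
     * called the allocator unconditionally, even when another thread
     * had already populated the pmd (e.g. with a transparent huge page).
     */
    if (pmd_none(pmd) && pte_alloc_stub(pmd))
        return -1;
    /* ...proceed to walk/install ptes under this pmd... */
    return 0;
}

int main(void)
{
    struct pmd pmd = { .pte_table = NULL };
    int ret = handle_fault(&pmd);  /* first fault installs the table */
    ret |= handle_fault(&pmd);     /* second fault sees it, skips alloc */
    free(pmd.pte_table);
    return ret ? 1 : 0;
}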
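The mm/oom_kill.c hunk folds a task's page-table pages (mm->nr_ptes) into the badness baseline, so the score becomes (rss + nr_ptes + swap entries) * 1000 / totalpages rather than (rss + swap entries) * 1000 / totalpages. Page tables matter here because workloads such as fork bombs can tie up substantial memory in page tables that rss alone never reflects. A standalone sketch of the new arithmetic, where the struct and helper are hypothetical stand-ins for the kernel's mm_struct counters:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's mm_struct counters. */
struct mm_counters {
    unsigned long rss;      /* resident pages, cf. get_mm_rss()          */
    unsigned long nr_ptes;  /* page-table pages, cf. mm->nr_ptes         */
    unsigned long swapents; /* swap entries, cf. the MM_SWAPENTS counter */
};

/* New baseline: RAM + page tables + swap, as a permille of totalpages. */
static unsigned long oom_badness_points(const struct mm_counters *mm,
                                        unsigned long totalpages)
{
    unsigned long points = mm->rss + mm->nr_ptes;
    points += mm->swapents;

    points *= 1000;
    return points / totalpages;
}

int main(void)
{
    /* A task using ~200 MiB of RAM, 200 page-table pages, and 4 MiB of
     * swap on a machine with 1 GiB of 4 KiB pages (totalpages = 262144). */
    struct mm_counters mm = { .rss = 51200, .nr_ptes = 200, .swapents = 1024 };
    printf("badness = %lu / 1000\n", oom_badness_points(&mm, 262144));
    return 0;
}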