author     Linus Torvalds <torvalds@linux-foundation.org>    2016-07-27 04:55:54 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-07-27 04:55:54 +0200
commit     0e06f5c0deeef0332a5da2ecb8f1fcf3e024d958 (patch)
tree       e0f0af4aadf10c713c5cf1b65356844b3c9b3215 /include/asm-generic
parent     Merge tag 'for-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/sre/lin... (diff)
parent     thp: fix comments of __pmd_trans_huge_lock() (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- a few misc bits
- ocfs2
- most(?) of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (125 commits)
thp: fix comments of __pmd_trans_huge_lock()
cgroup: remove unnecessary 0 check from css_from_id()
cgroup: fix idr leak for the first cgroup root
mm: memcontrol: fix documentation for compound parameter
mm: memcontrol: remove BUG_ON in uncharge_list
mm: fix build warnings in <linux/compaction.h>
mm, thp: convert from optimistic swapin collapsing to conservative
mm, thp: fix comment inconsistency for swapin readahead functions
thp: update Documentation/{vm/transhuge,filesystems/proc}.txt
shmem: split huge pages beyond i_size under memory pressure
thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE
khugepaged: add support of collapse for tmpfs/shmem pages
shmem: make shmem_inode_info::lock irq-safe
khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page()
thp: extract khugepaged from mm/huge_memory.c
shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings
shmem: add huge pages support
shmem: get_unmapped_area align huge page
shmem: prepare huge= mount option and sysfs knob
mm, rmap: account shmem thp pages
...
Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/tlb.h   59
1 file changed, 48 insertions(+), 11 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 9dbb739cafa0..c6d667187608 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -107,6 +107,12 @@ struct mmu_gather {
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 	unsigned int		batch_count;
+	/*
+	 * __tlb_adjust_range will track the new addr here,
+	 * so that we can adjust the range after the flush
+	 */
+	unsigned long addr;
+	int page_size;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
@@ -115,23 +121,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
-int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
-
-/* tlb_remove_page
- *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- *	required.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	if (!__tlb_remove_page(tlb, page))
-		tlb_flush_mmu(tlb);
-}
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address)
 {
 	tlb->start = min(tlb->start, address);
 	tlb->end = max(tlb->end, address + PAGE_SIZE);
+	/*
+	 * Track the last address with which we adjusted the range. This
+	 * will be used later to adjust again after a mmu_flush due to
+	 * failed __tlb_remove_page
+	 */
+	tlb->addr = address;
 }
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
@@ -144,6 +147,40 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 	}
 }
 
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+					struct page *page, int page_size)
+{
+	if (__tlb_remove_page_size(tlb, page, page_size)) {
+		tlb_flush_mmu(tlb);
+		tlb->page_size = page_size;
+		__tlb_adjust_range(tlb, tlb->addr);
+		__tlb_remove_page_size(tlb, page, page_size);
+	}
+}
+
+static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+/* tlb_remove_page
+ *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ *	required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
+{
+	/* active->nr should be zero when we call this */
+	VM_BUG_ON_PAGE(tlb->active->nr, page);
+	tlb->page_size = PAGE_SIZE;
+	__tlb_adjust_range(tlb, tlb->addr);
+	return __tlb_remove_page(tlb, page);
+}
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush.  When we're doing a munmap,
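For context, a minimal caller sketch of the reworked interface (not part of the commit). The function my_zap_huge_pmd() and its locals are hypothetical; pmd_page(), HPAGE_PMD_SIZE, and the mmu_gather helpers are the kernel symbols the diff above introduces or relies on.

	/*
	 * Hypothetical sketch, assuming the post-merge mmu_gather API above;
	 * my_zap_huge_pmd() is illustrative only, not from this commit.
	 */
	static void my_zap_huge_pmd(struct mmu_gather *tlb, pmd_t *pmd,
				    unsigned long addr)
	{
		struct page *page = pmd_page(*pmd);

		/* Grow tlb->start/tlb->end for this mapping; also records addr
		 * in tlb->addr for the retry path. */
		__tlb_adjust_range(tlb, addr);

		/*
		 * Queue the page with its real size.  If the gather batch is
		 * full (__tlb_remove_page_size() returns true), the helper
		 * flushes the TLB, re-adjusts the range from tlb->addr, and
		 * queues the page again, so this caller no longer checks a
		 * return value the way old __tlb_remove_page() users did.
		 */
		tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}

Note the split the diff leaves in place: __tlb_remove_page() and __tlb_remove_pte_page() still return a bool for PTE-zapping loops that want to flush at a point of their choosing, while tlb_remove_page() and tlb_remove_page_size() keep the fire-and-forget semantics and do the flush-and-retry internally.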