author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2007-10-19 08:39:14 +0200
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-19 20:53:34 +0200
commit | 1c7037db50ebecf3d5cfbf7082daa5d97d900fef (patch)
tree | 1843c417160b79c3f79a54d546ddcf5ccdb1b44b
parent | kmap leak fix for x86_32 kdump (diff)
download | linux-1c7037db50ebecf3d5cfbf7082daa5d97d900fef.tar.xz, linux-1c7037db50ebecf3d5cfbf7082daa5d97d900fef.zip
remove unused flush_tlb_pgtables
Nobody uses flush_tlb_pgtables anymore; this patch removes all remaining
traces of it from all archs.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
26 files changed, 2 insertions, 195 deletions
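For orientation before the per-file hunks: every per-architecture definition removed below had decayed into an empty stub or no-op macro (or a BUG() placeholder in the nommu-style headers), which is why the interface can simply go away. The sketch below condenses that common shape; it is an illustration assembled from the removed lines, not a verbatim excerpt of any single header, and the forward declaration is a stand-in for the kernel's real struct mm_struct.

```c
/* Illustrative only: the typical no-op definition this patch deletes from
 * two dozen asm-<arch>/tlbflush.h headers. */
struct mm_struct;   /* stand-in forward declaration for this sketch */

static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /* nothing to do: no port caches software page tables in its TLB */
}
```

The exact spelling varied from port to port (several used `#define flush_tlb_pgtables(mm,start,end) do { } while (0)` instead of an inline), as the hunks below show.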
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 552cabac0608..da42ab414c48 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -87,30 +87,7 @@ changes occur:
 	This is used primarily during fault processing.
 
-5) void flush_tlb_pgtables(struct mm_struct *mm,
-			   unsigned long start, unsigned long end)
-
-   The software page tables for address space 'mm' for virtual
-   addresses in the range 'start' to 'end-1' are being torn down.
-
-   Some platforms cache the lowest level of the software page tables
-   in a linear virtually mapped array, to make TLB miss processing
-   more efficient.  On such platforms, since the TLB is caching the
-   software page table structure, it needs to be flushed when parts
-   of the software page table tree are unlinked/freed.
-
-   Sparc64 is one example of a platform which does this.
-
-   Usually, when munmap()'ing an area of user virtual address
-   space, the kernel leaves the page table parts around and just
-   marks the individual pte's as invalid.  However, if very large
-   portions of the address space are unmapped, the kernel frees up
-   those portions of the software page tables to prevent potential
-   excessive kernel memory usage caused by erratic mmap/mmunmap
-   sequences.  It is at these times that flush_tlb_pgtables will
-   be invoked.
-
-6) void update_mmu_cache(struct vm_area_struct *vma,
+5) void update_mmu_cache(struct vm_area_struct *vma,
 	unsigned long address, pte_t pte)
 
 	At the end of every page fault, this routine is invoked to
@@ -123,7 +100,7 @@ changes occur:
 	translations for software managed TLB configurations.
 	The sparc64 port currently does this.
 
-7) void tlb_migrate_finish(struct mm_struct *mm)
+6) void tlb_migrate_finish(struct mm_struct *mm)
 
 	This interface is called at the end of an explicit
 	process migration. This interface provides a hook
diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h
index 1ca3ed3bd6d3..eefab3fb51ae 100644
--- a/include/asm-alpha/tlbflush.h
+++ b/include/asm-alpha/tlbflush.h
@@ -92,17 +92,6 @@ flush_tlb_other(struct mm_struct *mm)
 	if (*mmc) *mmc = 0;
 }
 
-/* Flush a specified range of user mapping page tables from TLB.
-   Although Alpha uses VPTE caches, this can be a nop, as Alpha does
-   not have finegrained tlb flushing, so it will flush VPTE stuff
-   during next flush_tlb_range.  */
-
-static inline void
-flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
-		   unsigned long end)
-{
-}
-
 #ifndef CONFIG_SMP
 /* Flush everything (kernel mapping may also have changed
    due to vmalloc/vfree).  */
diff --git a/include/asm-arm/tlbflush.h b/include/asm-arm/tlbflush.h
index 71be4fded7e2..8c6bc1bb9d1a 100644
--- a/include/asm-arm/tlbflush.h
+++ b/include/asm-arm/tlbflush.h
@@ -463,11 +463,6 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  */
 extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
 
-/*
- * ARM processors do not cache TLB tables in RAM.
- */
-#define flush_tlb_pgtables(mm,start,end)	do { } while (0)
-
 #endif
 
 #endif /* CONFIG_MMU */
diff --git a/include/asm-avr32/tlbflush.h b/include/asm-avr32/tlbflush.h
index 730e268f81f3..5bc7c88a5770 100644
--- a/include/asm-avr32/tlbflush.h
+++ b/include/asm-avr32/tlbflush.h
@@ -19,7 +19,6 @@
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
 extern void flush_tlb(void);
 extern void flush_tlb_all(void);
@@ -29,12 +28,6 @@ extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void __flush_tlb_page(unsigned long asid, unsigned long page);
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* Nothing to do */
-}
-
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #endif /* __ASM_AVR32_TLBFLUSH_H */
diff --git a/include/asm-blackfin/tlbflush.h b/include/asm-blackfin/tlbflush.h
index 10a07ba1e011..277b400924b8 100644
--- a/include/asm-blackfin/tlbflush.h
+++ b/include/asm-blackfin/tlbflush.h
@@ -53,10 +53,4 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
 	BUG();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	BUG();
-}
-
 #endif
diff --git a/include/asm-cris/tlbflush.h b/include/asm-cris/tlbflush.h
index 0569612477e3..20697e7ef4f2 100644
--- a/include/asm-cris/tlbflush.h
+++ b/include/asm-cris/tlbflush.h
@@ -38,13 +38,6 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
 	flush_tlb_mm(vma->vm_mm);
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* CRIS does not keep any page table caches in TLB */
-}
-
-
 static inline void flush_tlb(void)
 {
 	flush_tlb_mm(current->mm);
diff --git a/include/asm-frv/tlbflush.h b/include/asm-frv/tlbflush.h
index 8370f97e41ee..7ac5eafc5d98 100644
--- a/include/asm-frv/tlbflush.h
+++ b/include/asm-frv/tlbflush.h
@@ -57,7 +57,6 @@ do {						\
 #define __flush_tlb_global()		flush_tlb_all()
 #define flush_tlb()			flush_tlb_all()
 #define flush_tlb_kernel_range(start, end) flush_tlb_all()
-#define flush_tlb_pgtables(mm,start,end)	do { } while(0)
 
 #else
 
@@ -66,7 +65,6 @@ do {						\
 #define flush_tlb_mm(mm)			BUG()
 #define flush_tlb_page(vma,addr)		BUG()
 #define flush_tlb_range(mm,start,end)		BUG()
-#define flush_tlb_pgtables(mm,start,end)	BUG()
 #define flush_tlb_kernel_range(start, end)	BUG()
 
 #endif
diff --git a/include/asm-h8300/tlbflush.h b/include/asm-h8300/tlbflush.h
index 9a2c5c9fd700..41c148a9208e 100644
--- a/include/asm-h8300/tlbflush.h
+++ b/include/asm-h8300/tlbflush.h
@@ -52,10 +52,4 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
 	BUG();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	BUG();
-}
-
 #endif /* _H8300_TLBFLUSH_H */
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index e37f9fbf33af..80bcb0a38e8a 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -84,19 +84,6 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 }
 
 /*
- * Flush the TLB entries mapping the virtually mapped linear page
- * table corresponding to address range [START-END).
- */
-static inline void
-flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	/*
-	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
-	 * interface (see tlb.h).
-	 */
-}
-
-/*
  * Flush the local TLB.  Invoked from another cpu using an IPI.
  */
 #ifdef CONFIG_SMP
diff --git a/include/asm-m32r/tlbflush.h b/include/asm-m32r/tlbflush.h
index 3d37ac002bcc..0ef95307784e 100644
--- a/include/asm-m32r/tlbflush.h
+++ b/include/asm-m32r/tlbflush.h
@@ -12,7 +12,6 @@
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
 
 extern void local_flush_tlb_all(void);
@@ -93,8 +92,6 @@ static __inline__ void __flush_tlb_all(void)
 	);
 }
 
-#define flush_tlb_pgtables(mm, start, end)	do { } while (0)
-
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 
 #endif /* _ASM_M32R_TLBFLUSH_H */
diff --git a/include/asm-m68k/tlbflush.h b/include/asm-m68k/tlbflush.h
index 31678831ee47..17707ec315e2 100644
--- a/include/asm-m68k/tlbflush.h
+++ b/include/asm-m68k/tlbflush.h
@@ -92,11 +92,6 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 	flush_tlb_all();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 #else
@@ -219,11 +214,6 @@ static inline void flush_tlb_kernel_page (unsigned long addr)
 	sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 #endif
 
 #endif /* _M68K_TLBFLUSH_H */
diff --git a/include/asm-m68knommu/tlbflush.h b/include/asm-m68knommu/tlbflush.h
index de858db28b00..a470cfb803eb 100644
--- a/include/asm-m68knommu/tlbflush.h
+++ b/include/asm-m68knommu/tlbflush.h
@@ -52,10 +52,4 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
 	BUG();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	BUG();
-}
-
 #endif /* _M68KNOMMU_TLBFLUSH_H */
diff --git a/include/asm-mips/tlbflush.h b/include/asm-mips/tlbflush.h
index 730e841fb08a..86b21de12e91 100644
--- a/include/asm-mips/tlbflush.h
+++ b/include/asm-mips/tlbflush.h
@@ -11,7 +11,6 @@
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
 extern void local_flush_tlb_all(void);
 extern void local_flush_tlb_mm(struct mm_struct *mm);
@@ -45,10 +44,4 @@ extern void flush_tlb_one(unsigned long vaddr);
 
 #endif /* CONFIG_SMP */
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-	unsigned long start, unsigned long end)
-{
-	/* Nothing to do on MIPS.  */
-}
-
 #endif /* __ASM_TLBFLUSH_H */
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index 270cf309772b..b72ec66db699 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -57,10 +57,6 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 #endif
 }
 
-extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-}
-
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 	unsigned long addr)
 {
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index a022f806bb21..b6b036ccee34 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -8,7 +8,6 @@
 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -174,15 +173,5 @@ extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 */
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 
-/*
- * This is called in munmap when we have freed up some page-table
- * pages.  We don't need to do anything here, there's nothing special
- * about our page-table pages. -- paulus
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 #endif /*__KERNEL__ */
 #endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 66793f55c8b2..6de2632a3e4f 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -14,7 +14,6 @@
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
 
 /*
@@ -152,10 +151,4 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 #endif
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* S/390 does not keep any page table caches in TLB */
-}
-
 #endif /* _S390_TLBFLUSH_H */
diff --git a/include/asm-sh/tlbflush.h b/include/asm-sh/tlbflush.h
index 455fb8da441e..e0ac97221ae6 100644
--- a/include/asm-sh/tlbflush.h
+++ b/include/asm-sh/tlbflush.h
@@ -9,7 +9,6 @@
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
 extern void local_flush_tlb_all(void);
 extern void local_flush_tlb_mm(struct mm_struct *mm);
@@ -47,9 +46,4 @@ extern void flush_tlb_one(unsigned long asid, unsigned long page);
 
 #endif /* CONFIG_SMP */
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* Nothing to do */
-}
 #endif /* __ASM_SH_TLBFLUSH_H */
diff --git a/include/asm-sh64/tlbflush.h b/include/asm-sh64/tlbflush.h
index e45beadc29ee..16a164a23754 100644
--- a/include/asm-sh64/tlbflush.h
+++ b/include/asm-sh64/tlbflush.h
@@ -20,10 +20,6 @@ extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
 
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
diff --git a/include/asm-sparc/tlbflush.h b/include/asm-sparc/tlbflush.h
index a619da5cfaa9..b957e29d2ae1 100644
--- a/include/asm-sparc/tlbflush.h
+++ b/include/asm-sparc/tlbflush.h
@@ -13,7 +13,6 @@
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
 
 #ifdef CONFIG_SMP
@@ -42,11 +41,6 @@ BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
 BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
 BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
 
-// Thanks to Anton Blanchard, our pagetables became uncached in 2.4. Wee!
-// extern void flush_tlb_pgtables(struct mm_struct *mm,
-//     unsigned long start, unsigned long end);
-#define flush_tlb_pgtables(mm, start, end)	do{ }while(0)
-
 #define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
 #define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
 #define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h
index 3487328570ed..fbb675dbe0c9 100644
--- a/include/asm-sparc64/tlbflush.h
+++ b/include/asm-sparc64/tlbflush.h
@@ -41,11 +41,4 @@ do {	flush_tsb_kernel_range(start,end); \
 
 #endif /* ! CONFIG_SMP */
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	/* We don't use virtual page tables for TLB miss processing
-	 * any more.  Nowadays we use the TSB.
-	 */
-}
-
 #endif /* _SPARC64_TLBFLUSH_H */
diff --git a/include/asm-um/tlbflush.h b/include/asm-um/tlbflush.h
index 9d647c55350b..614f2c091178 100644
--- a/include/asm-um/tlbflush.h
+++ b/include/asm-um/tlbflush.h
@@ -17,7 +17,6 @@
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_kernel_vm() flushes the kernel vm area
 * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
 
 extern void flush_tlb_all(void);
@@ -29,9 +28,4 @@ extern void flush_tlb_kernel_vm(void);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void __flush_tlb_one(unsigned long addr);
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 #endif
diff --git a/include/asm-v850/tlbflush.h b/include/asm-v850/tlbflush.h
index 5f2f85f636ea..c44aa64449c8 100644
--- a/include/asm-v850/tlbflush.h
+++ b/include/asm-v850/tlbflush.h
@@ -61,10 +61,4 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
 	BUG ();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	BUG ();
-}
-
 #endif /* __V850_TLBFLUSH_H__ */
diff --git a/include/asm-x86/tlbflush_32.h b/include/asm-x86/tlbflush_32.h
index a50fa6741486..2bd5b95e2048 100644
--- a/include/asm-x86/tlbflush_32.h
+++ b/include/asm-x86/tlbflush_32.h
@@ -78,7 +78,6 @@
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 * - flush_tlb_others(cpumask, mm, va) flushes a TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
@@ -166,10 +165,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* i386 does not keep any page table caches in TLB */
-}
-
 #endif /* _I386_TLBFLUSH_H */
diff --git a/include/asm-x86/tlbflush_64.h b/include/asm-x86/tlbflush_64.h
index 888eb4abdd07..7731fd23d572 100644
--- a/include/asm-x86/tlbflush_64.h
+++ b/include/asm-x86/tlbflush_64.h
@@ -31,7 +31,6 @@ static inline void __flush_tlb_all(void)
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. Might be worth trying if for a small
@@ -98,12 +97,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* x86_64 does not keep any page table caches in a software TLB.
-	   The CPUs do in their hardware TLBs, but they are handled
-	   by the normal TLB flushing algorithms. */
-}
-
 #endif /* _X8664_TLBFLUSH_H */
diff --git a/include/asm-xtensa/tlbflush.h b/include/asm-xtensa/tlbflush.h
index 7c637b3c352c..46d240074f74 100644
--- a/include/asm-xtensa/tlbflush.h
+++ b/include/asm-xtensa/tlbflush.h
@@ -41,17 +41,6 @@ extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long);
 
 #define flush_tlb_kernel_range(start,end) flush_tlb_all()
 
-
-/* This is calld in munmap when we have freed up some page-table pages.
- * We don't need to do anything here, there's nothing special about our
- * page-table pages.
- */
-
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 /* TLB operations. */
 
 static inline unsigned long itlb_probe(unsigned long addr)
diff --git a/mm/memory.c b/mm/memory.c
index bd16dcaeefb8..142683df8755 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -259,9 +259,6 @@ void free_pgd_range(struct mmu_gather **tlb,
 			continue;
 		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
-
-	if (!(*tlb)->fullmm)
-		flush_tlb_pgtables((*tlb)->mm, start, end);
 }
 
 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
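The one hunk with any runtime effect is the last one: free_pgd_range() in mm/memory.c stops invoking the hook after tearing down page tables. Since every definition above was empty, dropping the call changes no behaviour. The following self-contained mock (stand-in struct definitions and a hypothetical free_pgd_range_tail() helper are mine; only the control flow around the deleted call mirrors the hunk) illustrates what the old call site did:

```c
#include <stdio.h>

/* Stand-ins for the kernel types, reduced to what the call site touches. */
struct mm_struct { int id; };
struct mmu_gather { struct mm_struct *mm; int fullmm; };

/* The removed hook: a no-op on every architecture, per the headers above. */
static void flush_tlb_pgtables(struct mm_struct *mm,
                               unsigned long start, unsigned long end)
{
        (void)mm; (void)start; (void)end;       /* nothing to flush */
}

/* Mirrors the tail of free_pgd_range() before this patch: the hook was called
 * unless the mmu_gather covered the whole address space (fullmm). */
static void free_pgd_range_tail(struct mmu_gather **tlb,
                                unsigned long start, unsigned long end)
{
        if (!(*tlb)->fullmm)
                flush_tlb_pgtables((*tlb)->mm, start, end);
}

int main(void)
{
        struct mm_struct mm = { 1 };
        struct mmu_gather gather = { &mm, 0 };
        struct mmu_gather *tlb = &gather;

        free_pgd_range_tail(&tlb, 0x100000UL, 0x200000UL);
        puts("call site exercised; the hook was a no-op, so removing it is safe");
        return 0;
}
```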