Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/parport.h | 4 ++--
-rw-r--r--  include/asm-generic/pgtable.h | 6 ++----
-rw-r--r--  include/asm-generic/tlb.h     | 9 +++++++++
3 files changed, 13 insertions, 6 deletions
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
index 40528cb977e8..2c9f9d4336ca 100644
--- a/include/asm-generic/parport.h
+++ b/include/asm-generic/parport.h
@@ -10,8 +10,8 @@
  * to devices on the PCI bus.
  */
 
-static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
 {
 #ifdef CONFIG_ISA
 	return parport_pc_find_isa_ports(autoirq, autodma);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 701beab27aab..5cf680a98f9b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn)
 	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
 }
 
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
-	return page_to_pfn(ZERO_PAGE(addr));
-}
+#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
+
 #else
 static inline int is_zero_pfn(unsigned long pfn)
 {
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index ed6642ad03e0..25f01d0bc149 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -78,6 +78,14 @@ struct mmu_gather_batch {
 #define MAX_GATHER_BATCH	\
 	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
 
+/*
+ * Limit the maximum number of mmu_gather batches to reduce a risk of soft
+ * lockups for non-preemptible kernels on huge machines when a lot of memory
+ * is zapped during unmapping.
+ * 10K pages freed at once should be safe even without a preemption point.
+ */
+#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
+
 /* struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
  */
@@ -96,6 +104,7 @@ struct mmu_gather {
 	struct mmu_gather_batch	*active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
+	unsigned int		batch_count;
 };
 
 #define HAVE_GENERIC_MMU_GATHER
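
Note on the new tlb.h constants: MAX_GATHER_BATCH_COUNT caps how many batch pages a single mmu_gather may chain, and since each batch page holds MAX_GATHER_BATCH page pointers, the product bounds how many pages can be deferred before a flush, staying just under the 10K mentioned in the comment. The standalone program below only illustrates that arithmetic and is not part of this diff; the 4 KiB PAGE_SIZE, 8-byte pointers (64-bit build) and 16-byte batch-header size are assumed values, and the real sizeof(struct mmu_gather_batch) depends on the kernel configuration.

/*
 * Illustration only: reproduces the MAX_GATHER_BATCH / MAX_GATHER_BATCH_COUNT
 * arithmetic from include/asm-generic/tlb.h in userspace.  PAGE_SIZE and
 * BATCH_HEADER_SIZE are assumptions, not values taken from this commit.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumed 4 KiB pages */
#define BATCH_HEADER_SIZE	16UL	/* assumed sizeof(struct mmu_gather_batch) */

/* Page pointers that fit in one batch page, mirroring MAX_GATHER_BATCH. */
#define MAX_GATHER_BATCH \
	((PAGE_SIZE - BATCH_HEADER_SIZE) / sizeof(void *))

/* Batches allowed per gather, mirroring the new MAX_GATHER_BATCH_COUNT. */
#define MAX_GATHER_BATCH_COUNT	(10000UL / MAX_GATHER_BATCH)

int main(void)
{
	printf("pages per batch    : %lu\n", MAX_GATHER_BATCH);
	printf("batches per gather : %lu\n", MAX_GATHER_BATCH_COUNT);
	printf("max deferred pages : %lu\n",
	       MAX_GATHER_BATCH_COUNT * MAX_GATHER_BATCH);
	return 0;
}

Under those assumptions this prints 510 pages per batch, 19 batches per gather and a cap of 9,690 deferred pages, matching the "10K pages freed at once" ceiling described in the hunk; the new batch_count field in struct mmu_gather is the counter the generic gather code can compare against MAX_GATHER_BATCH_COUNT before chaining another batch.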