From 80bb3ef109ff40a7593d9481c17de9bbc4d7c0e2 Mon Sep 17 00:00:00 2001
From: Xiangyu Lu
Date: Tue, 15 Apr 2014 09:38:17 +0100
Subject: ARM: 8027/1: fix do_div() bug in big-endian systems

In big-endian systems, "%1" gets the most significant part of the value,
causing the instruction to produce the wrong result.

When viewing ftrace records on big-endian ARM systems, we found timestamp
errors:

   swapper-0     [001]  1325.970000:   0:120:R ==> [001]   16:120:R events/1
   events/1-16   [001]  1325.970000:  16:120:S ==> [001]    0:120:R swapper
   swapper-0     [000]  1325.1000000:  0:120:R   + [000]   15:120:R events/0
   swapper-0     [000]  1325.1000000:  0:120:R ==> [000]   15:120:R events/0
   swapper-0     [000]  1326.030000:   0:120:R   + [000] 1150:120:R sshd
   swapper-0     [000]  1326.030000:   0:120:R ==> [000] 1150:120:R sshd

Printing an ftrace record calls do_div(n, base), which is implemented in
arch/arm/include/asm/div64.h. With n = 10000000 and base = 1000000,
do_div(n, base) executes "umull %Q0, %R0, %1, %Q2".

Reviewed-by: Dave Martin
Reviewed-by: Nicolas Pitre
Cc:  # 2.6.20+
Signed-off-by: Alex Wu
Signed-off-by: Xiangyu Lu
Signed-off-by: Russell King
---
 arch/arm/include/asm/div64.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 191ada6e4d2d..662c7bd06108 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -156,7 +156,7 @@
 		/* Select the best insn combination to perform the */	\
 		/* actual __m * __n / (__p << 64) operation.       */	\
 		if (!__c) {						\
-			asm (	"umull	%Q0, %R0, %1, %Q2\n\t"		\
+			asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"		\
 				"mov	%Q0, #0"			\
 				: "=&r" (__res)				\
 				: "r" (__m), "r" (__n)			\
-- cgit v1.2.3
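A note on the operand modifiers involved, since they are the whole fix: GCC places a
64-bit "r" operand in a register pair, %Q<n> names the register holding the least
significant 32 bits and %R<n> the most significant 32 bits, while a bare %<n> simply
names the first register of the pair, which on a big-endian target is the high word.
The stand-alone sketch below is an illustration added here, not part of the patch,
and it only builds for 32-bit ARM; it uses the corrected modifiers the same way the
macro does.

    /* Multiply the low 32 bits of two 64-bit values, as the fixed macro does.
     * %Q1/%Q2 always pick the least-significant word of each operand pair,
     * independent of endianness; a bare %1 would pick the first register of
     * the pair, i.e. the high word when compiling for big-endian ARM.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t umull_lo(uint64_t m, uint64_t n)
    {
            uint64_t res;

            asm ("umull %Q0, %R0, %Q1, %Q2"
                 : "=&r" (res)
                 : "r" (m), "r" (n));
            return res;
    }

    int main(void)
    {
            /* 10000000 * 1000000 = 10^13, the values from the report above */
            printf("%llu\n", (unsigned long long)umull_lo(10000000ULL, 1000000ULL));
            return 0;
    }

Built with -mbig-endian, the %Q/%R form still multiplies the two low words; the old
bare %1 would have picked the high word of m instead and printed 0 here.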
From 4530e4b6a450af14973c2b0703edfb02d66cbd41 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 22 Apr 2014 00:25:35 +0100
Subject: ARM: 8032/1: bL_switcher: fix validation check before its activation

The switcher should not depend on MAX_CLUSTER to determine if it should
be activated or not. In a multiplatform kernel binary it is possible to
have dual-cluster and quad-cluster platforms configured in. In that case
MAX_CLUSTER, which is a build-time limit, should be 4, and that shouldn't
prevent the switcher from working if the kernel is booted on a b.L
dual-cluster system.

In bL_switcher_halve_cpus() we already have a runtime validation check
to make sure we're dealing with only two clusters, so booting on a quad
cluster system will be caught and switcher activation aborted.

However, the b.L switcher must ensure the MCPM layer is initialized on
the booted hardware before doing anything. The mcpm_is_available()
function is added to that effect.

Signed-off-by: Nicolas Pitre
Tested-by: Abhilash Kesavan
Signed-off-by: Russell King
---
 arch/arm/common/bL_switcher.c | 6 ++----
 arch/arm/common/mcpm_entry.c  | 5 +++++
 arch/arm/include/asm/mcpm.h   | 7 +++++++
 3 files changed, 14 insertions(+), 4 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 5774b6ea7ad5..f01c0ee0c87e 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -797,10 +797,8 @@ static int __init bL_switcher_init(void)
 {
 	int ret;
 
-	if (MAX_NR_CLUSTERS != 2) {
-		pr_err("%s: only dual cluster systems are supported\n", __func__);
-		return -EINVAL;
-	}
+	if (!mcpm_is_available())
+		return -ENODEV;
 
 	cpu_notifier(bL_switcher_hotplug_callback, 0);
 
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 1e361abc29eb..86fd60fefbc9 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -48,6 +48,11 @@ int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
 	return 0;
 }
 
+bool mcpm_is_available(void)
+{
+	return (platform_ops) ? true : false;
+}
+
 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 {
 	if (!platform_ops)
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 608516ebabfe..a5ff410dcdb6 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -53,6 +53,13 @@ void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
  * CPU/cluster power operations API for higher subsystems to use.
  */
 
+/**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * This returns true or false accordingly.
+ */
+bool mcpm_is_available(void);
+
 /**
  * mcpm_cpu_power_up - make given CPU in given cluster runable
  *
-- cgit v1.2.3

From 556d3f7f4d791cae54fd24ef28296e666f4c96a6 Mon Sep 17 00:00:00 2001
From: Miklos Szeredi
Date: Fri, 11 Apr 2014 12:25:39 +0200
Subject: ARM: add renameat2 syscall

Signed-off-by: Miklos Szeredi
[dropped arch/arm/include/asm/unistd.h changes --rmk]
Signed-off-by: Russell King
---
 arch/arm/include/uapi/asm/unistd.h | 1 +
 arch/arm/kernel/calls.S            | 1 +
 2 files changed, 2 insertions(+)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index fb5584d0cc05..ba94446c72d9 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -408,6 +408,7 @@
 #define __NR_finit_module		(__NR_SYSCALL_BASE+379)
 #define __NR_sched_setattr		(__NR_SYSCALL_BASE+380)
 #define __NR_sched_getattr		(__NR_SYSCALL_BASE+381)
+#define __NR_renameat2			(__NR_SYSCALL_BASE+382)
 
 /*
  * This may need to be greater than __NR_last_syscall+1 in order to
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 166e945de832..8f51bdcdacbb 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -391,6 +391,7 @@
 		CALL(sys_finit_module)
 /* 380 */	CALL(sys_sched_setattr)
 		CALL(sys_sched_getattr)
+		CALL(sys_renameat2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
-- cgit v1.2.3
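For context on what this wiring exposes to user space: renameat2() is renameat()
with a flags argument such as RENAME_NOREPLACE and RENAME_EXCHANGE. The sketch below
is an illustration added here, not part of the patch; it invokes the syscall
directly through syscall(2), since libc wrappers did not exist at the time, and the
fallback number assumes the ARM EABI __NR_SYSCALL_BASE of 0, matching the table
entry above.

    #define _GNU_SOURCE
    #include <fcntl.h>              /* AT_FDCWD */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_renameat2
    #define __NR_renameat2 382      /* __NR_SYSCALL_BASE (0 on EABI) + 382 */
    #endif
    #ifndef RENAME_NOREPLACE
    #define RENAME_NOREPLACE (1 << 0)  /* fail with EEXIST rather than replace */
    #endif

    int main(void)
    {
            long ret = syscall(__NR_renameat2, AT_FDCWD, "old.txt",
                               AT_FDCWD, "new.txt", RENAME_NOREPLACE);
            if (ret < 0) {
                    perror("renameat2");
                    return 1;
            }
            return 0;
    }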
From cd1711709fe98d3569e7f8215ba31adcfc3686ae Mon Sep 17 00:00:00 2001
From: Sebastian Hesselbarth
Date: Thu, 24 Apr 2014 22:58:00 +0100
Subject: ARM: 8041/1: pj4: fix cpu_is_pj4 check

Commit fdb487f5c961b94486a78fa61fa28b8eff1954ab ("ARM: 8015/1: Add
cpu_is_pj4 to distinguish PJ4 because it has some differences with V7")
introduced a cpuid check for Marvell PJ4 processors to fix a regression
caused by adding PJ4 based Marvell Dove into multi_v7. Unfortunately,
this check is too narrow to catch PJ4 used on Dove itself and breaks
iWMMXt support.

This patch therefore relaxes the cpuid mask to match both PJ4 and PJ4B.
Also, rework the given comment about PJ4/PJ4B modifications to be a
little bit more specific about the differences.

Signed-off-by: Sebastian Hesselbarth
Tested-by: Thomas Petazzoni
Tested-by: Kevin Hilman
Signed-off-by: Russell King
---
 arch/arm/include/asm/cputype.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index c651e3b26ec7..4764344367d4 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -222,22 +222,22 @@ static inline int cpu_is_xsc3(void)
 #endif
 
 /*
- * Marvell's PJ4 core is based on V7 version. It has some modification
- * for coprocessor setting. For this reason, we need a way to distinguish
- * it.
+ * Marvell's PJ4 and PJ4B cores are based on V7 version,
+ * but require a specical sequence for enabling coprocessors.
+ * For this reason, we need a way to distinguish them.
  */
-#ifndef CONFIG_CPU_PJ4
-#define cpu_is_pj4()	0
-#else
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
 static inline int cpu_is_pj4(void)
 {
 	unsigned int id;
 
 	id = read_cpuid_id();
-	if ((id & 0xfffffff0) == 0x562f5840)
+	if ((id & 0xff0fff00) == 0x560f5800)
 		return 1;
 
 	return 0;
 }
+#else
+#define cpu_is_pj4()	0
 #endif
 
 #endif
-- cgit v1.2.3
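To make the mask change concrete: in the ARM main ID register the implementer sits
in bits [31:24], the variant in [23:20], the architecture in [19:16], the part
number in [15:4] and the revision in [3:0]. The old test demanded implementer 0x56
(Marvell), variant 2, architecture 0xf and part 0x584 exactly; the new test keeps
the implementer and architecture requirements but accepts any variant, any revision
and any 0x58x part number, which is what lets both PJ4 and PJ4B pass. The
stand-alone comparison below is purely illustrative; the ID values in it are
invented to exercise the masks, not taken from real silicon.

    #include <stdio.h>

    static int old_check(unsigned int id)   /* pre-patch mask */
    {
            return (id & 0xfffffff0) == 0x562f5840;
    }

    static int new_check(unsigned int id)   /* post-patch mask */
    {
            return (id & 0xff0fff00) == 0x560f5800;
    }

    int main(void)
    {
            /* hypothetical IDs: Marvell implementer (0x56), architecture 0xf,
             * part 0x58x, but differing variant / part / revision nibbles */
            unsigned int ids[] = { 0x562f5842, 0x561f5810, 0x560f5891 };
            unsigned int i;

            for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    printf("0x%08x: old=%d new=%d\n",
                           ids[i], old_check(ids[i]), new_check(ids[i]));
            return 0;
    }

Only the first ID satisfies the old check; all three satisfy the new one.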
From 1cf35d47712dd5dc4d62c6ce984f04ac6eab0408 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 25 Apr 2014 16:05:40 -0700
Subject: mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts

The mmu-gather operation 'tlb_flush_mmu()' has done two things: the
actual tlb flush operation, and the batched freeing of the pages that
the TLB entries pointed at.

This splits the operation into separate phases, so that the forced
batched flushing done by zap_pte_range() can now do the actual TLB
flush while still holding the page table lock, but delay the batched
freeing of all the pages to after the lock has been dropped.

This in turn allows us to avoid a race condition between
set_page_dirty() (as called by zap_pte_range() when it finds a dirty
shared memory pte) and page_mkclean(): because we now flush all the
dirty page data from the TLB's while holding the pte lock,
page_mkclean() will be held up walking the (recently cleaned) page
tables until after the TLB entries have been flushed from all CPU's.

Reported-by: Benjamin Herrenschmidt
Tested-by: Dave Hansen
Acked-by: Hugh Dickins
Cc: Peter Zijlstra
Cc: Russell King - ARM Linux
Cc: Tony Luck
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/tlb.h  | 12 +++++++++-
 arch/ia64/include/asm/tlb.h | 42 ++++++++++++++++++++++++++---------
 arch/s390/include/asm/tlb.h | 13 ++++++++++-
 arch/sh/include/asm/tlb.h   |  8 +++++++
 arch/um/include/asm/tlb.h   | 16 ++++++++++++--
 mm/memory.c                 | 53 +++++++++++++++++++++++++++++----------------
 6 files changed, 111 insertions(+), 33 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 0baf7f0d9394..f1a0dace3efe 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 	}
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;
 	if (tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index bc5efc7c3f3f..39d64e0df1de 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val)	((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long i;
-	unsigned int nr;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 
 	if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
 	}
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	unsigned long i;
+	unsigned int nr;
+
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		free_page_and_swap_cache(tlb->pages[i]);
 }
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
 	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c544b6f05d95..a25f09fbaf36 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 	tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	__tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	tlb_table_flush(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 362192ed12fe..62f80d2a9df9 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 29b0301c18aa..16eb63fac57d 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -58,14 +58,26 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
+static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	init_tlb_gather(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
 
-	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-	init_tlb_gather(tlb);
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
 }
 
 /* tlb_finish_mmu
diff --git a/mm/memory.c b/mm/memory.c
index 93e332d5ed77..037b812a9531 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 	tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
 
 	for (batch = &tlb->local; batch; batch = batch->next) {
 		free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	if (!tlb->need_flush)
+		return;
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  *	Called at the end of the shootdown operation to free up any resources
  *	that were required.
@@ -1127,8 +1136,10 @@ again:
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
-				if (pte_dirty(ptent))
+				if (pte_dirty(ptent)) {
+					force_flush = 1;
 					set_page_dirty(page);
+				}
 				if (pte_young(ptent) &&
 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
 					mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
 			page_remove_rmap(page);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
-			force_flush = !__tlb_remove_page(tlb, page);
-			if (force_flush)
+			if (unlikely(!__tlb_remove_page(tlb, page))) {
+				force_flush = 1;
 				break;
+			}
 			continue;
 		}
 		/*
@@ -1174,18 +1186,11 @@ again:
 	add_mm_rss_vec(mm, rss);
 	arch_leave_lazy_mmu_mode();
 
-	pte_unmap_unlock(start_pte, ptl);
-
-	/*
-	 * mmu_gather ran out of room to batch pages, we break out of
-	 * the PTE lock to avoid doing the potential expensive TLB invalidate
-	 * and page-free while holding it.
-	 */
+	/* Do the actual TLB flush before dropping ptl */
 	if (force_flush) {
 		unsigned long old_end;
 
-		force_flush = 0;
-
 		/*
 		 * Flush the TLB just for the previous segment,
 		 * then update the range to be the remaining
 		 */
 		old_end = tlb->end;
 		tlb->end = addr;
-
-		tlb_flush_mmu(tlb);
-
+		tlb_flush_mmu_tlbonly(tlb);
 		tlb->start = addr;
 		tlb->end = old_end;
+	}
+	pte_unmap_unlock(start_pte, ptl);
+
+	/*
+	 * If we forced a TLB flush (either due to running out of
+	 * batch buffers or because we needed to flush dirty TLB
+	 * entries before releasing the ptl), free the batched
+	 * memory too. Restart if we didn't do everything.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu_free(tlb);
 
 		if (addr != end)
 			goto again;
-- cgit v1.2.3
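As a closing illustration of the ordering the last patch establishes, the
stand-alone sketch below uses stub types and is an added summary, not kernel code.
The point is that the TLB flush now happens while the page-table lock is still held,
so a racing page_mkclean() cannot observe a cleaned pte while a stale writable TLB
entry survives on another CPU; only the batched page freeing is deferred until after
the lock is dropped.

    #include <stdbool.h>
    #include <stdio.h>

    struct mmu_gather { int pending_pages; };

    static void flush_tlb_only(struct mmu_gather *tlb)
    {
            printf("flush TLB (page-table lock still held, %d pages batched)\n",
                   tlb->pending_pages);
    }

    static void free_batched_pages(struct mmu_gather *tlb)
    {
            printf("free %d batched pages (lock already dropped)\n",
                   tlb->pending_pages);
            tlb->pending_pages = 0;
    }

    int main(void)
    {
            struct mmu_gather tlb = { .pending_pages = 8 };
            bool force_flush = true;        /* e.g. a dirty shared pte was zapped */

            /* lock held: ptes cleared, dirty bits transferred via set_page_dirty() */
            if (force_flush)
                    flush_tlb_only(&tlb);
            /* page-table lock dropped here */
            if (force_flush)
                    free_batched_pages(&tlb);
            return 0;
    }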