From 256211f2b0b251e532d1899b115e374feb16fa7a Mon Sep 17 00:00:00 2001
From: Marcin Nowakowski
Date: Fri, 9 Feb 2018 22:11:05 +0000
Subject: MIPS: Add crc instruction support flag to elf_hwcap

Indicate that the CRC32 and CRC32C instructions are supported by the
CPU through elf_hwcap flags. This will be used by a follow-up commit
that introduces crc32(c) crypto acceleration modules and is required by
the GENERIC_CPU_AUTOPROBE feature.

Signed-off-by: Marcin Nowakowski
Signed-off-by: James Hogan
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/18600/
---
 arch/mips/include/asm/mipsregs.h   | 1 +
 arch/mips/include/uapi/asm/hwcap.h | 1 +
 2 files changed, 2 insertions(+)
(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 858752dac337..f65859784a4c 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -664,6 +664,7 @@
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
 #define MIPS_CONF5_CA2		(_ULCAST_(1) << 14)
+#define MIPS_CONF5_CRCP		(_ULCAST_(1) << 18)
 #define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27)
 #define MIPS_CONF5_EVA		(_ULCAST_(1) << 28)
 #define MIPS_CONF5_CV		(_ULCAST_(1) << 29)
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
index 600ad8fd6835..a2aba4b059e6 100644
--- a/arch/mips/include/uapi/asm/hwcap.h
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -5,5 +5,6 @@
 /* HWCAP flags */
 #define HWCAP_MIPS_R6		(1 << 0)
 #define HWCAP_MIPS_MSA		(1 << 1)
+#define HWCAP_MIPS_CRC32	(1 << 2)

 #endif /* _UAPI_ASM_HWCAP_H */
--
cgit v1.2.3

From a4429e53c9b3082b05e51224c3d58dbdd39306c5 Mon Sep 17 00:00:00 2001
From: Wanpeng Li
Date: Tue, 13 Feb 2018 09:05:40 +0800
Subject: KVM: Introduce paravirtualization hints and KVM_HINTS_DEDICATED
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch introduces kvm_para_has_hint() to query for hints about the
configuration of the guests. The first hint, KVM_HINTS_DEDICATED, is
set if the guest has dedicated physical CPUs for each vCPU (i.e.
pinning and no over-commitment). This allows optimizing spinlocks and
tells the guest to avoid the PV TLB flush.

Cc: Paolo Bonzini
Cc: Radim Krčmář
Cc: Eduardo Habkost
Signed-off-by: Wanpeng Li
Signed-off-by: Paolo Bonzini
Signed-off-by: Radim Krčmář
---
 Documentation/virtual/kvm/cpuid.txt  | 15 +++++++++++++--
 arch/mips/include/asm/kvm_para.h     |  5 +++++
 arch/powerpc/include/asm/kvm_para.h  |  5 +++++
 arch/s390/include/asm/kvm_para.h     |  5 +++++
 arch/x86/include/asm/kvm_para.h      |  6 ++++++
 arch/x86/include/uapi/asm/kvm_para.h |  8 ++++++--
 arch/x86/kernel/kvm.c                |  5 +++++
 include/asm-generic/kvm_para.h       |  5 +++++
 include/linux/kvm_para.h             |  5 +++++
 9 files changed, 55 insertions(+), 4 deletions(-)
(limited to 'arch/mips/include')

diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index 87a7506f31c2..d4f33eb805dd 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -23,8 +23,8 @@ This function queries the presence of KVM cpuid leafs.
 function: define KVM_CPUID_FEATURES (0x40000001)
-returns : ebx, ecx, edx = 0
-          eax = and OR'ed group of (1 << flag), where each flags is:
+returns : ebx, ecx
+          eax = an OR'ed group of (1 << flag), where each flags is:

 flag                               || value || meaning
@@ -66,3 +66,14 @@ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
                                    ||       || per-cpu warps are expected in
                                    ||       || kvmclock.
 ------------------------------------------------------------------------------
+
+       edx = an OR'ed group of (1 << flag), where each flags is:
+
+
+flag                               || value || meaning
+==================================================================================
+KVM_HINTS_DEDICATED                ||     0 || guest checks this feature bit to
+                                   ||       || determine if there is vCPU pinning
+                                   ||       || and there is no vCPU over-commitment,
+                                   ||       || allowing optimizations
+----------------------------------------------------------------------------------
diff --git a/arch/mips/include/asm/kvm_para.h b/arch/mips/include/asm/kvm_para.h
index 60b1aa0b7014..b57e978b0946 100644
--- a/arch/mips/include/asm/kvm_para.h
+++ b/arch/mips/include/asm/kvm_para.h
@@ -94,6 +94,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return 0;
 }

+static inline unsigned int kvm_arch_para_hints(void)
+{
+	return 0;
+}
+
 #ifdef CONFIG_MIPS_PARAVIRT
 static inline bool kvm_para_available(void)
 {
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 336a91acb8b1..5ceb4efca65f 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -61,6 +61,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return r;
 }

+static inline unsigned int kvm_arch_para_hints(void)
+{
+	return 0;
+}
+
 static inline bool kvm_check_and_clear_guest_paused(void)
 {
 	return false;
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index 74eeec9c0a80..cbc7c3a68e4d 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -193,6 +193,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return 0;
 }

+static inline unsigned int kvm_arch_para_hints(void)
+{
+	return 0;
+}
+
 static inline bool kvm_check_and_clear_guest_paused(void)
 {
 	return false;
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 7b407dda2bd7..3aea2658323a 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -88,6 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 #ifdef CONFIG_KVM_GUEST
 bool kvm_para_available(void);
 unsigned int kvm_arch_para_features(void);
+unsigned int kvm_arch_para_hints(void);
 void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
@@ -115,6 +116,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return 0;
 }

+static inline unsigned int kvm_arch_para_hints(void)
+{
+	return 0;
+}
+
 static inline u32 kvm_read_and_reset_pf_reason(void)
 {
 	return 0;
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 6cfa9c8cb7d6..68a41b6ba3da 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -10,8 +10,10 @@
  */
 #define KVM_CPUID_SIGNATURE	0x40000000

-/* This CPUID returns a feature bitmap in eax. Before enabling a particular
- * paravirtualization, the appropriate feature bit should be checked.
+/* This CPUID returns two feature bitmaps in eax, edx. Before enabling
+ * a particular paravirtualization, the appropriate feature bit should
+ * be checked in eax. The performance hint feature bit should be checked
+ * in edx.
  */
 #define KVM_CPUID_FEATURES	0x40000001
 #define KVM_FEATURE_CLOCKSOURCE	0
@@ -28,6 +30,8 @@
 #define KVM_FEATURE_PV_TLB_FLUSH	9
 #define KVM_FEATURE_ASYNC_PF_VMEXIT	10

+#define KVM_HINTS_DEDICATED	0
+
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
  */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index bc1a27280c4b..8c9d98c46f84 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -605,6 +605,11 @@ unsigned int kvm_arch_para_features(void)
 	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
 }

+unsigned int kvm_arch_para_hints(void)
+{
+	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
+}
+
 static uint32_t __init kvm_detect(void)
 {
 	return kvm_cpuid_base();
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 18c6abe81fbd..728e5c5706c4 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -19,6 +19,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return 0;
 }

+static inline unsigned int kvm_arch_para_hints(void)
+{
+	return 0;
+}
+
 static inline bool kvm_para_available(void)
 {
 	return false;
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 51f6ef2c2ff4..f23b90b02898 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -9,4 +9,9 @@ static inline bool kvm_para_has_feature(unsigned int feature)
 {
 	return !!(kvm_arch_para_features() & (1UL << feature));
 }
+
+static inline bool kvm_para_has_hint(unsigned int feature)
+{
+	return !!(kvm_arch_para_hints() & (1UL << feature));
+}
 #endif /* __LINUX_KVM_PARA_H */
--
cgit v1.2.3

From 1690905240fd45cc04e873312df8574631c9f595 Mon Sep 17 00:00:00 2001
From: Matt Redfearn
Date: Mon, 26 Feb 2018 17:02:42 +0000
Subject: MIPS: Introduce isa-rev.h to define MIPS_ISA_REV

There are multiple instances in the kernel where we need to include or
exclude particular instructions based on the ISA revision of the target
processor. For MIPS32 / MIPS64, the compiler exports a __mips_isa_rev
define. However, when targeting MIPS I - V, this define is absent. This
leads to each use of __mips_isa_rev having to check that it is defined
first. To simplify this, introduce the isa-rev.h header, which always
exports MIPS_ISA_REV. The name is changed so as to avoid confusion with
the compiler builtin and to avoid accidentally using the builtin.
MIPS_ISA_REV is defined to the compiler's builtin if provided, or 0
otherwise, which satisfies all current usages.

Suggested-by: Paul Burton
Signed-off-by: Matt Redfearn
Reviewed-by: Maciej W. Rozycki
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/18676/
Signed-off-by: James Hogan
---
 arch/mips/include/asm/isa-rev.h | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)
 create mode 100644 arch/mips/include/asm/isa-rev.h
(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/isa-rev.h b/arch/mips/include/asm/isa-rev.h
new file mode 100644
index 000000000000..683ea3454dcb
--- /dev/null
+++ b/arch/mips/include/asm/isa-rev.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MIPS Tech, LLC
+ * Author: Matt Redfearn
+ */
+
+#ifndef __MIPS_ASM_ISA_REV_H__
+#define __MIPS_ASM_ISA_REV_H__
+
+/*
+ * The ISA revision level. This is 0 for MIPS I to V and N for
+ * MIPS{32,64}rN.
+ */
+
+/* If the compiler has defined __mips_isa_rev, believe it. */
+#ifdef __mips_isa_rev
+#define MIPS_ISA_REV __mips_isa_rev
+#else
+/* The compiler hasn't defined the isa rev so assume it's MIPS I - V (0) */
+#define MIPS_ISA_REV 0
+#endif
+
+
+#endif /* __MIPS_ASM_ISA_REV_H__ */
--
cgit v1.2.3

From 18ba210a29d08ea96025cb9d19c2eebf65846330 Mon Sep 17 00:00:00 2001
From: Matt Redfearn
Date: Mon, 26 Feb 2018 17:02:43 +0000
Subject: MIPS: cpu-features.h: Replace __mips_isa_rev with MIPS_ISA_REV

Remove the need to check that __mips_isa_rev is defined by using the
newly added MIPS_ISA_REV.

Signed-off-by: Matt Redfearn
Cc: Ralf Baechle
Cc: Paul Burton
Cc: "Maciej W. Rozycki"
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/18675/
Signed-off-by: James Hogan
---
 arch/mips/include/asm/cpu-features.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 721b698bfe3c..5f74590e0bea 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -11,6 +11,7 @@

 #include
 #include
+#include
 #include

 /*
@@ -493,7 +494,7 @@
 # define cpu_has_perf		(cpu_data[0].options & MIPS_CPU_PERF)
 #endif

-#if defined(CONFIG_SMP) && defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
+#if defined(CONFIG_SMP) && (MIPS_ISA_REV >= 6)
 /*
  * Some systems share FTLB RAMs between threads within a core (siblings in
  * kernel parlance). This means that FTLB entries may become invalid at almost
@@ -525,7 +526,7 @@
 # define cpu_has_shared_ftlb_entries \
 	(current_cpu_data.options & MIPS_CPU_SHARED_FTLB_ENTRIES)
 # endif
-#endif /* SMP && __mips_isa_rev >= 6 */
+#endif /* SMP && MIPS_ISA_REV >= 6 */

 #ifndef cpu_has_shared_ftlb_ram
 # define cpu_has_shared_ftlb_ram 0
--
cgit v1.2.3

From 05454c1bde91fb013c0431801001da82947e6b5a Mon Sep 17 00:00:00 2001
From: Mathias Kresin
Date: Thu, 11 May 2017 08:18:24 +0200
Subject: MIPS: ath79: Fix AR724X_PLL_REG_PCIE_CONFIG offset

According to the QCA u-boot source, the "PCIE Phase Lock Loop
Configuration (PCIE_PLL_CONFIG)" register is at offset 0x10 for all
SoCs except the QCA955X and QCA956X. Since the PCIE PLL config register
is only defined for the AR724x, fix only this value. The value has been
wrong since the day it was added and isn't used by any driver yet.

Signed-off-by: Mathias Kresin
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16048/
Signed-off-by: James Hogan
---
 arch/mips/include/asm/mach-ath79/ar71xx_regs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index aa3800c82332..d99ca862dae3 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -167,7 +167,7 @@
 #define AR71XX_AHB_DIV_MASK	0x7

 #define AR724X_PLL_REG_CPU_CONFIG	0x00
-#define AR724X_PLL_REG_PCIE_CONFIG	0x18
+#define AR724X_PLL_REG_PCIE_CONFIG	0x10

 #define AR724X_PLL_FB_SHIFT	0
 #define AR724X_PLL_FB_MASK	0x3ff
--
cgit v1.2.3

From a4ff8e8620d3f4f50ac4b41e8067b7d395056843 Mon Sep 17 00:00:00 2001
From: Michal Hocko
Date: Tue, 10 Apr 2018 16:35:57 -0700
Subject: mm: introduce MAP_FIXED_NOREPLACE
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Patch series "mm: introduce MAP_FIXED_NOREPLACE", v2.
This started as a follow-up discussion [3][4] to the runtime failure
caused by the hardening patch [5] which removes MAP_FIXED from the elf
loader, because MAP_FIXED is inherently dangerous as it might silently
clobber an existing underlying mapping (e.g. the stack). The reason for
the failure is that some architectures enforce an alignment for the
given address hint when MAP_FIXED is not used (e.g. for shared or
file-backed mappings).

One way around this would be to exclude those archs which do alignment
tricks from the hardening [6]. The patch is really trivial, but it has
been objected, rightfully so, that this screams for a more generic
solution. We basically want a non-destructive MAP_FIXED.

The first patch introduces MAP_FIXED_NOREPLACE, which enforces the
given address but, unlike MAP_FIXED, fails with EEXIST if the given
range conflicts with an existing one. The flag is introduced as a
completely new one rather than a MAP_FIXED extension because of
backward compatibility. We really want a never-clobber semantic even on
older kernels which do not recognize the flag. Unfortunately mmap sucks
wrt flags evaluation because we do not EINVAL on unknown flags. On
those kernels we would simply use the traditional hint-based semantic,
so the caller can still get a different address (which sucks) but at
least not silently corrupt an existing mapping. I do not see a good way
around that, except not exposing the new semantic to userspace at all.

It seems there are users who would like to have something like that.
Jemalloc has been mentioned by Michael Ellerman [7].

Florian Weimer has mentioned the following:
: glibc ld.so currently maps DSOs without hints. This means that the
: kernel will map them right next to each other, and the offsets
: between them are completely predictable. We would like to change that
: and supply a random address in a window of the address space. If
: there is a conflict, we do not want the kernel to pick a non-random
: address. Instead, we would try again with a random address.

John Hubbard has mentioned a CUDA example:
: a) Searches /proc//maps for a "suitable" region of available
:    VA space. "Suitable" generally means it has to have a base address
:    within a certain limited range (a particular device model might
:    have odd limitations, for example), it has to be large enough, and
:    alignment has to be large enough (again, various devices may have
:    constraints that lead us to do this).
:
:    This is of course subject to races with other threads in the
:    process.
:
:    Let's say it finds a region starting at va.
:
: b) Next it does:
:        p = mmap(va, ...)
:
:    *without* setting MAP_FIXED, of course (so va is just a hint), to
:    attempt to safely reserve that region. If p != va, then in most
:    cases, this is a failure (almost certainly due to another thread
:    getting a mapping from that region before we did), and so this
:    layer now has to call munmap(), before returning a
:    "failure: retry" to upper layers.
:
:    IMPROVEMENT: --> if instead, we could call this:
:
:        p = mmap(va, ... MAP_FIXED_NOREPLACE ...)
:
:    , then we could skip the munmap() call upon failure. This is a
:    small thing, but it is useful here. (Thanks to Piotr Jaroszynski
:    and Mark Hairgrove for helping me get that detail exactly right,
:    btw.)
:
: c) After that, CUDA suballocates from p, via:
:
:        q = mmap(sub_region_start, ... MAP_FIXED ...)
:
:    Interestingly enough, "freeing" is also done via MAP_FIXED, and
:    setting PROT_NONE to the subregion. Anyway, I just included (c)
:    for general interest.
Atomic address range probing in multithreaded programs in general
sounds like an interesting thing to me.

The second patch simply replaces the MAP_FIXED use in the elf loader
with MAP_FIXED_NOREPLACE. I believe other places which rely on
MAP_FIXED should follow. Actually, real MAP_FIXED usages should be
documented properly, and they should be more of an exception.

[1] http://lkml.kernel.org/r/20171116101900.13621-1-mhocko@kernel.org
[2] http://lkml.kernel.org/r/20171129144219.22867-1-mhocko@kernel.org
[3] http://lkml.kernel.org/r/20171107162217.382cd754@canb.auug.org.au
[4] http://lkml.kernel.org/r/1510048229.12079.7.camel@abdul.in.ibm.com
[5] http://lkml.kernel.org/r/20171023082608.6167-1-mhocko@kernel.org
[6] http://lkml.kernel.org/r/20171113094203.aofz2e7kueitk55y@dhcp22.suse.cz
[7] http://lkml.kernel.org/r/87efp1w7vy.fsf@concordia.ellerman.id.au

This patch (of 2):

MAP_FIXED is used quite often to enforce mapping at a particular range.
The main problem of this flag, however, is that it is inherently
dangerous because it unmaps existing mappings covered by the requested
range. This can cause silent memory corruption, some of it even with
serious security implications. While the current semantic might be
really desirable in many cases, there are others which would want to
enforce the given range but would rather see a failure than silent
memory corruption on a clashing range. Please note that there is no
guarantee that a given range is obeyed by mmap even when it is free -
e.g. arch-specific code is allowed to apply an alignment.

Introduce a new MAP_FIXED_NOREPLACE flag for mmap to achieve this
behavior. It has the same semantic as MAP_FIXED wrt. the given address
request, with the single exception that it fails with EEXIST if the
requested address is already covered by an existing mapping. We still
rely on get_unmapped_area to handle all the arch-specific MAP_FIXED
treatment, and check for a conflicting vma after it returns.

The flag is introduced as a completely new one rather than a MAP_FIXED
extension because of backward compatibility. We really want a
never-clobber semantic even on older kernels which do not recognize the
flag. Unfortunately mmap sucks wrt flags evaluation because we do not
EINVAL on unknown flags. On those kernels we would simply use the
traditional hint-based semantic, so the caller can still get a
different address (which sucks) but at least not silently corrupt an
existing mapping. I do not see a good way around that.
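For illustration, a minimal userspace sketch of the probe pattern this
flag enables. This is a sketch, not part of the series: it assumes libc
headers that do not yet define MAP_FIXED_NOREPLACE (the fallback value
below is the asm-generic one added by this patch), and the hint address
and length are arbitrary.

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x80000	/* asm-generic value from this patch */
#endif

int main(void)
{
	void *hint = (void *)0x700000000000UL;	/* illustrative candidate VA */
	size_t len = 2 * 1024 * 1024;
	void *p;

	p = mmap(hint, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
	if (p == MAP_FAILED) {
		if (errno == EEXIST)
			puts("range occupied, retry with another hint");
		else
			perror("mmap");
		return 1;
	}
	if (p != hint) {
		/* an older kernel ignored the unknown flag and fell back
		 * to the hint-based semantic; undo and treat as a conflict */
		munmap(p, len);
		return 1;
	}
	printf("reserved %zu bytes at %p\n", len, p);
	munmap(p, len);
	return 0;
}

The p != hint check mirrors the caveat above: on kernels which do not
recognize the flag, mmap silently degrades to the hint-based semantic,
so the caller still has to verify the returned address.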
[mpe@ellerman.id.au: fix whitespace]
[fail on clashing range with EEXIST as per Florian Weimer]
[set MAP_FIXED before round_hint_to_min as per Khalid Aziz]
Link: http://lkml.kernel.org/r/20171213092550.2774-2-mhocko@kernel.org
Reviewed-by: Khalid Aziz
Signed-off-by: Michal Hocko
Acked-by: Michael Ellerman
Cc: Khalid Aziz
Cc: Russell King - ARM Linux
Cc: Andrea Arcangeli
Cc: Florian Weimer
Cc: John Hubbard
Cc: Matthew Wilcox
Cc: Abdul Haleem
Cc: Joel Stanley
Cc: Kees Cook
Cc: Michal Hocko
Cc: Jason Evans
Cc: David Goldblatt
Cc: Edward Tomasz Napierała
Cc: Anshuman Khandual
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/alpha/include/uapi/asm/mman.h     |  1 +
 arch/mips/include/uapi/asm/mman.h      |  1 +
 arch/parisc/include/uapi/asm/mman.h    |  1 +
 arch/xtensa/include/uapi/asm/mman.h    |  1 +
 include/uapi/asm-generic/mman-common.h |  1 +
 mm/mmap.c                              | 11 +++++++++++
 6 files changed, 16 insertions(+)
(limited to 'arch/mips/include')

diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index 2dbdf59258d9..f9d4e6b6d4bd 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -32,6 +32,7 @@
 #define MAP_NONBLOCK	0x40000		/* do not block on IO */
 #define MAP_STACK	0x80000		/* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB	0x100000	/* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE	0x200000/* MAP_FIXED which doesn't unmap underlying mapping */

 #define MS_ASYNC	1		/* sync memory asynchronously */
 #define MS_SYNC	2		/* synchronous memory sync */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 606e02ca4b6c..3035ca499cd8 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -50,6 +50,7 @@
 #define MAP_NONBLOCK	0x20000		/* do not block on IO */
 #define MAP_STACK	0x40000		/* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB	0x80000		/* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE 0x100000	/* MAP_FIXED which doesn't unmap underlying mapping */

 /*
  * Flags for msync
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index a056a642bb31..870fbf8c7088 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -26,6 +26,7 @@
 #define MAP_NONBLOCK	0x20000		/* do not block on IO */
 #define MAP_STACK	0x40000		/* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB	0x80000		/* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE 0x100000	/* MAP_FIXED which doesn't unmap underlying mapping */

 #define MS_SYNC	1		/* synchronous memory sync */
 #define MS_ASYNC	2		/* sync memory asynchronously */
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 3e9d01ada81f..58f29a9d895d 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -57,6 +57,7 @@
 #define MAP_NONBLOCK	0x20000		/* do not block on IO */
 #define MAP_STACK	0x40000		/* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB	0x80000		/* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE 0x100000	/* MAP_FIXED which doesn't unmap underlying mapping */
 #ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
 # define MAP_UNINITIALIZED 0x4000000	/* For anonymous mmap, memory could be
					 * uninitialized */
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index f8b134f5608f..4aa65a8d7e92 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -26,6 +26,7 @@
 #else
 # define MAP_UNINITIALIZED 0x0		/* Don't support this flag */
 #endif
+#define MAP_FIXED_NOREPLACE 0x80000	/* MAP_FIXED which doesn't unmap underlying mapping */

 /*
  * Flags for mlock
diff --git a/mm/mmap.c b/mm/mmap.c
index f2154fc2548b..188f195883b9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1342,6 +1342,10 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 		if (!(file && path_noexec(&file->f_path)))
 			prot |= PROT_EXEC;

+	/* force arch specific MAP_FIXED handling in get_unmapped_area */
+	if (flags & MAP_FIXED_NOREPLACE)
+		flags |= MAP_FIXED;
+
 	if (!(flags & MAP_FIXED))
 		addr = round_hint_to_min(addr);

@@ -1365,6 +1369,13 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	if (offset_in_page(addr))
 		return addr;

+	if (flags & MAP_FIXED_NOREPLACE) {
+		struct vm_area_struct *vma = find_vma(mm, addr);
+
+		if (vma && vma->vm_start <= addr)
+			return -EEXIST;
+	}
+
 	if (prot == PROT_EXEC) {
 		pkey = execute_only_pkey(mm);
 		if (pkey < 0)
--
cgit v1.2.3

From f6b7aeee8f167409195fbf1364d02988fecad1d0 Mon Sep 17 00:00:00 2001
From: Sinan Kaya
Date: Tue, 3 Apr 2018 08:55:03 -0400
Subject: MIPS: io: Prevent compiler reordering writeX()

writeX() has strong ordering semantics with respect to memory updates.
In the absence of a write barrier or a compiler barrier, the compiler
can reorder register and memory update instructions. This breaks the
writeX() API.

Signed-off-by: Sinan Kaya
Cc: Arnd Bergmann
Cc: Ralf Baechle
Cc: Paul Burton
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/18997/
[jhogan@kernel.org: Tidy commit message]
Signed-off-by: James Hogan
---
 arch/mips/include/asm/io.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 0cbf3af37eca..fd00ddafb425 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr)
 #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
 #define war_io_reorder_wmb()		wmb()
 #else
-#define war_io_reorder_wmb()		do { } while (0)
+#define war_io_reorder_wmb()		barrier()
 #endif

 #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
--
cgit v1.2.3

From a1cc7034e33d12dc17d13fbcd7d597d552889097 Mon Sep 17 00:00:00 2001
From: Sinan Kaya
Date: Thu, 12 Apr 2018 22:30:44 -0400
Subject: MIPS: io: Add barrier after register read in readX()

While a barrier is present in the writeX() functions before the
register write, a similar barrier is missing in the readX() functions
after the register read. This could allow memory accesses following
readX() to observe stale data.
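For illustration, a hypothetical driver sequence that depends on this
ordering (the device, register offset and status bit below are made up;
only readl() and rmb() are real kernel interfaces):

struct dev_ctx {			/* hypothetical device context */
	void __iomem *regs;
	const u32 *dma_buf;		/* coherent DMA buffer */
};

static int dev_read_result(struct dev_ctx *dev, u32 *out)
{
	/* Assume the device sets bit 0 of the status register at
	 * offset 0x04 once its DMA write to dma_buf has completed. */
	if (!(readl(dev->regs + 0x04) & 0x1))
		return -EBUSY;

	/* With this patch readl() implies rmb(), so this load cannot
	 * be satisfied with data prefetched before the completion
	 * check above, and cannot observe stale buffer contents. */
	*out = dev->dma_buf[0];
	return 0;
}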
Signed-off-by: Sinan Kaya
Reported-by: Arnd Bergmann
Cc: Ralf Baechle
Cc: Paul Burton
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/19069/
[jhogan@kernel.org: Tidy commit message]
Signed-off-by: James Hogan
---
 arch/mips/include/asm/io.h | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index fd00ddafb425..a7d0b836f2f7 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
 			BUG();						\
 	}								\
 									\
+	/* prevent prefetching of coherent DMA data prematurely */	\
+	rmb();								\
 	return pfx##ioswab##bwlq(__mem, __val);				\
 }
--
cgit v1.2.3

From b3d7e55c3f886493235bfee08e1e5a4a27cbcce8 Mon Sep 17 00:00:00 2001
From: Matt Redfearn
Date: Tue, 17 Apr 2018 16:40:01 +0100
Subject: MIPS: uaccess: Add micromips clobbers to bzero invocation

The micromips implementation of bzero additionally clobbers registers
t7 & t8. Specify this in the clobbers list when invoking bzero.

Fixes: 26c5e07d1478 ("MIPS: microMIPS: Optimise 'memset' core library function.")
Reported-by: James Hogan
Signed-off-by: Matt Redfearn
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Cc: # 3.10+
Patchwork: https://patchwork.linux-mips.org/patch/19110/
Signed-off-by: James Hogan
---
 arch/mips/include/asm/uaccess.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index b71306947290..06629011a434 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
 {
 	__kernel_size_t res;

+#ifdef CONFIG_CPU_MICROMIPS
+/* micromips memset / bzero also clobbers t7 & t8 */
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
+#else
+#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
+#endif /* CONFIG_CPU_MICROMIPS */
+
 	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
@@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
 			"move\t%0, $6"
 			: "=r" (res)
 			: "r" (addr), "r" (size)
-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+			: bzero_clobbers);
 	} else {
 		might_fault();
 		__asm__ __volatile__(
@@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
 			"move\t%0, $6"
 			: "=r" (res)
 			: "r" (addr), "r" (size)
-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+			: bzero_clobbers);
 	}

 	return res;
--
cgit v1.2.3
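As a stand-alone illustration of the hazard such clobber lists guard
against (ordinary userspace C, not kernel code; the register choice is
arbitrary):

#include <stdio.h>

int main(void)
{
	int in = 42, out;

#ifdef __mips__
	/* $8 (t0) is modified inside the asm body; naming it in the
	 * clobber list keeps the compiler from caching a live value
	 * there. Omitting a register the asm actually touches invites
	 * exactly the silent corruption the patch above fixes for the
	 * microMIPS bzero. */
	__asm__ __volatile__(
		"move\t$8, %1\n\t"
		"addiu\t$8, $8, 1\n\t"
		"move\t%0, $8"
		: "=r" (out)
		: "r" (in)
		: "$8");
#else
	out = in + 1;	/* fallback so the sketch builds elsewhere */
#endif
	printf("%d\n", out);
	return 0;
}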