author     David S. Miller <davem@davemloft.net>  2015-08-14 01:23:11 +0200
committer  David S. Miller <davem@davemloft.net>  2015-08-14 01:23:11 +0200
commit     182ad468e70fc7e8ff2e5d64344c690beaa00ddd (patch)
tree       af0b9b8da89ed03629c71f6829845c301485d289 /arch
parent     documentation: bring vxlan documentation more up-to-date (diff)
parent     Merge tag 'dm-4.2-fixes-5' of git://git.kernel.org/pub/scm/linux/kernel/git/d... (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/cavium/Kconfig

The cavium conflict was overlapping dependency changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/Kconfig | 13
-rw-r--r--  arch/arc/Makefile | 10
-rw-r--r--  arch/arc/include/asm/arcregs.h | 7
-rw-r--r--  arch/arc/include/asm/atomic.h | 78
-rw-r--r--  arch/arc/include/asm/ptrace.h | 54
-rw-r--r--  arch/arc/include/asm/spinlock.h | 538
-rw-r--r--  arch/arc/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h | 20
-rw-r--r--  arch/arc/kernel/setup.c | 12
-rw-r--r--  arch/arc/kernel/time.c | 40
-rw-r--r--  arch/arc/lib/memcpy-archs.S | 2
-rw-r--r--  arch/arc/lib/memset-archs.S | 43
-rw-r--r--  arch/arc/plat-axs10x/axs10x.c | 15
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/exynos3250.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/exynos4210-origen.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos4210-trats.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos4210-universal_c210.dts | 4
-rw-r--r--  arch/arm/boot/dts/exynos4210.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/imx35.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/k2e-clocks.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/k2hk-clocks.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/k2l-clocks.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/ste-nomadik-nhk15.dts | 1
-rw-r--r--  arch/arm/boot/dts/ste-nomadik-s8815.dts | 4
-rw-r--r--  arch/arm/boot/dts/ste-nomadik-stn8815.dtsi | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 24
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 5
-rw-r--r--  arch/arm64/boot/dts/apm/apm-storm.dtsi | 2
-rw-r--r--  arch/arm64/kernel/signal32.c | 5
-rw-r--r--  arch/arm64/kernel/vdso.c | 7
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/ath79/setup.c | 1
-rw-r--r--  arch/mips/cavium-octeon/smp.c | 2
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/dma-coherence.h | 10
-rw-r--r--  arch/mips/include/asm/pgtable.h | 31
-rw-r--r--  arch/mips/include/asm/smp.h | 2
-rw-r--r--  arch/mips/include/asm/stackframe.h | 25
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c | 5
-rw-r--r--  arch/mips/kernel/prom.c | 2
-rw-r--r--  arch/mips/kernel/relocate_kernel.S | 8
-rw-r--r--  arch/mips/kernel/signal32.c | 2
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 4
-rw-r--r--  arch/mips/kernel/smp.c | 10
-rw-r--r--  arch/mips/kernel/traps.c | 13
-rw-r--r--  arch/mips/kernel/unaligned.c | 2
-rw-r--r--  arch/mips/lantiq/irq.c | 3
-rw-r--r--  arch/mips/loongson64/loongson-3/smp.c | 7
-rw-r--r--  arch/mips/mm/cache.c | 8
-rw-r--r--  arch/mips/mm/fault.c | 3
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 2
-rw-r--r--  arch/mips/mti-malta/malta-time.c | 16
-rw-r--r--  arch/mips/mti-sead3/sead3-time.c | 1
-rw-r--r--  arch/mips/netlogic/common/smp.c | 2
-rw-r--r--  arch/mips/paravirt/paravirt-smp.c | 2
-rw-r--r--  arch/mips/pistachio/time.c | 1
-rw-r--r--  arch/mips/pmcs-msp71xx/msp_smp.c | 2
-rw-r--r--  arch/mips/ralink/irq.c | 1
-rw-r--r--  arch/mips/sgi-ip27/ip27-irq.c | 8
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c | 9
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c | 7
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 11
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 4
-rw-r--r--  arch/sparc/include/asm/visasm.h | 16
-rw-r--r--  arch/sparc/lib/NG4memcpy.S | 5
-rw-r--r--  arch/sparc/lib/VISsave.S | 67
-rw-r--r--  arch/sparc/lib/ksyms.c | 4
-rw-r--r--  arch/tile/kernel/compat_signal.c | 2
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 4
-rw-r--r--  arch/x86/include/asm/desc.h | 15
-rw-r--r--  arch/x86/include/asm/mmu.h | 3
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 54
-rw-r--r--  arch/x86/include/asm/sigcontext.h | 6
-rw-r--r--  arch/x86/include/uapi/asm/sigcontext.h | 21
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 12
-rw-r--r--  arch/x86/kernel/ldt.c | 262
-rw-r--r--  arch/x86/kernel/process_64.c | 4
-rw-r--r--  arch/x86/kernel/signal.c | 26
-rw-r--r--  arch/x86/kernel/step.c | 6
-rw-r--r--  arch/x86/kvm/mtrr.c | 8
-rw-r--r--  arch/x86/platform/efi/efi.c | 5
-rw-r--r--  arch/x86/power/cpu.c | 3
-rw-r--r--  arch/x86/xen/Makefile | 4
-rw-r--r--  arch/x86/xen/enlighten.c | 40
-rw-r--r--  arch/x86/xen/xen-ops.h | 6
89 files changed, 1217 insertions, 504 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 91cf4055acab..bd4670d1b89b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
config ARC_PAGE_SIZE_16K
bool "16KB"
- depends on ARC_MMU_V3
+ depends on ARC_MMU_V3 || ARC_MMU_V4
config ARC_PAGE_SIZE_4K
bool "4KB"
- depends on ARC_MMU_V3
+ depends on ARC_MMU_V3 || ARC_MMU_V4
endchoice
@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
default y
depends on !ARC_CANT_LLSC
+config ARC_STAR_9000923308
+ bool "Workaround for llock/scond livelock"
+ default y
+ depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
+
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
dest operands with 2 possible source operands.
default y
+config ARC_HAS_DIV_REM
+ bool "Insn: div, divu, rem, remu"
+ default y
+
config ARC_HAS_RTC
bool "Local 64-bit r/o cycle counter"
default n
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 46d87310220d..8a27a48304a4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+ifdef CONFIG_ISA_ARCV2
+
ifndef CONFIG_ARC_HAS_LL64
-cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64
+cflags-y += -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y += -mno-div-rem
+endif
+
endif
cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 070f58827a5c..c8f57b8449dc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -89,11 +89,10 @@
#define ECR_C_BIT_DTLB_LD_MISS 8
#define ECR_C_BIT_DTLB_ST_MISS 9
-
/* Auxiliary registers */
#define AUX_IDENTITY 4
#define AUX_INTR_VEC_BASE 0x25
-
+#define AUX_NON_VOL 0x5e
/*
* Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
struct bcr_perip {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int start:8, pad2:8, sz:8, pad:8;
+ unsigned int start:8, pad2:8, sz:8, ver:8;
#else
- unsigned int pad:8, sz:8, pad2:8, start:8;
+ unsigned int ver:8, sz:8, pad2:8, start:8;
#endif
};
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d..87d18ae53115 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,33 +23,60 @@
#define atomic_set(v, i) (((v)->counter) = (i))
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW " prefetchw [%1] \n"
-#else
-#define PREFETCHW
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int delay = 1, tmp; \
+
+#define SCOND_FAIL_RETRY_ASM \
+ " bz 4f \n" \
+ " ; --- scond fail delay --- \n" \
+ " mov %[tmp], %[delay] \n" /* tmp = delay */ \
+ "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
+ " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
+ " rol %[delay], %[delay] \n" /* delay *= 2 */ \
+ " b 1b \n" /* start over */ \
+ "4: ; --- success --- \n" \
+
+#define SCOND_FAIL_RETRY_VARS \
+ ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
+
+#else /* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM \
+ " bnz 1b \n" \
+
+#define SCOND_FAIL_RETRY_VARS
+
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- unsigned int temp; \
+ unsigned int val; \
+ SCOND_FAIL_RETRY_VAR_DEF \
\
__asm__ __volatile__( \
- "1: \n" \
- PREFETCHW \
- " llock %0, [%1] \n" \
- " " #asm_op " %0, %0, %2 \n" \
- " scond %0, [%1] \n" \
- " bnz 1b \n" \
- : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
- : "r"(&v->counter), "ir"(i) \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " \n" \
+ SCOND_FAIL_RETRY_ASM \
+ \
+ : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
+ SCOND_FAIL_RETRY_VARS \
+ : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
+ [i] "ir" (i) \
: "cc"); \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
- unsigned int temp; \
+ unsigned int val; \
+ SCOND_FAIL_RETRY_VAR_DEF \
\
/* \
* Explicit full memory barrier needed before/after as \
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
smp_mb(); \
\
__asm__ __volatile__( \
- "1: \n" \
- PREFETCHW \
- " llock %0, [%1] \n" \
- " " #asm_op " %0, %0, %2 \n" \
- " scond %0, [%1] \n" \
- " bnz 1b \n" \
- : "=&r"(temp) \
- : "r"(&v->counter), "ir"(i) \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " \n" \
+ SCOND_FAIL_RETRY_ASM \
+ \
+ : [val] "=&r" (val) \
+ SCOND_FAIL_RETRY_VARS \
+ : [ctr] "r" (&v->counter), \
+ [i] "ir" (i) \
: "cc"); \
\
smp_mb(); \
\
- return temp; \
+ return val; \
}
#else /* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
/**
* __atomic_add_unless - add unless the number is a given value
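The SCOND_FAIL_RETRY_ASM sequence above implements exponential backoff: when scond fails, spin for `delay` iterations, double `delay`, and retry from the llock. A minimal C sketch of that control flow, with a GCC compare-and-swap standing in for the llock/scond pair (illustrative only; the real code must stay in asm so nothing is scheduled between llock and scond):

static void atomic_add_with_backoff(int i, int *ctr)
{
	unsigned int delay = 1;
	int old = __atomic_load_n(ctr, __ATOMIC_RELAXED);

	/* a weak CAS models an scond that may fail under contention */
	while (!__atomic_compare_exchange_n(ctr, &old, old + i, 1,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
		for (volatile unsigned int t = delay; t; t--)
			;		/* the "brne.d ... sub" countdown */
		delay <<= 1;		/* the "rol": delay *= 2 */
	}
}
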
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 91694ec1ce95..69095da1fcfd 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,20 +20,20 @@
struct pt_regs {
/* Real registers */
- long bta; /* bta_l1, bta_l2, erbta */
+ unsigned long bta; /* bta_l1, bta_l2, erbta */
- long lp_start, lp_end, lp_count;
+ unsigned long lp_start, lp_end, lp_count;
- long status32; /* status32_l1, status32_l2, erstatus */
- long ret; /* ilink1, ilink2 or eret */
- long blink;
- long fp;
- long r26; /* gp */
+ unsigned long status32; /* status32_l1, status32_l2, erstatus */
+ unsigned long ret; /* ilink1, ilink2 or eret */
+ unsigned long blink;
+ unsigned long fp;
+ unsigned long r26; /* gp */
- long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
- long sp; /* user/kernel sp depending on where we came from */
- long orig_r0;
+ unsigned long sp; /* User/Kernel depending on where we came from */
+ unsigned long orig_r0;
/*
* To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
unsigned long event;
};
- long user_r25;
+ unsigned long user_r25;
};
#else
struct pt_regs {
- long orig_r0;
+ unsigned long orig_r0;
union {
struct {
@@ -76,26 +76,26 @@ struct pt_regs {
unsigned long event;
};
- long bta; /* bta_l1, bta_l2, erbta */
+ unsigned long bta; /* bta_l1, bta_l2, erbta */
- long user_r25;
+ unsigned long user_r25;
- long r26; /* gp */
- long fp;
- long sp; /* user/kernel sp depending on where we came from */
+ unsigned long r26; /* gp */
+ unsigned long fp;
+ unsigned long sp; /* user/kernel sp depending on where we came from */
- long r12;
+ unsigned long r12;
/*------- Below list auto saved by h/w -----------*/
- long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
- long blink;
- long lp_end, lp_start, lp_count;
+ unsigned long blink;
+ unsigned long lp_end, lp_start, lp_count;
- long ei, ldi, jli;
+ unsigned long ei, ldi, jli;
- long ret;
- long status32;
+ unsigned long ret;
+ unsigned long status32;
};
#endif
@@ -103,10 +103,10 @@ struct pt_regs {
/* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs {
- long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+ unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
-#define instruction_pointer(regs) (unsigned long)((regs)->ret)
+#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
/* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {
static inline long regs_return_value(struct pt_regs *regs)
{
- return regs->r0;
+ return (long)regs->r0;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index e1651df6a93d..db8c59d1eaeb 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -18,9 +18,518 @@
#define arch_spin_unlock_wait(x) \
do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * A normal LLOCK/SCOND based system, w/o need for livelock workaround
+ */
+#ifndef CONFIG_ARC_STAR_9000923308
+
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ unsigned int val;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " mov %[got_it], 1 \n"
+ "4: \n"
+ " \n"
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ *
+ * if (rw->counter > 0) {
+ * rw->counter--;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
+ " sub %[val], %[val], 1 \n" /* reader lock */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
+ " sub %[val], %[val], 1 \n" /* counter-- */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ *
+ * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ * rw->counter = 0;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter++;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " add %[val], %[val], 1 \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter))
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ smp_mb();
+
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+#else /* CONFIG_ARC_STAR_9000923308 */
+
+/*
+ * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
+ * coherency transactions in the SCU. The exclusive line state keeps rotating
+ * among contenting cores leading to a never ending cycle. So break the cycle
+ * by deferring the retry of failed exclusive access (SCOND). The actual delay
+ * needed is function of number of contending cores as well as the unrelated
+ * coherency traffic from other cores. To keep the code simple, start off with
+ * small delay of 1 which would suffice most cases and in case of contention
+ * double the delay. Eventually the delay is sufficient such that the coherency
+ * pipeline is drained, thus a subsequent exclusive access would succeed.
+ */
+
+#define SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int delay, tmp; \
+
+#define SCOND_FAIL_RETRY_ASM \
+ " ; --- scond fail delay --- \n" \
+ " mov %[tmp], %[delay] \n" /* tmp = delay */ \
+ "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
+ " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
+ " rol %[delay], %[delay] \n" /* delay *= 2 */ \
+ " b 1b \n" /* start over */ \
+ " \n" \
+ "4: ; --- done --- \n" \
+
+#define SCOND_FAIL_RETRY_VARS \
+ ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bz 4f \n" /* done */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ *
+ * if (rw->counter > 0) {
+ * rw->counter--;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
+ " sub %[val], %[val], 1 \n" /* reader lock */
+ " scond %[val], [%[rwlock]] \n"
+ " bz 4f \n" /* done */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
+ " sub %[val], %[val], 1 \n" /* counter-- */
+ " scond %[val], [%[rwlock]] \n"
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ *
+ * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ * rw->counter = 0;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bz 4f \n"
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+ SCOND_FAIL_RETRY_VAR_DEF;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "0: mov %[delay], 1 \n"
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bz.d 4f \n"
+ " mov.z %[got_it], 1 \n" /* got it */
+ " \n"
+ SCOND_FAIL_RETRY_ASM
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ SCOND_FAIL_RETRY_VARS
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter++;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " add %[val], %[val], 1 \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter))
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " scond %[UNLOCKED], [%[rwlock]]\n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+#endif /* CONFIG_ARC_STAR_9000923308 */
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
/*
* This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ex %0, [%1] \n"
" breq %0, %2, 1b \n"
- : "+&r" (tmp)
+ : "+&r" (val)
: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
: "memory");
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
smp_mb();
}
+/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
smp_mb();
__asm__ __volatile__(
"1: ex %0, [%1] \n"
- : "+r" (tmp)
+ : "+r" (val)
: "r"(&(lock->slock))
: "memory");
smp_mb();
- return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+ return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+ unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
/*
* RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__asm__ __volatile__(
" ex %0, [%1] \n"
- : "+r" (tmp)
+ : "+r" (val)
: "r"(&(lock->slock))
: "memory");
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
*
* The spinlock itself is contained in @counter and access to it is
* serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
arch_spin_unlock(&(rw->lock_mutex));
}
+#endif
+
+#define arch_read_can_lock(x) ((x)->counter > 0)
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
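
The LL/SC rwlock above keeps all of its state in one counter: it starts at __ARCH_RW_LOCK_UNLOCKED__ (0x01000000), each reader decrements it, and a writer swings it from that value to 0, so "<= 0" means write-locked. A sketch of the same protocol in portable C, with GCC builtins standing in for llock/scond (not the actual kernel interface):

#define RW_UNLOCKED 0x01000000		/* mirrors __ARCH_RW_LOCK_UNLOCKED__ */

static int read_trylock_sketch(int *counter)
{
	int val = __atomic_load_n(counter, __ATOMIC_RELAXED);

	while (val > 0)			/* <= 0: a writer holds it */
		if (__atomic_compare_exchange_n(counter, &val, val - 1, 1,
						__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return 1;	/* took a reader slot */
	return 0;
}

static int write_trylock_sketch(int *counter)
{
	int expected = RW_UNLOCKED;

	/* only a fully idle lock may be claimed by a writer */
	return __atomic_compare_exchange_n(counter, &expected, 0, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}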
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 662627ced4f2..4e1ef5f650c6 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -26,7 +26,9 @@ typedef struct {
*/
typedef struct {
volatile unsigned int counter;
+#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t lock_mutex;
+#endif
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 76a7739aab1c..0b3ef63d4a03 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -32,20 +32,20 @@
*/
struct user_regs_struct {
- long pad;
+ unsigned long pad;
struct {
- long bta, lp_start, lp_end, lp_count;
- long status32, ret, blink, fp, gp;
- long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
- long sp;
+ unsigned long bta, lp_start, lp_end, lp_count;
+ unsigned long status32, ret, blink, fp, gp;
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ unsigned long sp;
} scratch;
- long pad2;
+ unsigned long pad2;
struct {
- long r25, r24, r23, r22, r21, r20;
- long r19, r18, r17, r16, r15, r14, r13;
+ unsigned long r25, r24, r23, r22, r21, r20;
+ unsigned long r19, r18, r17, r16, r15, r14, r13;
} callee;
- long efa; /* break pt addr, for break points in delay slots */
- long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
+ unsigned long efa; /* break pt addr, for break points in delay slots */
+ unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 18cc01591c96..cabde9dc0696 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
struct bcr_perip uncached_space;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ unsigned long perip_space;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
- BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
+ if (uncached_space.ver < 3)
+ perip_space = uncached_space.start << 24;
+ else
+ perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
+
+ BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
@@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
else if (!cpu->extn.fpu_dp && fpu_enabled)
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+
+ if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+ !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
+ panic("llock/scond livelock workaround missing\n");
}
/*
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3364d2bbc515..4294761a2b3e 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
return 0;
}
-static void arc_clkevent_set_mode(enum clock_event_mode mode,
- struct clock_event_device *dev)
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /*
- * At X Hz, 1 sec = 1000ms -> X cycles;
- * 10ms -> X / 100 cycles
- */
- arc_timer_event_setup(arc_get_core_freq() / HZ);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- break;
- default:
- break;
- }
-
- return;
+ /*
+ * At X Hz, 1 sec = 1000ms -> X cycles;
+ * 10ms -> X / 100 cycles
+ */
+ arc_timer_event_setup(arc_get_core_freq() / HZ);
+ return 0;
}
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
- .name = "ARC Timer0",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .mode = CLOCK_EVT_MODE_UNUSED,
- .rating = 300,
- .irq = TIMER0_IRQ, /* hardwired, no need for resources */
- .set_next_event = arc_clkevent_set_next_event,
- .set_mode = arc_clkevent_set_mode,
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
+ .rating = 300,
+ .irq = TIMER0_IRQ, /* hardwired, no need for resources */
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_state_periodic = arc_clkevent_set_periodic,
};
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
* irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
*/
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
- int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
+ int irq_reenable = clockevent_state_periodic(evt);
/*
* Any write to CTRL reg ACks the interrupt, we rewrite the
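
The time.c change migrates from the deprecated set_mode callback to the per-state clockevents hooks: the driver supplies set_state_periodic only, drops the .mode field, and queries state with clockevent_state_periodic(). The resulting driver shape, as a sketch (my_program_timer() and timer_freq are hypothetical, not kernel symbols):

#include <linux/clockchips.h>

static int my_set_periodic(struct clock_event_device *dev)
{
	my_program_timer(timer_freq / HZ);	/* one tick per jiffy */
	return 0;				/* state callbacks return int */
}

static struct clock_event_device my_clkevt = {
	.name			= "my-timer",
	.features		= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 300,
	.set_state_periodic	= my_set_periodic,
	/* no .set_mode / .mode: the core tracks state, and drivers test
	 * it with clockevent_state_periodic(dev) and friends */
};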
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 1b2b3acfed52..0cab0b8a57c5 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -206,7 +206,7 @@ unalignedOffby3:
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
- prefetch [r3, 32] ;Prefetch the next write location
+ prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 92d573c734b5..365b18364815 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -10,12 +10,6 @@
#undef PREALLOC_NOT_AVAIL
-#ifdef PREALLOC_NOT_AVAIL
-#define PREWRITE(A,B) prefetchw [(A),(B)]
-#else
-#define PREWRITE(A,B) prealloc [(A),(B)]
-#endif
-
ENTRY(memset)
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
;;; Convert len to Dwords, unfold x8
lsr.f lp_count, lp_count, 6
+
lpnz @.Lset64bytes
;; LOOP START
- PREWRITE(r3, 64) ;Prefetch the next write location
+#ifdef PREALLOC_NOT_AVAIL
+ prefetchw [r3, 64] ;Prefetch the next write location
+#else
+ prealloc [r3, 64]
+#endif
+#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
+#else
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+#endif
.Lset64bytes:
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
;; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
+#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
+#else
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+ st.ab r4, [r3, 4]
+#endif
.Lset32bytes:
and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
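
In the memset hunks, CONFIG_ARC_HAS_LL64 selects between one 64-bit store (std.ab) and two 32-bit stores (st.ab) per 8 bytes, with the loop bodies unrolled eight and sixteen deep respectively. Stripped of the unrolling, the two paths correspond to this C sketch:

#include <stdint.h>
#include <stddef.h>

static void fill_dwords(void *dst, uint64_t pat, size_t n)
{
	uint64_t *d = dst;
	while (n--)
		*d++ = pat;	/* LL64 path: one std.ab per 8 bytes */
}

static void fill_words(void *dst, uint32_t pat, size_t n)
{
	uint32_t *d = dst;
	while (n--)
		*d++ = pat;	/* !LL64 path: two st.ab per 8 bytes */
}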
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 99f7da513a48..e7769c3ab5f2 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
static void __init axs103_early_init(void)
{
+ /*
+ * AXS103 configurations for SMP/QUAD configurations share device tree
+ * which defaults to 90 MHz. However recent failures of Quad config
+ * revealed P&R timing violations so clamp it down to safe 50 MHz
+ * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
+ *
+ * This hack is really hacky as of now. Fix it properly by getting the
+ * number of cores as return value of platform's early SMP callback
+ */
+#ifdef CONFIG_ARC_MCIP
+ unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
+ if (num_cores > 2)
+ arc_set_core_freq(50 * 1000000);
+#endif
+
switch (arc_get_core_freq()/1000000) {
case 33:
axs103_set_freq(1, 1, 1);
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 8f1e25bcecbd..4a0718ccf68e 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1140,6 +1140,7 @@
ctrl-module = <&omap_control_sata>;
clocks = <&sys_clkin1>, <&sata_ref_clk>;
clock-names = "sysclk", "refclk";
+ syscon-pllreset = <&scm_conf 0x3fc>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index d7201333e3bc..2db99433e17f 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -138,8 +138,8 @@
mipi_phy: video-phy@10020710 {
compatible = "samsung,s5pv210-mipi-video-phy";
- reg = <0x10020710 8>;
#phy-cells = <1>;
+ syscon = <&pmu_system_controller>;
};
pd_cam: cam-power-domain@10023C00 {
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index e0abfc3324d1..e050d85cdacd 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -127,6 +127,10 @@
};
};
+&cpu0 {
+ cpu0-supply = <&buck1_reg>;
+};
+
&fimd {
pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 98f3ce65cb9a..ba34886f8b65 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -188,6 +188,10 @@
};
};
+&cpu0 {
+ cpu0-supply = <&varm_breg>;
+};
+
&dsi_0 {
vddcore-supply = <&vusb_reg>;
vddio-supply = <&vmipi_reg>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index d4f2b11319dd..775892b2cc6a 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -548,6 +548,10 @@
};
};
+&cpu0 {
+ cpu0-supply = <&vdd_arm_reg>;
+};
+
&pinctrl_1 {
hdmi_hpd: hdmi-hpd {
samsung,pins = "gpx3-7";
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 10d3c173396e..3e5ba665d200 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -40,6 +40,18 @@
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0x900>;
+ clocks = <&clock CLK_ARM_CLK>;
+ clock-names = "cpu";
+ clock-latency = <160000>;
+
+ operating-points = <
+ 1200000 1250000
+ 1000000 1150000
+ 800000 1075000
+ 500000 975000
+ 400000 975000
+ 200000 950000
+ >;
cooling-min-level = <4>;
cooling-max-level = <2>;
#cooling-cells = <2>; /* min followed by max */
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e97d6a7..e6540b5cfa4c 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
can1: can@53fe4000 {
compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
reg = <0x53fe4000 0x1000>;
- clocks = <&clks 33>;
- clock-names = "ipg";
+ clocks = <&clks 33>, <&clks 33>;
+ clock-names = "ipg", "per";
interrupts = <43>;
status = "disabled";
};
@@ -295,8 +295,8 @@
can2: can@53fe8000 {
compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
reg = <0x53fe8000 0x1000>;
- clocks = <&clks 34>;
- clock-names = "ipg";
+ clocks = <&clks 34>, <&clks 34>;
+ clock-names = "ipg", "per";
interrupts = <44>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6af66a0..d56d68fe7ffc 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3c0067..af9b7190533a 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e29f073..ef8464bb11ff 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
#clock-cells = <0>;
compatible = "ti,keystone,main-pll-clock";
clocks = <&refclksys>;
- reg = <0x02620350 4>, <0x02310110 4>;
- reg-names = "control", "multiplier";
- fixed-postdiv = <2>;
+ reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+ reg-names = "control", "multiplier", "post-divider";
};
papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 3d0b8755caee..3d25dba143a5 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -17,6 +17,7 @@
};
aliases {
+ serial1 = &uart1;
stmpe-i2c0 = &stmpe0;
stmpe-i2c1 = &stmpe1;
};
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 85d3b95dfdba..3c140d05f796 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -15,6 +15,10 @@
bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
};
+ aliases {
+ serial1 = &uart1;
+ };
+
src@101e0000 {
/* These chrystal drivers are not used on this board */
disable-sxtalo;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 9a5f2ba139b7..ef794a33b4dc 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -757,6 +757,7 @@
clock-names = "uartclk", "apb_pclk";
pinctrl-names = "default";
pinctrl-0 = <&uart0_default_mux>;
+ status = "disabled";
};
uart1: uart@101fb000 {
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d78c12e7cb5e..486cc4ded190 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
* registers. This address is needed early so the OCP registers that
* are part of the device's address space can be ioremapped properly.
*
+ * If SYSC access is not needed, the registers will not be remapped
+ * and non-availability of MPU access is not treated as an error.
+ *
* Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
* -ENXIO on absent or invalid register target address space.
*/
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
_save_mpu_port_index(oh);
+ /* if we don't need sysc access we don't need to ioremap */
+ if (!oh->class->sysc)
+ return 0;
+
+ /* we can't continue without MPU PORT if we need sysc access */
if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
return -ENXIO;
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
oh->name);
/* Extract the IO space from device tree blob */
- if (!np)
+ if (!np) {
+ pr_err("omap_hwmod: %s: no dt node\n", oh->name);
return -ENXIO;
+ }
va_start = of_iomap(np, index + oh->mpu_rt_idx);
} else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
oh->name, np->name);
}
- if (oh->class->sysc) {
- r = _init_mpu_rt_base(oh, NULL, index, np);
- if (r < 0) {
- WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
- oh->name);
- return 0;
- }
+ r = _init_mpu_rt_base(oh, NULL, index, np);
+ if (r < 0) {
+ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+ oh->name);
+ return 0;
}
r = _init_clocks(oh, NULL);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 2606c6608bd8..562247bced49 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
.class = &dra7xx_gpmc_hwmod_class,
.clkdm_name = "l3main1_clkdm",
/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
- .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
+ .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS,
.main_clk = "l3_iclk_div",
.prcm = {
.omap4 = {
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 0689c3fb56e3..58093edeea2e 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -823,7 +823,7 @@
device_type = "dma";
reg = <0x0 0x1f270000 0x0 0x10000>,
<0x0 0x1f200000 0x0 0x10000>,
- <0x0 0x1b008000 0x0 0x2000>,
+ <0x0 0x1b000000 0x0 0x400000>,
<0x0 0x1054a000 0x0 0x100>;
interrupts = <0x0 0x82 0x4>,
<0x0 0xb8 0x4>,
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1670f15ef69e..948f0ad2de23 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
* Other callers might not initialize the si_lsb field,
* so check explicitely for the right codes here.
*/
- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+ if (from->si_signo == SIGBUS &&
+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE))
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f524f..97bc68f4c689 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
- xtime_coarse = __current_kernel_time();
vdso_data->use_syscall = use_syscall;
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift;
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
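
The vdso fix reads coarse time straight from the timekeeper instead of __current_kernel_time(): seconds come from tk->xtime_sec, and nanoseconds are recovered from tkr_mono.xtime_nsec, which is stored left-shifted by tkr_mono.shift for fixed-point precision. A tiny self-contained illustration of that recovery (values invented for the example):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t shift = 8;			/* stands in for tkr_mono.shift */
	uint64_t nsec = 123456789;
	uint64_t xtime_nsec = nsec << shift;	/* stored fixed-point form */

	assert((xtime_nsec >> shift) == nsec);	/* the patch's ">> shift" */
	return 0;
}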
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cee5f93e5712..199a8357838c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
select BCM7120_L2_IRQ
select BRCMSTB_L2_IRQ
select IRQ_MIPS_CPU
- select RAW_IRQ_ACCESSORS
select DMA_NONCOHERENT
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 01a644f174dd..1ba21204ebe0 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
{
return ATH79_MISC_IRQ(5);
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 56f5d080ef9d..b7fa9ae28c36 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9d8106758142..ae8569475264 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ " .set push\n"
+ " .set noreorder\n"
+ "1: " LL_INSN " %[tmp], %[buddy]\n"
+ " bnez %[tmp], 2f\n"
+ " or %[tmp], %[tmp], %[global]\n"
+ " " SC_INSN " %[tmp], %[buddy]\n"
+ " beqz %[tmp], 1b\n"
+ " nop\n"
+ "2:\n"
+ " .set pop"
+ : [buddy] "+m" (buddy->pte),
+ [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
}
#endif
}
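
The pgtable.h hunk makes the buddy-PTE update safe under SMP: two CPUs can reach set_pte() for adjacent PTEs concurrently, so "if the buddy is none, mark it global" must be done with lld/scd (ll/sc on 32-bit). Stripped of the MIPS details, the operation amounts to one compare-and-swap from 0 to _PAGE_GLOBAL, as in this C11 sketch (bit value invented):

#include <stdatomic.h>

#define PAGE_GLOBAL 0x1UL	/* stand-in for the real _PAGE_GLOBAL bit */

static void buddy_make_global(_Atomic unsigned long *buddy)
{
	unsigned long none = 0;

	/* upgrade only a still-'none' buddy; if another CPU populated it
	 * first, the asm's bnez bails out, like the failed CAS here */
	atomic_compare_exchange_strong(buddy, &none, PAGE_GLOBAL);
}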
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 16f1ea9ab191..03722d4326a1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif
-extern asmlinkage void smp_call_function_interrupt(void);
-
static inline void arch_send_call_function_single_ipi(int cpu)
{
extern struct plat_smp_ops *mp_ops; /* private */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
.set noreorder
bltz k0, 8f
move k1, sp
+#ifdef CONFIG_EVA
+ /*
+ * Flush interAptiv's Return Prediction Stack (RPS) by writing
+ * EntryHi. Toggling Config7.RPS is slower and less portable.
+ *
+ * The RPS isn't automatically flushed when exceptions are
+ * taken, which can result in kernel mode speculative accesses
+ * to user addresses if the RPS mispredicts. That's harmless
+ * when user and kernel share the same address space, but with
+ * EVA the same user segments may be unmapped to kernel mode,
+ * even containing sensitive MMIO regions or invalid memory.
+ *
+ * This can happen when the kernel sets the return address to
+ * ret_from_* and jr's to the exception handler, which looks
+ * more like a tail call than a function call. If nested calls
+ * don't evict the last user address in the RPS, it will
+ * mispredict the return and fetch from a user controlled
+ * address into the icache.
+ *
+ * More recent EVA-capable cores with MAAR to restrict
+ * speculative accesses aren't affected.
+ */
+ MFC0 k0, CP0_ENTRYHI
+ MTC0 k0, CP0_ENTRYHI
+#endif
.set reorder
/* Called from user mode, new stack. */
get_saved_sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
- cpumask_t mask;
+ cpumask_t allowed, mask;
int retval;
struct task_struct *p;
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
- cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
out_unlock:
read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index b130033838ba..5fcec3032f38 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
return mips_machine_name;
}
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
process_entry:
PTR_L s2, (s0)
- PTR_ADD s0, s0, SZREG
+ PTR_ADDIU s0, s0, SZREG
/*
* In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
/* copy page word by word */
REG_L s5, (s2)
REG_S s5, (s4)
- PTR_ADD s4, s4, SZREG
- PTR_ADD s2, s2, SZREG
- LONG_SUB s6, s6, 1
+ PTR_ADDIU s4, s4, SZREG
+ PTR_ADDIU s2, s2, SZREG
+ LONG_ADDIU s6, s6, -1
beq s6, zero, process_entry
b copy_word
b process_entry
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 336708ae5c5b..78cf8c2f1de0 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
if (action == 0)
scheduler_ipi();
else
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d0744cc77ea7..a31896c33716 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
- irq_enter();
- generic_smp_call_function_interrupt();
- irq_exit();
-}
-
static void stop_this_cpu(void *dummy)
{
/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e207a43b5f8f..8ea28e6ab37d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
+ mm_segment_t old_fs = get_fs();
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
prepare_frametrace(&regs);
}
}
+ /*
+ * show_stack() deals exclusively with kernel mode, so be sure to access
+ * the stack in the kernel (not user) address space.
+ */
+ set_fs(KERNEL_DS);
show_stacktrace(task, &regs);
+ set_fs(old_fs);
}
static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
+ mm_segment_t old_fs = get_fs();
prev_state = exception_enter();
show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
dump_tlb_all();
}
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
show_code((unsigned int __user *) regs->cp0_epc);
+ set_fs(old_fs);
+
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
: "memory"); \
} while(0)
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab10573490d..2c218c3bbca5 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
{
return ltq_perfcount_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509877c6e9d9..1a4738a8f2d3 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
if (action & SMP_ASK_C0COUNT) {
BUG_ON(cpu != 0);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 77d96db8253c..aab218c36e0d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+ protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
- protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+ protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
} else {
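
For orientation: protection_map[] is indexed by the low four vm_flags bits (VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8), so slots 4, 6, 12 and 14 are exactly the VM_EXEC-without-VM_READ combinations; dropping _PAGE_NO_READ there is consistent with the fault.c hunk that follows, which tolerates instruction fetches from mappings lacking VM_READ. A sketch of the index decode, mirroring mm's vm_get_page_prot():

	pgprot_t prot = protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
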
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 36c0f26fac6b..852a41c6da45 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -133,7 +133,8 @@ good_area:
#endif
goto bad_area;
}
- if (!(vma->vm_flags & VM_READ)) {
+ if (!(vma->vm_flags & VM_READ) &&
+ exception_epc(regs) != address) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
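
The added condition exempts instruction fetches: when the faulting address equals exception_epc(regs), the CPU was fetching code rather than reading data, and a fetch should not be rejected merely because the mapping lacks VM_READ. Condensed as a sketch:

	/* Sketch: treat a fault on the faulting PC as execute, not read. */
	bool is_fetch = (exception_epc(regs) == address);

	if (!is_fetch && !(vma->vm_flags & VM_READ))
		goto bad_area;	/* genuine data read from a no-read mapping */
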
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index d1392f8f5811..fa8f591f3713 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 5625b190edc0..b7bf721eabf5 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
return mips_cpu_perf_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
static void __init init_rtc(void)
{
- /* stop the clock whilst setting it up */
- CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+ unsigned char freq, ctrl;
- /* 32KHz time base */
- CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+ /* Set 32KHz time base if not already set */
+ freq = CMOS_READ(RTC_FREQ_SELECT);
+ if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
- /* start the clock */
- CMOS_WRITE(RTC_24H, RTC_CONTROL);
+ /* Ensure SET bit is clear so RTC can run */
+ ctrl = CMOS_READ(RTC_CONTROL);
+ if (ctrl & RTC_SET)
+ CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
}
void __init plat_time_init(void)
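
init_rtc() now reads each CMOS register first and writes only when a change is needed, so a firmware-configured RTC is never needlessly stopped via RTC_SET (which can cost the clock ticks). The same guard, factored into a hypothetical helper for illustration:

	#include <linux/mc146818rtc.h>	/* CMOS_READ(), CMOS_WRITE() */

	/* Hypothetical: update bits in a CMOS register only if they differ. */
	static void cmos_update_bits(unsigned char reg, unsigned char mask,
				     unsigned char val)
	{
		unsigned char cur = CMOS_READ(reg);

		if ((cur & mask) != val)
			CMOS_WRITE((cur & ~mask) | val, reg);
	}
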
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d69895fb1d..a120b7a5a8fe 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
return -1;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index dc3e327fbbac..f5fff228b347 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
clear_c0_eimr(irq);
ack_c0_eirr(irq);
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
set_c0_eimr(irq);
}
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 42181c7105df..f8d3e081b2eb 100644
--- a/arch/mips/paravirt/paravirt-smp.c
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 7c73fcb92a10..8a377346f0ca 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
{
return gic_get_c0_perfcount_int();
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
int get_c0_fdc_int(void)
{
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2de..ffa0f7101a97 100644
--- a/arch/mips/pmcs-msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 53707aacc0f8..8c624a8b9ea2 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
{
return rt_perfcount_irq;
}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 3fbaef97a1b8..16ec4e12daa3 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
scheduler_ipi();
} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
- smp_call_function_interrupt();
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
- smp_call_function_interrupt();
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
} else
#endif
{
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index af7d44edd9a8..4c71aea25663 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -29,8 +29,6 @@
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
-extern void smp_call_function_interrupt(void);
-
/*
* These are routines for dealing with the bcm1480 smp capabilities
* independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c0c4b3f88a08..1cf66f5ff23d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
- if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ if (action & SMP_CALL_FUNCTION) {
+ irq_enter();
+ generic_smp_call_function_interrupt();
+ irq_exit();
+ }
}
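
One rule explains every SMP hunk in this series: generic_smp_call_function_interrupt() does not do its own irq_enter()/irq_exit(), unlike the removed MIPS wrapper. Handlers registered through request_irq() already run with that accounting done by the core IRQ code, so they call the generic helper directly; paths that dispatch straight from low-level mailbox or vectored interrupt code must add the pair themselves. Schematically (names hypothetical):

	/* 1. request_irq() handler: core code has already done irq_enter(). */
	static irqreturn_t my_ipi_call_interrupt(int irq, void *dev_id)
	{
		generic_smp_call_function_interrupt();
		return IRQ_HANDLED;
	}

	/* 2. Direct low-level dispatch: account for the interrupt manually. */
	static void my_mailbox_dispatch(unsigned int action)
	{
		if (action & SMP_CALL_FUNCTION) {
			irq_enter();
			generic_smp_call_function_interrupt();
			irq_exit();
		}
	}
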
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5cf5e6ea213b..7cf0df859d05 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
}
/* Unmask the event */
- if (eeh_enabled())
+ if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
enable_irq(eeh_event_irq);
return ret;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5738d315248b..85cbc96eff6c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
unsigned levels, unsigned long limit,
- unsigned long *current_offset)
+ unsigned long *current_offset, unsigned long *total_allocated)
{
struct page *tce_mem = NULL;
__be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
}
addr = page_address(tce_mem);
memset(addr, 0, allocated);
+ *total_allocated += allocated;
--levels;
if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
for (i = 0; i < entries; ++i) {
tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
- levels, limit, current_offset);
+ levels, limit, current_offset, total_allocated);
if (!tmp)
break;
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
struct iommu_table *tbl)
{
void *addr;
- unsigned long offset = 0, level_shift;
+ unsigned long offset = 0, level_shift, total_allocated = 0;
const unsigned window_shift = ilog2(window_size);
unsigned entries_shift = window_shift - page_shift;
unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
/* Allocate TCE table */
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
- levels, tce_table_size, &offset);
+ levels, tce_table_size, &offset, &total_allocated);
/* addr==NULL means that the first level allocation failed */
if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
page_shift);
tbl->it_level_size = 1ULL << (level_shift - 3);
tbl->it_indirect_levels = levels - 1;
- tbl->it_allocated_size = offset;
+ tbl->it_allocated_size = total_allocated;
pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
window_size, tce_table_size, bus_offset);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..f32f843a3631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
- if (!vcpu->requests)
- return 0;
retry:
kvm_s390_vcpu_request_handled(vcpu);
+ if (!vcpu->requests)
+ return 0;
/*
* We use MMU_RELOAD just to re-arm the ipte notifier for the
* guest prefix page. gmap_ipte_notify will wait on the ptl lock.
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa2024e94..6424249d5f78 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
* Must preserve %o5 between VISEntryHalf and VISExitHalf */
#define VISEntryHalf \
- rd %fprs, %o5; \
- andcc %o5, FPRS_FEF, %g0; \
- be,pt %icc, 297f; \
- sethi %hi(298f), %g7; \
- sethi %hi(VISenterhalf), %g1; \
- jmpl %g1 + %lo(VISenterhalf), %g0; \
- or %g7, %lo(298f), %g7; \
- clr %o5; \
-297: wr %o5, FPRS_FEF, %fprs; \
-298:
+ VISEntry
+
+#define VISExitHalf \
+ VISExit
#define VISEntryHalfFast(fail_label) \
rd %fprs, %o5; \
@@ -47,7 +41,7 @@
ba,a,pt %xcc, fail_label; \
297: wr %o5, FPRS_FEF, %fprs;
-#define VISExitHalf \
+#define VISExitHalfFast \
wr %o5, 0, %fprs;
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a20e7d..83aeeb1dffdb 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+ VISExitHalfFast
+#else
VISExitHalf
-
+#endif
brz,pn %o2, .Lexit
cmp %o2, 19
ble,pn %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9e2e2e..a063d84336d6 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
stx %g3, [%g6 + TI_GSR]
2: add %g6, %g1, %g3
- cmp %o5, FPRS_DU
- be,pn %icc, 6f
- sll %g1, 3, %g1
+ mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+ sll %g1, 3, %g1
stb %o5, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
.align 32
80: jmpl %g7 + %g0, %g0
nop
-
-6: ldub [%g3 + TI_FPSAVED], %o5
- or %o5, FPRS_DU, %o5
- add %g6, TI_FPREGS+0x80, %g2
- stb %o5, [%g3 + TI_FPSAVED]
-
- sll %g1, 5, %g1
- add %g6, TI_FPREGS+0xc0, %g3
- wr %g0, FPRS_FEF, %fprs
- membar #Sync
- stda %f32, [%g2 + %g1] ASI_BLK_P
- stda %f48, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 80f
- nop
-
- .align 32
-80: jmpl %g7 + %g0, %g0
- nop
-
- .align 32
-VISenterhalf:
- ldub [%g6 + TI_FPDEPTH], %g1
- brnz,a,pn %g1, 1f
- cmp %g1, 1
- stb %g0, [%g6 + TI_FPSAVED]
- stx %fsr, [%g6 + TI_XFSR]
- clr %o5
- jmpl %g7 + %g0, %g0
- wr %g0, FPRS_FEF, %fprs
-
-1: bne,pn %icc, 2f
- srl %g1, 1, %g1
- ba,pt %xcc, vis1
- sub %g7, 8, %g7
-2: addcc %g6, %g1, %g3
- sll %g1, 3, %g1
- andn %o5, FPRS_DU, %g2
- stb %g2, [%g3 + TI_FPSAVED]
-
- rd %gsr, %g2
- add %g6, %g1, %g3
- stx %g2, [%g3 + TI_GSR]
- add %g6, %g1, %g2
- stx %fsr, [%g2 + TI_XFSR]
- sll %g1, 5, %g1
-3: andcc %o5, FPRS_DL, %g0
- be,pn %icc, 4f
- add %g6, TI_FPREGS, %g2
-
- add %g6, TI_FPREGS+0x40, %g3
- membar #Sync
- stda %f0, [%g2 + %g1] ASI_BLK_P
- stda %f16, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 4f
- nop
-
- .align 32
-4: and %o5, FPRS_DU, %o5
- jmpl %g7 + %g0, %g0
- wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a95660c..8069ce12f20b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
void VISenter(void);
EXPORT_SYMBOL(VISenter);
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
return -EFAULT;
- memset(to, 0, sizeof(*to));
-
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c82bd150d43..7d69afd8b6fa 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
unsigned int e820_type = 0;
unsigned long m = efi->efi_memmap;
+#ifdef CONFIG_X86_64
+ m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
switch (d->type) {
case EFI_RESERVED_TYPE:
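
On x86-64 the EFI memory-map pointer arrives split across two 32-bit boot_params fields, and only the low half was being used, so a map placed above 4 GiB was read through a truncated address. With invented example values:

	u64 m = 0xdeadb000;	/* efi->efi_memmap (low 32 bits) */
	m |= (u64)0x1 << 32;	/* efi->efi_memmap_hi: m is now 0x1deadb000 */
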
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89fd2647..4e10d73cf018 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
set_ldt(NULL, 0);
}
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
- set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- preempt_disable();
- load_LDT_nolock(pc);
- preempt_enable();
-}
-
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620a73b4..364d27481a52 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
* we put the segment information here.
*/
typedef struct {
- void *ldt;
- int size;
+ struct ldt_struct *ldt;
#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 804a3a6030ca..984abfe47edc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+ /*
+ * Xen requires page-aligned LDTs with special permissions. This is
+ * needed to prevent us from installing evil descriptors such as
+ * call gates. On native, we could merge the ldt_struct and LDT
+ * allocations, but it's not worth trying to optimize.
+ */
+ struct desc_struct *entries;
+ int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+ struct ldt_struct *ldt;
+
+ /* lockless_dereference synchronizes with smp_store_release */
+ ldt = lockless_dereference(mm->context.ldt);
+
+ /*
+ * Any change to mm->context.ldt is followed by an IPI to all
+ * CPUs with the mm active. The LDT will not be freed until
+ * after the IPI is handled by all such CPUs. This means that,
+ * if the ldt_struct changes before we return, the values we see
+ * will be safe, and the new values will be loaded before we run
+ * any user code.
+ *
+ * NB: don't try to convert this to use RCU without extreme care.
+ * We would still need IRQs off, because we don't want to change
+ * the local LDT after an IPI loaded a newer value than the one
+ * that we can see.
+ */
+
+ if (unlikely(ldt))
+ set_ldt(ldt->entries, ldt->size);
+ else
+ clear_LDT();
+
+ DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
+/*
* Used for LDT copy/destruction.
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
- * never free an LDT while the mm still exists. That
- * means that next->context.ldt != prev->context.ldt,
- * because mms never share an LDT.
+ * never set context.ldt to NULL while the mm still
+ * exists. That means that next->context.ldt !=
+ * prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
#ifdef CONFIG_SMP
else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
}
#endif
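
load_mm_ldt() is the consumer half of a publish/consume pairing: install_ldt() in arch/x86/kernel/ldt.c (further down in this patch) publishes a fully initialized ldt_struct with smp_store_release(), which orders the structure's initialization before the pointer store, while lockless_dereference() on this side orders the pointer load before any load through it. The pairing, reduced to a sketch:

	/* Writer (install_ldt): initialize everything, publish last. */
	new_ldt->size = size;			/* ... all fields set ... */
	smp_store_release(&mm->context.ldt, new_ldt);

	/* Reader (load_mm_ldt): consume the pointer, then dereference. */
	struct ldt_struct *ldt = lockless_dereference(mm->context.ldt);
	if (ldt)
		set_ldt(ldt->entries, ldt->size);
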
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b182c998..9dfce4e0417d 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
unsigned long ip;
unsigned long flags;
unsigned short cs;
- unsigned short __pad2; /* Was called gs, but was always zero. */
- unsigned short __pad1; /* Was called fs, but was always zero. */
- unsigned short ss;
+ unsigned short gs;
+ unsigned short fs;
+ unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 0e8a973de9ee..40836a9a7250 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
-
- /*
- * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
- * Linux saved and restored fs and gs in these slots. This
- * was counterproductive, as fsbase and gsbase were never
- * saved, so arch_prctl was presumably unreliable.
- *
- * If these slots are ever needed for any other purpose, there
- * is some risk that very old 64-bit binaries could get
- * confused. I doubt that many such binaries still work,
- * though, since the same patch in 2.5.64 also removed the
- * 64-bit set_thread_area syscall, so it appears that there is
- * no TLS API that works in both pre- and post-2.5.64 kernels.
- */
- __u16 __pad2; /* Was gs. */
- __u16 __pad1; /* Was fs. */
-
- __u16 ss;
+ __u16 gs;
+ __u16 fs;
+ __u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 845dc0df2002..206052e55517 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
*/
if (irq < nr_legacy_irqs() && data->count == 1) {
if (info->ioapic_trigger != data->trigger)
- mp_register_handler(irq, data->trigger);
+ mp_register_handler(irq, info->ioapic_trigger);
data->entry.trigger = data->trigger = info->ioapic_trigger;
data->entry.polarity = data->polarity = info->ioapic_polarity;
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 922c5e0cea4c..cb9e5df42dd2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1410,7 +1410,7 @@ void cpu_init(void)
load_sp0(t, &current->thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);
clear_all_debug_regs();
dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
load_sp0(t, thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);
t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3658de47900f..9469dfa55607 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
int idx = segment >> 3;
if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ struct ldt_struct *ldt;
+
if (idx > LDT_ENTRIES)
return 0;
- if (idx > current->active_mm->context.size)
+ /* IRQs are off, so this synchronizes with smp_store_release */
+ ldt = lockless_dereference(current->active_mm->context.ldt);
+ if (!ldt || idx > ldt->size)
return 0;
- desc = current->active_mm->context.ldt;
+ desc = &ldt->entries[idx];
} else {
if (idx > GDT_ENTRIES)
return 0;
- desc = raw_cpu_ptr(gdt_page.gdt);
+ desc = raw_cpu_ptr(gdt_page.gdt) + idx;
}
- return get_desc_base(desc + idx);
+ return get_desc_base(desc);
}
#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d759cc..2bcc0525f1c1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -20,82 +21,82 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
- if (current->active_mm == current_mm)
- load_LDT(&current->active_mm->context);
+ mm_context_t *pc;
+
+ if (current->active_mm != current_mm)
+ return;
+
+ pc = &current->active_mm->context;
+ set_ldt(pc->ldt->entries, pc->ldt->size);
}
-#endif
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
{
- void *oldldt, *newldt;
- int oldsize;
-
- if (mincount <= pc->size)
- return 0;
- oldsize = pc->size;
- mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
- (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
- if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
- newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+ struct ldt_struct *new_ldt;
+ int alloc_size;
+
+ if (size > LDT_ENTRIES)
+ return NULL;
+
+ new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+ if (!new_ldt)
+ return NULL;
+
+ BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+ alloc_size = size * LDT_ENTRY_SIZE;
+
+ /*
+ * Xen is very picky: it requires a page-aligned LDT that has no
+ * trailing nonzero bytes in any page that contains LDT descriptors.
+ * Keep it simple: zero the whole allocation and never allocate less
+ * than PAGE_SIZE.
+ */
+ if (alloc_size > PAGE_SIZE)
+ new_ldt->entries = vzalloc(alloc_size);
else
- newldt = (void *)__get_free_page(GFP_KERNEL);
-
- if (!newldt)
- return -ENOMEM;
+ new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (oldsize)
- memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
- oldldt = pc->ldt;
- memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
- (mincount - oldsize) * LDT_ENTRY_SIZE);
+ if (!new_ldt->entries) {
+ kfree(new_ldt);
+ return NULL;
+ }
- paravirt_alloc_ldt(newldt, mincount);
+ new_ldt->size = size;
+ return new_ldt;
+}
-#ifdef CONFIG_X86_64
- /* CHECKME: Do we really need this ? */
- wmb();
-#endif
- pc->ldt = newldt;
- wmb();
- pc->size = mincount;
- wmb();
-
- if (reload) {
-#ifdef CONFIG_SMP
- preempt_disable();
- load_LDT(pc);
- if (!cpumask_equal(mm_cpumask(current->mm),
- cpumask_of(smp_processor_id())))
- smp_call_function(flush_ldt, current->mm, 1);
- preempt_enable();
-#else
- load_LDT(pc);
-#endif
- }
- if (oldsize) {
- paravirt_free_ldt(oldldt, oldsize);
- if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(oldldt);
- else
- put_page(virt_to_page(oldldt));
- }
- return 0;
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+ paravirt_alloc_ldt(ldt->entries, ldt->size);
}
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+ struct ldt_struct *ldt)
{
- int err = alloc_ldt(new, old->size, 0);
- int i;
+ /* Synchronizes with lockless_dereference in load_mm_ldt. */
+ smp_store_release(&current_mm->context.ldt, ldt);
+
+ /* Activate the LDT for all CPUs using current_mm. */
+ on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+}
- if (err < 0)
- return err;
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+ if (likely(!ldt))
+ return;
- for (i = 0; i < old->size; i++)
- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
- return 0;
+ paravirt_free_ldt(ldt->entries, ldt->size);
+ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(ldt->entries);
+ else
+ kfree(ldt->entries);
+ kfree(ldt);
}
/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+ struct ldt_struct *new_ldt;
struct mm_struct *old_mm;
int retval = 0;
mutex_init(&mm->context.lock);
- mm->context.size = 0;
old_mm = current->mm;
- if (old_mm && old_mm->context.size > 0) {
- mutex_lock(&old_mm->context.lock);
- retval = copy_ldt(&mm->context, &old_mm->context);
- mutex_unlock(&old_mm->context.lock);
+ if (!old_mm) {
+ mm->context.ldt = NULL;
+ return 0;
}
+
+ mutex_lock(&old_mm->context.lock);
+ if (!old_mm->context.ldt) {
+ mm->context.ldt = NULL;
+ goto out_unlock;
+ }
+
+ new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+ if (!new_ldt) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+ new_ldt->size * LDT_ENTRY_SIZE);
+ finalize_ldt_struct(new_ldt);
+
+ mm->context.ldt = new_ldt;
+
+out_unlock:
+ mutex_unlock(&old_mm->context.lock);
return retval;
}
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
- if (mm->context.size) {
-#ifdef CONFIG_X86_32
- /* CHECKME: Can this ever happen ? */
- if (mm == current->active_mm)
- clear_LDT();
-#endif
- paravirt_free_ldt(mm->context.ldt, mm->context.size);
- if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(mm->context.ldt);
- else
- put_page(virt_to_page(mm->context.ldt));
- mm->context.size = 0;
- }
+ free_ldt_struct(mm->context.ldt);
+ mm->context.ldt = NULL;
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
- int err;
+ int retval;
unsigned long size;
struct mm_struct *mm = current->mm;
- if (!mm->context.size)
- return 0;
+ mutex_lock(&mm->context.lock);
+
+ if (!mm->context.ldt) {
+ retval = 0;
+ goto out_unlock;
+ }
+
if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
- mutex_lock(&mm->context.lock);
- size = mm->context.size * LDT_ENTRY_SIZE;
+ size = mm->context.ldt->size * LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;
- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
- mutex_unlock(&mm->context.lock);
- if (err < 0)
- goto error_return;
+ if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+ retval = -EFAULT;
+ goto out_unlock;
+ }
+
if (size != bytecount) {
- /* zero-fill the rest */
- if (clear_user(ptr + size, bytecount - size) != 0) {
- err = -EFAULT;
- goto error_return;
+ /* Zero-fill the rest and pretend we read bytecount bytes. */
+ if (clear_user(ptr + size, bytecount - size)) {
+ retval = -EFAULT;
+ goto out_unlock;
}
}
- return bytecount;
-error_return:
- return err;
+ retval = bytecount;
+
+out_unlock:
+ mutex_unlock(&mm->context.lock);
+ return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
struct desc_struct ldt;
int error;
struct user_desc ldt_info;
+ int oldsize, newsize;
+ struct ldt_struct *new_ldt, *old_ldt;
error = -EINVAL;
if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
goto out;
}
- mutex_lock(&mm->context.lock);
- if (ldt_info.entry_number >= mm->context.size) {
- error = alloc_ldt(&current->mm->context,
- ldt_info.entry_number + 1, 1);
- if (error < 0)
- goto out_unlock;
- }
-
- /* Allow LDTs to be cleared by the user. */
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
- if (oldmode || LDT_empty(&ldt_info)) {
- memset(&ldt, 0, sizeof(ldt));
- goto install;
+ if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+ LDT_empty(&ldt_info)) {
+ /* The user wants to clear the entry. */
+ memset(&ldt, 0, sizeof(ldt));
+ } else {
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ error = -EINVAL;
+ goto out;
}
+
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
}
- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
- error = -EINVAL;
+ mutex_lock(&mm->context.lock);
+
+ old_ldt = mm->context.ldt;
+ oldsize = old_ldt ? old_ldt->size : 0;
+ newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+ error = -ENOMEM;
+ new_ldt = alloc_ldt_struct(newsize);
+ if (!new_ldt)
goto out_unlock;
- }
- fill_ldt(&ldt, &ldt_info);
- if (oldmode)
- ldt.avl = 0;
+ if (old_ldt)
+ memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+ new_ldt->entries[ldt_info.entry_number] = ldt;
+ finalize_ldt_struct(new_ldt);
- /* Install the new entry ... */
-install:
- write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+ install_ldt(mm, new_ldt);
+ free_ldt_struct(old_ldt);
error = 0;
out_unlock:
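
write_ldt() never modifies a live LDT anymore: it builds a complete replacement, publishes it, and frees the old table only after install_ldt()'s on_each_cpu_mask() IPI guarantees no CPU is still loading from it. The update sequence, condensed from the hunk above:

	new_ldt = alloc_ldt_struct(newsize);		/* zeroed allocation */
	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries,
		       oldsize * LDT_ENTRY_SIZE);
	new_ldt->entries[ldt_info.entry_number] = ldt;	/* the one changed slot */
	finalize_ldt_struct(new_ldt);			/* paravirt hook; now immutable */
	install_ldt(mm, new_ldt);			/* publish + IPI all users */
	free_ldt_struct(old_ldt);			/* safe: nobody references it */
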
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71d7849a07f7..f6b916387590 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
void release_thread(struct task_struct *dead_task)
{
if (dead_task->mm) {
- if (dead_task->mm->context.size) {
+ if (dead_task->mm->context.ldt) {
pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
dead_task->comm,
dead_task->mm->context.ldt,
- dead_task->mm->context.size);
+ dead_task->mm->context.ldt->size);
BUG();
}
}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 206996c1669d..71820c42b6ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
COPY(r15);
#endif /* CONFIG_X86_64 */
+#ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+ /* The kernel saves and restores only the CS segment register on signals,
+ * which is the bare minimum needed to allow mixed 32/64-bit code.
+ * An app's signal handler can save/restore other segments if needed. */
+ COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
#else /* !CONFIG_X86_32 */
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->cs, &sc->cs);
- put_user_ex(0, &sc->__pad2);
- put_user_ex(0, &sc->__pad1);
- put_user_ex(regs->ss, &sc->ss);
+ put_user_ex(0, &sc->gs);
+ put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */
put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
regs->sp = (unsigned long)frame;
- /*
- * Set up the CS and SS registers to run signal handlers in
- * 64-bit mode, even if the handler happens to be interrupting
- * 32-bit or 16-bit code.
- *
- * SS is subtle. In 64-bit mode, we don't need any particular
- * SS descriptor, but we do need SS to be valid. It's possible
- * that the old SS is entirely bogus -- this can happen if the
- * signal we're trying to deliver is #GP or #SS caused by a bad
- * SS value.
- */
+ /* Set up the CS register to run signal handlers in 64-bit mode,
+ even if the handler happens to be interrupting 32-bit code. */
regs->cs = __USER_CS;
- regs->ss = __USER_DS;
return 0;
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d0c0d0..6273324186ac 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
+#include <asm/mmu_context.h>
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
@@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
seg &= ~7UL;
mutex_lock(&child->mm->context.lock);
- if (unlikely((seg >> 3) >= child->mm->context.size))
+ if (unlikely(!child->mm->context.ldt ||
+ (seg >> 3) >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
- desc = child->mm->context.ldt + seg;
+ desc = &child->mm->context.ldt->entries[seg];
base = get_desc_base(desc);
/* 16-bit code segment? */
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index dc0a84a6f309..9e8bf13572e6 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
if (iter.mtrr_disabled)
return mtrr_disabled_type();
+ /* not contained in any MTRRs. */
+ if (type == -1)
+ return mtrr_default_type(mtrr_state);
+
/*
* We just check one page, partially covered by MTRRs is
* impossible.
*/
WARN_ON(iter.partial_map);
- /* not contained in any MTRRs. */
- if (type == -1)
- return mtrr_default_type(mtrr_state);
-
return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cfba30f27392..e4308fe6afe8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
static int __init arch_parse_efi_cmdline(char *str)
{
+ if (!str) {
+ pr_warn("need at least one option\n");
+ return -EINVAL;
+ }
+
if (parse_option_str(str, "old_map"))
set_bit(EFI_OLD_MEMMAP, &efi.flags);
if (parse_option_str(str, "debug"))
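
The added NULL check rejects a bare "efi" parameter, which previously passed a NULL string straight to parse_option_str(). The documented forms keep working, e.g. (illustrative):

	efi=old_map	use the old 1:1 EFI memory map
	efi=debug	enable EFI debugging output
	efi		now rejected with -EINVAL instead of passing NULL along
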
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0d7dd1f5ac36..9ab52791fed5 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -22,6 +22,7 @@
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
+#include <asm/mmu_context.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
syscall_init(); /* This sets MSR_*STAR and related */
#endif
load_TR_desc(); /* This does ltr */
- load_LDT(&current->active_mm->context); /* This does lldt */
+ load_mm_ldt(current->active_mm); /* This does lldt */
fpu__resume_cpu();
}
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755f337a..4b6e29ac0968 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o platform-pci-unplug.o \
- p2m.o
+ p2m.o apic.o
obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_XEN_DOM0) += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0b95c9b8283f..11d6fb4e8483 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte_t pte;
unsigned long pfn;
struct page *page;
+ unsigned char dummy;
ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte = pfn_pte(pfn, prot);
+ /*
+ * Careful: update_va_mapping() will fail if the virtual address
+ * we're poking isn't populated in the page tables. We don't
+ * need to worry about the direct map (that's always in the page
+ * tables), but we need to be careful about vmap space. In
+ * particular, the top level page table can lazily propagate
+ * entries between processes, so if we've switched mms since we
+ * vmapped the target in the first place, we might not have the
+ * top-level page table entry populated.
+ *
+ * We disable preemption because we want the same mm active when
+ * we probe the target and when we issue the hypercall. We'll
+ * have the same nominal mm, but if we're a kernel thread, lazy
+ * mm dropping could change our pgd.
+ *
+ * Out of an abundance of caution, this uses __get_user() to fault
+ * in the target address just in case there's some obscure case
+ * in which the target address isn't readable.
+ */
+
+ preempt_disable();
+
+ pagefault_disable(); /* Avoid warnings due to being atomic. */
+ __get_user(dummy, (unsigned char __user __force *)v);
+ pagefault_enable();
+
if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
BUG();
} else
kmap_flush_unused();
+
+ preempt_enable();
}
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
int i;
+ /*
+ * We need to mark all the aliases of the LDT pages RO. We
+ * don't need to call vm_flush_aliases(), though, since that's
+ * only responsible for flushing aliases out of the TLBs, not
+ * the page tables, and Xen will flush the TLB for us if needed.
+ *
+ * To avoid confusing future readers: none of this is necessary
+ * to load the LDT. The hypervisor only checks this when the
+ * LDT is faulted in due to subsequent descriptor access.
+ */
+
for(i = 0; i < entries; i += entries_per_page)
set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}
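
The long comment in set_aliased_prot() describes a general trap worth isolating: a vmalloc'd address may be absent from the current page tables until a fault lazily syncs the top-level entry, and HYPERVISOR_update_va_mapping() cannot tolerate that fault, so the target is probed first, under preempt_disable() so the probe and the hypercall see the same mm. The skeleton of the pattern:

	unsigned char dummy;

	preempt_disable();	/* same mm for the probe and the hypercall */

	pagefault_disable();	/* suppress might_fault() warnings while atomic */
	__get_user(dummy, (unsigned char __user __force *)v);
	pagefault_enable();

	/* ... fault-intolerant access to v (e.g. the hypercall) ... */

	preempt_enable();
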
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c20fe29e65f4..2292721b1d10 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
size_t size)
{
}
-static inline void __init xen_init_apic(void)
-{
-}
#endif
+void __init xen_init_apic(void);
+
#ifdef CONFIG_XEN_EFI
extern void xen_efi_init(void);
#else