Diffstat (limited to 'arch')
 arch/parisc/Kconfig                     |  17
 arch/parisc/boot/compressed/.gitignore  |   2
 arch/parisc/boot/compressed/Makefile    |   8
 arch/parisc/boot/compressed/firmware.c  |   2
 arch/parisc/boot/compressed/real2.S     |   2
 arch/parisc/include/asm/assembly.h      |  10
 arch/parisc/include/asm/futex.h         |  59
 arch/parisc/include/asm/io.h            |   6
 arch/parisc/include/asm/special_insns.h |  44
 arch/parisc/include/asm/uaccess.h       |  12
 arch/parisc/include/uapi/asm/pdc.h      |  32
 arch/parisc/kernel/Makefile             |   3
 arch/parisc/kernel/asm-offsets.c        |   4
 arch/parisc/kernel/hpmc.S               |   6
 arch/parisc/kernel/kgdb.c               |  21
 arch/parisc/kernel/syscall.S            | 774
 arch/parisc/kernel/toc.c                |  18
 arch/parisc/kernel/toc_asm.S            |  33
 arch/parisc/kernel/traps.c              |   2
 arch/parisc/mm/fault.c                  |  23
 20 files changed, 763 insertions(+), 315 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 011dc32fdb4d..43c1c880def6 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -287,20 +287,6 @@ config SMP
If you don't know what to do here, say N.
-config TOC
- bool "Support TOC switch"
- default y if 64BIT || !SMP
- help
- Most PA-RISC machines have either a switch at the back of the machine
- or a command in BMC to trigger a TOC interrupt. If you say Y here a
- handler will be installed which will either show a backtrace on all
- CPUs, or enter a possible configured debugger like kgdb/kdb.
-
- Note that with this option enabled, the kernel will use an additional 16KB
- per possible CPU as a special stack for the TOC handler.
-
- If you don't want to debug the Kernel, say N.
-
config PARISC_CPU_TOPOLOGY
bool "Support cpu topology definition"
depends on SMP
@@ -370,7 +356,8 @@ config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "4"
+ default "4" if 64BIT
+ default "16"
config KEXEC
bool "Kexec system call"
diff --git a/arch/parisc/boot/compressed/.gitignore b/arch/parisc/boot/compressed/.gitignore
index b9853a356ab2..a5839aa16706 100644
--- a/arch/parisc/boot/compressed/.gitignore
+++ b/arch/parisc/boot/compressed/.gitignore
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-firmware.c
-real2.S
sizes.h
vmlinux
vmlinux.lds
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index bf4f2891d0b7..116bd5c1873c 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -13,7 +13,6 @@ OBJECTS := head.o real2.o firmware.o misc.o piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += $(OBJECTS) sizes.h
-targets += real2.S firmware.c
KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -42,14 +41,7 @@ $(obj)/head.o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(objtree)/$(obj)
$(obj)/misc.o: $(obj)/sizes.h
-$(obj)/firmware.o: $(obj)/firmware.c
-$(obj)/firmware.c: $(srctree)/arch/$(SRCARCH)/kernel/firmware.c
- $(call cmd,shipped)
-
AFLAGS_real2.o += -DBOOTLOADER
-$(obj)/real2.o: $(obj)/real2.S
-$(obj)/real2.S: $(srctree)/arch/$(SRCARCH)/kernel/real2.S
- $(call cmd,shipped)
CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
$(obj)/vmlinux.lds: $(obj)/sizes.h
diff --git a/arch/parisc/boot/compressed/firmware.c b/arch/parisc/boot/compressed/firmware.c
new file mode 100644
index 000000000000..16a07137fe92
--- /dev/null
+++ b/arch/parisc/boot/compressed/firmware.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../kernel/firmware.c"
diff --git a/arch/parisc/boot/compressed/real2.S b/arch/parisc/boot/compressed/real2.S
new file mode 100644
index 000000000000..cdc6a4da3240
--- /dev/null
+++ b/arch/parisc/boot/compressed/real2.S
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include "../../kernel/real2.S"
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 6d13ae236fcb..6369082c6c74 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -158,6 +158,16 @@
#endif
.endm
+ /* The depi instruction leaves the most significant 32 bits of the
+ * target register in an undefined state on PA 2.0 systems. */
+ .macro depi_safe i, p, len, t
+#ifdef CONFIG_64BIT
+ depdi \i, 32+(\p), \len, \t
+#else
+ depi \i, \p, \len, \t
+#endif
+ .endm
+
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
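As the comment notes, plain depi can leave the upper 32 bits of the target undefined on PA 2.0, so the 64-bit path deposits at position 32+p to keep the whole register well defined. A rough userspace C model of the 32-bit deposit semantics, using PA-RISC's big-endian bit numbering (bit 0 is the MSB); depi32 and the sample value are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Model of "depi i,p,len,t": deposit the low 'len' bits of 'i' into 't'
 * so the field's rightmost bit lands at big-endian bit 'p', i.e. at
 * LSB position 31-p (assumes 0 < len < 32). */
static uint32_t depi32(uint32_t t, uint32_t i, int p, int len)
{
	uint32_t mask = ((1u << len) - 1) << (31 - p);

	return (t & ~mask) | ((i << (31 - p)) & mask);
}

int main(void)
{
	/* depi_safe 0,31,2,%r20 as used later in this patch clears the
	 * two low bits, word-aligning an address: prints 0x4000a5f4 */
	printf("%#x\n", depi32(0x4000a5f7, 0, 31, 2));
	return 0;
}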
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 9cd4dd6e63ad..b5835325d44b 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -8,39 +8,47 @@
#include <asm/errno.h>
/* The following has to match the LWS code in syscall.S. We have
- sixteen four-word locks. */
+ * 256 four-word locks. We use bits 20-27 of the futex virtual
+ * address for the hash index.
+ */
+
+static inline unsigned long _futex_hash_index(unsigned long ua)
+{
+ return (ua >> 2) & 0x3fc;
+}
static inline void
-_futex_spin_lock(u32 __user *uaddr)
+_futex_spin_lock_irqsave(arch_spinlock_t *s, unsigned long *flags)
{
- extern u32 lws_lock_start[];
- long index = ((long)uaddr & 0x7f8) >> 1;
- arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
- preempt_disable();
+ local_irq_save(*flags);
arch_spin_lock(s);
}
static inline void
-_futex_spin_unlock(u32 __user *uaddr)
+_futex_spin_unlock_irqrestore(arch_spinlock_t *s, unsigned long *flags)
{
- extern u32 lws_lock_start[];
- long index = ((long)uaddr & 0x7f8) >> 1;
- arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
arch_spin_unlock(s);
- preempt_enable();
+ local_irq_restore(*flags);
}
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
+ extern u32 lws_lock_start[];
+ unsigned long ua = (unsigned long)uaddr;
+ arch_spinlock_t *s;
+ unsigned long flags;
int oldval, ret;
u32 tmp;
- ret = -EFAULT;
+ s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
+ _futex_spin_lock_irqsave(s, &flags);
- _futex_spin_lock(uaddr);
- if (unlikely(get_user(oldval, uaddr) != 0))
+ /* Return -EFAULT if we encounter a page fault or COW break */
+ if (unlikely(get_user(oldval, uaddr) != 0)) {
+ ret = -EFAULT;
goto out_pagefault_enable;
+ }
ret = 0;
tmp = oldval;
@@ -63,13 +71,14 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
break;
default:
ret = -ENOSYS;
+ goto out_pagefault_enable;
}
- if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+ if (unlikely(put_user(tmp, uaddr) != 0))
ret = -EFAULT;
out_pagefault_enable:
- _futex_spin_unlock(uaddr);
+ _futex_spin_unlock_irqrestore(s, &flags);
if (!ret)
*oval = oldval;
@@ -81,7 +90,11 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ extern u32 lws_lock_start[];
+ unsigned long ua = (unsigned long)uaddr;
+ arch_spinlock_t *s;
u32 val;
+ unsigned long flags;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@@ -94,23 +107,25 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
/* HPPA has no cmpxchg in hardware and therefore the
* best we can do here is use an array of locks. The
- * lock selected is based on a hash of the userspace
- * address. This should scale to a couple of CPUs.
+ * lock selected is based on a hash of the virtual
+ * address of the futex. This should scale to a couple
+ * of CPUs.
*/
- _futex_spin_lock(uaddr);
+ s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
+ _futex_spin_lock_irqsave(s, &flags);
if (unlikely(get_user(val, uaddr) != 0)) {
- _futex_spin_unlock(uaddr);
+ _futex_spin_unlock_irqrestore(s, &flags);
return -EFAULT;
}
if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
- _futex_spin_unlock(uaddr);
+ _futex_spin_unlock_irqrestore(s, &flags);
return -EFAULT;
}
*uval = val;
- _futex_spin_unlock(uaddr);
+ _futex_spin_unlock_irqrestore(s, &flags);
return 0;
}
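The C hash above has to agree with the LWS code in syscall.S below, where the same lock is picked with extru_safe %r26, 27, 8 followed by shlw %r20, 4: (ua >> 2) & 0x3fc extracts the same eight address bits (20-27 in big-endian numbering) pre-scaled to a u32 index with a 16-byte stride. A small userspace sketch of the mapping; the sample address is made up:

#include <stdio.h>

/* Mirror of _futex_hash_index(): a word index into lws_lock_start[],
 * always a multiple of 4 because each lock occupies four words. */
static unsigned long futex_hash_index(unsigned long ua)
{
	return (ua >> 2) & 0x3fc;
}

int main(void)
{
	unsigned long ua = 0x4008a5f4;	/* hypothetical futex address */
	unsigned long idx = futex_hash_index(ua);

	printf("u32 index %lu -> lock #%lu of 256\n", idx, idx / 4);
	return 0;
}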
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 0b5259102319..837ddddbac6a 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -273,9 +273,9 @@ static inline int inl(unsigned long addr)
return -1;
}
-#define outb(x, y) BUG()
-#define outw(x, y) BUG()
-#define outl(x, y) BUG()
+#define outb(x, y) ({(void)(x); (void)(y); BUG(); 0;})
+#define outw(x, y) ({(void)(x); (void)(y); BUG(); 0;})
+#define outl(x, y) ({(void)(x); (void)(y); BUG(); 0;})
#endif
/*
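The outb/outw/outl change swaps a bare BUG() for a GNU statement expression, presumably so the arguments are evaluated (silencing unused-variable warnings in callers) and so the macro yields a value where one is expected. A minimal userspace illustration of the construct, with abort() standing in for BUG():

#include <stdlib.h>

#define BUG()		abort()
/* old form: x and y are never evaluated and there is no result value */
#define outb_old(x, y)	BUG()
/* new form: consume both arguments, then yield 0 as the expression value */
#define outb_new(x, y)	({ (void)(x); (void)(y); BUG(); 0; })

int main(void)
{
	int val = 0, port = 0x80;

	if (0)			/* compile-time demo only; never executed */
		outb_new(val, port);
	return 0;
}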
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index a303ae9a77f4..16ee41e77174 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -2,28 +2,32 @@
#ifndef __PARISC_SPECIAL_INSNS_H
#define __PARISC_SPECIAL_INSNS_H
-#define lpa(va) ({ \
- unsigned long pa; \
- __asm__ __volatile__( \
- "copy %%r0,%0\n\t" \
- "lpa %%r0(%1),%0" \
- : "=r" (pa) \
- : "r" (va) \
- : "memory" \
- ); \
- pa; \
+#define lpa(va) ({ \
+ unsigned long pa; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n" \
+ "8:\tlpa %%r0(%1),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+ : "=&r" (pa) \
+ : "r" (va) \
+ : "memory" \
+ ); \
+ pa; \
})
-#define lpa_user(va) ({ \
- unsigned long pa; \
- __asm__ __volatile__( \
- "copy %%r0,%0\n\t" \
- "lpa %%r0(%%sr3,%1),%0" \
- : "=r" (pa) \
- : "r" (va) \
- : "memory" \
- ); \
- pa; \
+#define lpa_user(va) ({ \
+ unsigned long pa; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n" \
+ "8:\tlpa %%r0(%%sr3,%1),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+ : "=&r" (pa) \
+ : "r" (va) \
+ : "memory" \
+ ); \
+ pa; \
})
#define mfctl(reg) ({ \
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 192ad9e11b25..ebf8a845b017 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -53,15 +53,18 @@ struct exception_table_entry {
/*
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
* (with lowest bit set) for which the fault handler in fixup_exception() will
- * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * load -EFAULT into %r29 for a read or write fault, and zeroes the target
* register in case of a read fault in get_user().
*/
+#define ASM_EXCEPTIONTABLE_REG 29
+#define ASM_EXCEPTIONTABLE_VAR(__variable) \
+ register long __variable __asm__ ("r29") = 0
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
#define __get_user_internal(sr, val, ptr) \
({ \
- register long __gu_err __asm__ ("r8") = 0; \
+ ASM_EXCEPTIONTABLE_VAR(__gu_err); \
\
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
@@ -131,7 +134,7 @@ struct exception_table_entry {
#define __put_user_internal(sr, x, ptr) \
({ \
- register long __pu_err __asm__ ("r8") = 0; \
+ ASM_EXCEPTIONTABLE_VAR(__pu_err); \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
@@ -168,7 +171,8 @@ struct exception_table_entry {
* gcc knows about, so there are no aliasing issues. These macros must
* also be aware that fixups are executed in the context of the fault,
* and any registers used there must be listed as clobbers.
- * r8 is already listed as err.
+ * The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG)
+ * is already listed as input and output register.
*/
#define __put_user_asm(sr, stx, x, ptr) \
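The "+ 1" in ASM_EXCEPTIONTABLE_ENTRY_EFAULT() works because fixup addresses are instruction addresses, so bit 0 is always free to act as a flag; fixup_exception() (see the mm/fault.c hunk at the end of this patch) tests that bit and, when set, writes -EFAULT into regs->gr[ASM_EXCEPTIONTABLE_REG]. A toy model of the tagging, with a made-up handler address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t fixup  = 0x1040;	/* hypothetical fixup handler */
	uintptr_t tagged = fixup + 1;	/* the _EFAULT variant sets bit 0 */

	printf("report -EFAULT: %d, real handler: %#lx\n",
	       (int)(tagged & 1), (unsigned long)(tagged & ~1UL));
	return 0;
}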
diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
index acc633c15722..e794e143ec5f 100644
--- a/arch/parisc/include/uapi/asm/pdc.h
+++ b/arch/parisc/include/uapi/asm/pdc.h
@@ -4,7 +4,7 @@
/*
* PDC return values ...
- * All PDC calls return a subset of these errors.
+ * All PDC calls return a subset of these errors.
*/
#define PDC_WARN 3 /* Call completed with a warning */
@@ -165,7 +165,7 @@
#define PDC_PSW_GET_DEFAULTS 1 /* Return defaults */
#define PDC_PSW_SET_DEFAULTS 2 /* Set default */
#define PDC_PSW_ENDIAN_BIT 1 /* set for big endian */
-#define PDC_PSW_WIDE_BIT 2 /* set for wide mode */
+#define PDC_PSW_WIDE_BIT 2 /* set for wide mode */
#define PDC_SYSTEM_MAP 22 /* find system modules */
#define PDC_FIND_MODULE 0
@@ -274,7 +274,7 @@
#define PDC_PCI_PCI_INT_ROUTE_SIZE 13
#define PDC_PCI_GET_INT_TBL_SIZE PDC_PCI_PCI_INT_ROUTE_SIZE
#define PDC_PCI_PCI_INT_ROUTE 14
-#define PDC_PCI_GET_INT_TBL PDC_PCI_PCI_INT_ROUTE
+#define PDC_PCI_GET_INT_TBL PDC_PCI_PCI_INT_ROUTE
#define PDC_PCI_READ_MON_TYPE 15
#define PDC_PCI_WRITE_MON_TYPE 16
@@ -345,7 +345,7 @@
/* constants for PDC_CHASSIS */
#define OSTAT_OFF 0
-#define OSTAT_FLT 1
+#define OSTAT_FLT 1
#define OSTAT_TEST 2
#define OSTAT_INIT 3
#define OSTAT_SHUT 4
@@ -403,7 +403,7 @@ struct zeropage {
int vec_pad1[6];
/* [0x040] reserved processor dependent */
- int pad0[112];
+ int pad0[112]; /* in QEMU pad0[0] holds "SeaBIOS\0" */
/* [0x200] reserved */
int pad1[84];
@@ -691,6 +691,22 @@ struct pdc_hpmc_pim_20 { /* PDC_PIM */
unsigned long long fr[32];
};
+struct pim_cpu_state_cf {
+ union {
+ unsigned int
+ iqv : 1, /* IIA queue Valid */
+ iqf : 1, /* IIA queue Failure */
+ ipv : 1, /* IPRs Valid */
+ grv : 1, /* GRs Valid */
+ crv : 1, /* CRs Valid */
+ srv : 1, /* SRs Valid */
+ trv : 1, /* CR24 through CR31 valid */
+ pad : 24, /* reserved */
+ td : 1; /* TOC did not cause any damage to the system state */
+ unsigned int val;
+ };
+};
+
struct pdc_toc_pim_11 {
unsigned int gr[32];
unsigned int cr[32];
@@ -698,8 +714,7 @@ struct pdc_toc_pim_11 {
unsigned int iasq_back;
unsigned int iaoq_back;
unsigned int check_type;
- unsigned int hversion;
- unsigned int cpu_state;
+ struct pim_cpu_state_cf cpu_state;
};
struct pdc_toc_pim_20 {
@@ -709,8 +724,7 @@ struct pdc_toc_pim_20 {
unsigned long long iasq_back;
unsigned long long iaoq_back;
unsigned int check_type;
- unsigned int hversion;
- unsigned int cpu_state;
+ struct pim_cpu_state_cf cpu_state;
};
#endif /* !defined(__ASSEMBLY__) */
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index ed0b87908d71..8fb819bbbb17 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
ptrace.o hardware.o inventory.o drivers.o alternative.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
- patch.o
+ patch.o toc.o toc_asm.o
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -39,4 +39,3 @@ obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
-obj-$(CONFIG_TOC) += toc.o toc_asm.o
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 55c1c5189c6a..2a83ef36d216 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -36,7 +36,11 @@
int main(void)
{
DEFINE(TASK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+#ifdef CONFIG_SMP
+ DEFINE(TASK_TI_CPU, offsetof(struct task_struct, thread_info.cpu));
+#endif
DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
+ DEFINE(TASK_PAGEFAULT_DISABLED, offsetof(struct task_struct, pagefault_disabled));
BLANK();
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index c2981401775c..eb2e4bd67035 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -43,10 +43,8 @@
* IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
*/
- __PAGE_ALIGNED_BSS
- .align 4096
-hpmc_stack:
- .block 16384
+ .import toc_stack,data
+#define hpmc_stack toc_stack /* re-use the TOC stack */
#define HPMC_IODC_BUF_SIZE 0x8000
diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c
index c4554ac13eac..ab7620f695be 100644
--- a/arch/parisc/kernel/kgdb.c
+++ b/arch/parisc/kernel/kgdb.c
@@ -3,6 +3,7 @@
* PA-RISC KGDB support
*
* Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ * Copyright (c) 2022 Helge Deller <deller@gmx.de>
*
*/
@@ -207,3 +208,23 @@ int kgdb_arch_handle_exception(int trap, int signo,
}
return -1;
}
+
+/* KGDB console driver which uses PDC to read chars from keyboard */
+
+static void kgdb_pdc_write_char(u8 chr)
+{
+ /* no need to print char. kgdb will do it. */
+}
+
+static struct kgdb_io kgdb_pdc_io_ops = {
+ .name = "kgdb_pdc",
+ .read_char = pdc_iodc_getc,
+ .write_char = kgdb_pdc_write_char,
+};
+
+static int __init kgdb_pdc_init(void)
+{
+ kgdb_register_io_module(&kgdb_pdc_io_ops);
+ return 0;
+}
+early_initcall(kgdb_pdc_init);
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 65c88ca7a7ac..1373e5129868 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -50,6 +50,22 @@ registers).
.level PA_ASM_LEVEL
+ .macro lws_pagefault_disable reg1,reg2
+ mfctl %cr30, \reg2
+ ldo TASK_PAGEFAULT_DISABLED(\reg2), \reg2
+ ldw 0(%sr2,\reg2), \reg1
+ ldo 1(\reg1), \reg1
+ stw \reg1, 0(%sr2,\reg2)
+ .endm
+
+ .macro lws_pagefault_enable reg1,reg2
+ mfctl %cr30, \reg2
+ ldo TASK_PAGEFAULT_DISABLED(\reg2), \reg2
+ ldw 0(%sr2,\reg2), \reg1
+ ldo -1(\reg1), \reg1
+ stw \reg1, 0(%sr2,\reg2)
+ .endm
+
.text
.import syscall_exit,code
@@ -74,7 +90,7 @@ ENTRY(linux_gateway_page)
/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
/* Light-weight-syscall entry must always be located at 0xb0 */
/* WARNING: Keep this number updated with table size changes */
-#define __NR_lws_entries (3)
+#define __NR_lws_entries (5)
lws_entry:
gate lws_start, %r0 /* increase privilege */
@@ -490,8 +506,34 @@ lws_start:
/* Jump to lws, lws table pointers already relocated */
be,n 0(%sr2,%r21)
+lws_exit_noerror:
+ lws_pagefault_enable %r1,%r21
+ stw,ma %r20, 0(%sr2,%r20)
+ ssm PSW_SM_I, %r0
+ b lws_exit
+ copy %r0, %r21
+
+lws_wouldblock:
+ ssm PSW_SM_I, %r0
+ ldo 2(%r0), %r28
+ b lws_exit
+ ldo -EAGAIN(%r0), %r21
+
+lws_pagefault:
+ lws_pagefault_enable %r1,%r21
+ stw,ma %r20, 0(%sr2,%r20)
+ ssm PSW_SM_I, %r0
+ ldo 3(%r0),%r28
+ b lws_exit
+ ldo -EAGAIN(%r0),%r21
+
+lws_fault:
+ ldo 1(%r0),%r28
+ b lws_exit
+ ldo -EFAULT(%r0),%r21
+
lws_exit_nosys:
- ldo -ENOSYS(%r0),%r21 /* set errno */
+ ldo -ENOSYS(%r0),%r21
/* Fall through: Return to userspace */
lws_exit:
@@ -518,27 +560,19 @@ lws_exit:
%r28 - Return prev through this register.
%r21 - Kernel error code
- If debugging is DISabled:
-
- %r21 has the following meanings:
-
+ %r21 returns the following error codes:
EAGAIN - CAS is busy, ldcw failed, try again.
EFAULT - Read or write failed.
- If debugging is enabled:
-
- EDEADLOCK - CAS called recursively.
- EAGAIN && r28 == 1 - CAS is busy. Lock contended.
- EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
- EFAULT - Read or write failed.
+ If EAGAIN is returned, %r28 indicates the busy reason:
+ r28 == 1 - CAS is busy. lock contended.
+ r28 == 2 - CAS is busy. ldcw failed.
+ r28 == 3 - CAS is busy. page fault.
Scratch: r20, r28, r1
****************************************************/
- /* Do not enable LWS debugging */
-#define ENABLE_LWS_DEBUG 0
-
/* ELF64 Process entry path */
lws_compare_and_swap64:
#ifdef CONFIG_64BIT
@@ -551,59 +585,45 @@ lws_compare_and_swap64:
b,n lws_exit_nosys
#endif
- /* ELF32 Process entry path */
+ /* ELF32/ELF64 Process entry path */
lws_compare_and_swap32:
#ifdef CONFIG_64BIT
- /* Clip all the input registers */
+ /* Wide mode user process? */
+ bb,<,n %sp, 31, lws_compare_and_swap
+
+ /* Clip all the input registers for 32-bit processes */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
#endif
lws_compare_and_swap:
- /* Load start of lock table */
- ldil L%lws_lock_start, %r20
- ldo R%lws_lock_start(%r20), %r28
+ /* Trigger memory reference interruptions without writing to memory */
+1: ldw 0(%r26), %r28
+2: stbys,e %r0, 0(%r26)
- /* Extract eight bits from r26 and hash lock (Bits 3-11) */
- extru_safe %r26, 28, 8, %r20
+ /* Calculate 8-bit hash index from virtual address */
+ extru_safe %r26, 27, 8, %r20
- /* Find lock to use, the hash is either one of 0 to
- 15, multiplied by 16 (keep it 16-byte aligned)
+ /* Load start of lock table */
+ ldil L%lws_lock_start, %r28
+ ldo R%lws_lock_start(%r28), %r28
+
+ /* Find lock to use, the hash index is one of 0 to
+ 255, multiplied by 16 (keep it 16-byte aligned)
and add to the lock table offset. */
shlw %r20, 4, %r20
add %r20, %r28, %r20
-# if ENABLE_LWS_DEBUG
- /*
- DEBUG, check for deadlock!
- If the thread register values are the same
- then we were the one that locked it last and
- this is a recurisve call that will deadlock.
- We *must* giveup this call and fail.
- */
- ldw 4(%sr2,%r20), %r28 /* Load thread register */
- /* WARNING: If cr27 cycles to the same value we have problems */
- mfctl %cr27, %r21 /* Get current thread register */
- cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
- b lws_exit /* Return error! */
- ldo -EDEADLOCK(%r0), %r21
-cas_lock:
- cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */
- ldo 1(%r0), %r28 /* 1st case */
- b lws_exit /* Contended... */
- ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
-cas_nocontend:
-# endif
-/* ENABLE_LWS_DEBUG */
-
- /* COW breaks can cause contention on UP systems */
- LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
- cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
-cas_wouldblock:
- ldo 2(%r0), %r28 /* 2nd case */
- b lws_exit /* Contended... */
- ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
+
+ /* Try to acquire the lock */
+ LDCW 0(%sr2,%r20), %r28
+ comclr,<> %r0, %r28, %r0
+ b,n lws_wouldblock
+
+ /* Disable page faults to prevent sleeping in critical region */
+ lws_pagefault_disable %r21,%r28
/*
prev = *addr;
@@ -613,59 +633,35 @@ cas_wouldblock:
*/
/* NOTES:
- This all works becuse intr_do_signal
+ This all works because intr_do_signal
and schedule both check the return iasq
and see that we are on the kernel page
so this process is never scheduled off
or is ever sent any signal of any sort,
- thus it is wholly atomic from usrspaces
+ thus it is wholly atomic from userspace's
perspective
*/
-cas_action:
-#if defined CONFIG_SMP && ENABLE_LWS_DEBUG
- /* DEBUG */
- mfctl %cr27, %r1
- stw %r1, 4(%sr2,%r20)
-#endif
/* The load and store could fail */
-1: ldw 0(%r26), %r28
+3: ldw 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw %r24, 0(%r26)
- /* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
-#if ENABLE_LWS_DEBUG
- /* Clear thread register indicator */
- stw %r0, 4(%sr2,%r20)
-#endif
- /* Return to userspace, set no error */
- b lws_exit
- copy %r0, %r21
+4: stw %r24, 0(%r26)
+ b,n lws_exit_noerror
-3:
- /* Error occurred on load or store */
- /* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
-#if ENABLE_LWS_DEBUG
- stw %r0, 4(%sr2,%r20)
-#endif
- b lws_exit
- ldo -EFAULT(%r0),%r21 /* set errno */
- nop
- nop
- nop
- nop
+ /* A fault occurred on load or stbys,e store */
+5: b,n lws_fault
+ ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 5b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 5b-linux_gateway_page)
- /* Two exception table entries, one for the load,
- the other for the store. Either return -EFAULT.
- Each of the entries must be relocated. */
- ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
+ /* A page fault occurred in critical region */
+6: b,n lws_pagefault
+ ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 6b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 6b-linux_gateway_page)
/***************************************************
New CAS implementation which uses pointers and variable size
information. The value pointed by old and new MUST NOT change
- while performing CAS. The lock only protect the value at %r26.
+ while performing CAS. The lock only protects the value at %r26.
%r26 - Address to examine
%r25 - Pointer to the value to check (old)
@@ -674,25 +670,32 @@ cas_action:
%r28 - Return non-zero on failure
%r21 - Kernel error code
- %r21 has the following meanings:
-
+ %r21 returns the following error codes:
EAGAIN - CAS is busy, ldcw failed, try again.
EFAULT - Read or write failed.
+ If EAGAIN is returned, %r28 indicates the busy reason:
+ r28 == 1 - CAS is busy. lock contended.
+ r28 == 2 - CAS is busy. ldcw failed.
+ r28 == 3 - CAS is busy. page fault.
+
Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
****************************************************/
- /* ELF32 Process entry path */
lws_compare_and_swap_2:
#ifdef CONFIG_64BIT
- /* Clip the input registers. We don't need to clip %r23 as we
- only use it for word operations */
+ /* Wide mode user process? */
+ bb,<,n %sp, 31, cas2_begin
+
+ /* Clip the input registers for 32-bit processes. We don't
+ need to clip %r23 as we only use it for word operations */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
#endif
+cas2_begin:
/* Check the validity of the size pointer */
subi,>>= 3, %r23, %r0
b,n lws_exit_nosys
@@ -703,69 +706,77 @@ lws_compare_and_swap_2:
blr %r29, %r0
nop
- /* 8bit load */
-4: ldb 0(%r25), %r25
+ /* 8-bit load */
+1: ldb 0(%r25), %r25
b cas2_lock_start
-5: ldb 0(%r24), %r24
+2: ldb 0(%r24), %r24
nop
nop
nop
nop
nop
- /* 16bit load */
-6: ldh 0(%r25), %r25
+ /* 16-bit load */
+3: ldh 0(%r25), %r25
b cas2_lock_start
-7: ldh 0(%r24), %r24
+4: ldh 0(%r24), %r24
nop
nop
nop
nop
nop
- /* 32bit load */
-8: ldw 0(%r25), %r25
+ /* 32-bit load */
+5: ldw 0(%r25), %r25
b cas2_lock_start
-9: ldw 0(%r24), %r24
+6: ldw 0(%r24), %r24
nop
nop
nop
nop
nop
- /* 64bit load */
+ /* 64-bit load */
#ifdef CONFIG_64BIT
-10: ldd 0(%r25), %r25
-11: ldd 0(%r24), %r24
+7: ldd 0(%r25), %r25
+8: ldd 0(%r24), %r24
#else
/* Load old value into r22/r23 - high/low */
-10: ldw 0(%r25), %r22
-11: ldw 4(%r25), %r23
+7: ldw 0(%r25), %r22
+8: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
-12: flddx 0(%r24), %fr4
+9: flddx 0(%r24), %fr4
#endif
cas2_lock_start:
- /* Load start of lock table */
- ldil L%lws_lock_start, %r20
- ldo R%lws_lock_start(%r20), %r28
+ /* Trigger memory reference interruptions without writing to memory */
+ copy %r26, %r28
+ depi_safe 0, 31, 2, %r28
+10: ldw 0(%r28), %r1
+11: stbys,e %r0, 0(%r28)
+
+ /* Calculate 8-bit hash index from virtual address */
+ extru_safe %r26, 27, 8, %r20
- /* Extract eight bits from r26 and hash lock (Bits 3-11) */
- extru_safe %r26, 28, 8, %r20
+ /* Load start of lock table */
+ ldil L%lws_lock_start, %r28
+ ldo R%lws_lock_start(%r28), %r28
- /* Find lock to use, the hash is either one of 0 to
- 15, multiplied by 16 (keep it 16-byte aligned)
+ /* Find lock to use, the hash index is one of 0 to
+ 255, multiplied by 16 (keep it 16-byte aligned)
and add to the lock table offset. */
shlw %r20, 4, %r20
add %r20, %r28, %r20
- /* COW breaks can cause contention on UP systems */
- LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
- cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */
-cas2_wouldblock:
- ldo 2(%r0), %r28 /* 2nd case */
- b lws_exit /* Contended... */
- ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
+
+ /* Try to acquire the lock */
+ LDCW 0(%sr2,%r20), %r28
+ comclr,<> %r0, %r28, %r0
+ b,n lws_wouldblock
+
+ /* Disable page faults to prevent sleeping in critical region */
+ lws_pagefault_disable %r21,%r28
/*
prev = *addr;
@@ -775,112 +786,493 @@ cas2_wouldblock:
*/
/* NOTES:
- This all works becuse intr_do_signal
+ This all works because intr_do_signal
and schedule both check the return iasq
and see that we are on the kernel page
so this process is never scheduled off
or is ever sent any signal of any sort,
- thus it is wholly atomic from usrspaces
+ thus it is wholly atomic from userspace's
perspective
*/
-cas2_action:
+
/* Jump to the correct function */
blr %r29, %r0
/* Set %r28 as non-zero for now */
ldo 1(%r0),%r28
- /* 8bit CAS */
-13: ldb 0(%r26), %r29
+ /* 8-bit CAS */
+12: ldb 0(%r26), %r29
sub,= %r29, %r25, %r0
- b,n cas2_end
-14: stb %r24, 0(%r26)
- b cas2_end
+ b,n lws_exit_noerror
+13: stb %r24, 0(%r26)
+ b lws_exit_noerror
copy %r0, %r28
nop
nop
- /* 16bit CAS */
-15: ldh 0(%r26), %r29
+ /* 16-bit CAS */
+14: ldh 0(%r26), %r29
sub,= %r29, %r25, %r0
- b,n cas2_end
-16: sth %r24, 0(%r26)
- b cas2_end
+ b,n lws_exit_noerror
+15: sth %r24, 0(%r26)
+ b lws_exit_noerror
copy %r0, %r28
nop
nop
- /* 32bit CAS */
-17: ldw 0(%r26), %r29
+ /* 32-bit CAS */
+16: ldw 0(%r26), %r29
sub,= %r29, %r25, %r0
- b,n cas2_end
-18: stw %r24, 0(%r26)
- b cas2_end
+ b,n lws_exit_noerror
+17: stw %r24, 0(%r26)
+ b lws_exit_noerror
copy %r0, %r28
nop
nop
- /* 64bit CAS */
+ /* 64-bit CAS */
#ifdef CONFIG_64BIT
-19: ldd 0(%r26), %r29
+18: ldd 0(%r26), %r29
sub,*= %r29, %r25, %r0
- b,n cas2_end
-20: std %r24, 0(%r26)
+ b,n lws_exit_noerror
+19: std %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
-19: ldw 0(%r26), %r29
+18: ldw 0(%r26), %r29
sub,= %r29, %r22, %r0
- b,n cas2_end
+ b,n lws_exit_noerror
/* Compare second word */
-20: ldw 4(%r26), %r29
+19: ldw 4(%r26), %r29
sub,= %r29, %r23, %r0
- b,n cas2_end
+ b,n lws_exit_noerror
/* Perform the store */
-21: fstdx %fr4, 0(%r26)
+20: fstdx %fr4, 0(%r26)
copy %r0, %r28
#endif
+ b lws_exit_noerror
+ copy %r0, %r28
-cas2_end:
- /* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
- /* Return to userspace, set no error */
- b lws_exit
- copy %r0, %r21
+ /* A fault occurred on load or stbys,e store */
+30: b,n lws_fault
+ ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+ ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
+#endif
-22:
- /* Error occurred on load or store */
- /* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
+
+ /* A page fault occurred in critical region */
+31: b,n lws_pagefault
+ ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+ ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
+#endif
+
+
+ /***************************************************
+ LWS atomic exchange.
+
+ %r26 - Exchange address
+ %r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
+ %r24 - Address of new value
+ %r23 - Address of old value
+ %r28 - Return non-zero on failure
+ %r21 - Kernel error code
+
+ %r21 returns the following error codes:
+ EAGAIN - CAS is busy, ldcw failed, try again.
+ EFAULT - Read or write failed.
+
+ If EAGAIN is returned, %r28 indicates the busy reason:
+ r28 == 1 - CAS is busy. lock contended.
+ r28 == 2 - CAS is busy. ldcw failed.
+ r28 == 3 - CAS is busy. page fault.
+
+ Scratch: r20, r1
+
+ ****************************************************/
+
+lws_atomic_xchg:
+#ifdef CONFIG_64BIT
+ /* Wide mode user process? */
+ bb,<,n %sp, 31, atomic_xchg_begin
+
+ /* Clip the input registers for 32-bit processes, including
+ %r23, which holds the user address of the old value */
+ depdi 0, 31, 32, %r26
+ depdi 0, 31, 32, %r25
+ depdi 0, 31, 32, %r24
+ depdi 0, 31, 32, %r23
+#endif
+
+atomic_xchg_begin:
+ /* Check the validity of the size pointer */
+ subi,>>= 3, %r25, %r0
+ b,n lws_exit_nosys
+
+ /* Jump to the functions which will load the old and new values into
+ registers depending on their size */
+ shlw %r25, 2, %r1
+ blr %r1, %r0
+ nop
+
+ /* Perform exception checks */
+
+ /* 8-bit exchange */
+1: ldb 0(%r24), %r20
+ copy %r23, %r20
+ depi_safe 0, 31, 2, %r20
+ b atomic_xchg_start
+2: stbys,e %r0, 0(%r20)
+ nop
+ nop
+ nop
+
+ /* 16-bit exchange */
+3: ldh 0(%r24), %r20
+ copy %r23, %r20
+ depi_safe 0, 31, 2, %r20
+ b atomic_xchg_start
+4: stbys,e %r0, 0(%r20)
+ nop
+ nop
+ nop
+
+ /* 32-bit exchange */
+5: ldw 0(%r24), %r20
+ b atomic_xchg_start
+6: stbys,e %r0, 0(%r23)
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ /* 64-bit exchange */
+#ifdef CONFIG_64BIT
+7: ldd 0(%r24), %r20
+8: stdby,e %r0, 0(%r23)
+#else
+7: ldw 0(%r24), %r20
+8: ldw 4(%r24), %r20
+ copy %r23, %r20
+ depi_safe 0, 31, 2, %r20
+9: stbys,e %r0, 0(%r20)
+10: stbys,e %r0, 4(%r20)
+#endif
+
+atomic_xchg_start:
+ /* Trigger memory reference interruptions without writing to memory */
+ copy %r26, %r28
+ depi_safe 0, 31, 2, %r28
+11: ldw 0(%r28), %r1
+12: stbys,e %r0, 0(%r28)
+
+ /* Calculate 8-bit hash index from virtual address */
+ extru_safe %r26, 27, 8, %r20
+
+ /* Load start of lock table */
+ ldil L%lws_lock_start, %r28
+ ldo R%lws_lock_start(%r28), %r28
+
+ /* Find lock to use, the hash index is one of 0 to
+ 255, multiplied by 16 (keep it 16-byte aligned)
+ and add to the lock table offset. */
+ shlw %r20, 4, %r20
+ add %r20, %r28, %r20
+
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
+
+ /* Try to acquire the lock */
+ LDCW 0(%sr2,%r20), %r28
+ comclr,<> %r0, %r28, %r0
+ b,n lws_wouldblock
+
+ /* Disable page faults to prevent sleeping in critical region */
+ lws_pagefault_disable %r21,%r28
+
+ /* NOTES:
+ This all works because intr_do_signal
+ and schedule both check the return iasq
+ and see that we are on the kernel page
+ so this process is never scheduled off
+ or is ever sent any signal of any sort,
+ thus it is wholly atomic from userspace's
+ perspective
+ */
+
+ /* Jump to the correct function */
+ blr %r1, %r0
+ /* Set %r28 as non-zero for now */
ldo 1(%r0),%r28
- b lws_exit
- ldo -EFAULT(%r0),%r21 /* set errno */
- nop
- nop
- nop
-
- /* Exception table entries, for the load and store, return EFAULT.
- Each of the entries must be relocated. */
- ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
+
+ /* 8-bit exchange */
+14: ldb 0(%r26), %r1
+15: stb %r1, 0(%r23)
+16: ldb 0(%r24), %r1
+17: stb %r1, 0(%r26)
+ b lws_exit_noerror
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 16-bit exchange */
+18: ldh 0(%r26), %r1
+19: sth %r1, 0(%r23)
+20: ldh 0(%r24), %r1
+21: sth %r1, 0(%r26)
+ b lws_exit_noerror
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 32-bit exchange */
+22: ldw 0(%r26), %r1
+23: stw %r1, 0(%r23)
+24: ldw 0(%r24), %r1
+25: stw %r1, 0(%r26)
+ b lws_exit_noerror
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 64-bit exchange */
+#ifdef CONFIG_64BIT
+26: ldd 0(%r26), %r1
+27: std %r1, 0(%r23)
+28: ldd 0(%r24), %r1
+29: std %r1, 0(%r26)
+#else
+26: flddx 0(%r26), %fr4
+27: fstdx %fr4, 0(%r23)
+28: flddx 0(%r24), %fr4
+29: fstdx %fr4, 0(%r26)
+#endif
+ b lws_exit_noerror
+ copy %r0, %r28
+
+ /* A fault occurred on load or stbys,e store */
+30: b,n lws_fault
+ ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
#ifndef CONFIG_64BIT
- ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
- ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
+#endif
+
+ ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 30b-linux_gateway_page)
+
+ /* A page fault occurred in critical region */
+31: b,n lws_pagefault
+ ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(22b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(23b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(24b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(25b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(26b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(27b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(28b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(29b-linux_gateway_page, 31b-linux_gateway_page)
+
+ /***************************************************
+ LWS atomic store.
+
+ %r26 - Address to store
+ %r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
+ %r24 - Address of value to store
+ %r28 - Return non-zero on failure
+ %r21 - Kernel error code
+
+ %r21 returns the following error codes:
+ EAGAIN - CAS is busy, ldcw failed, try again.
+ EFAULT - Read or write failed.
+
+ If EAGAIN is returned, %r28 indicates the busy reason:
+ r28 == 1 - CAS is busy. lock contended.
+ r28 == 2 - CAS is busy. ldcw failed.
+ r28 == 3 - CAS is busy. page fault.
+
+ Scratch: r20, r1
+
+ ****************************************************/
+
+lws_atomic_store:
+#ifdef CONFIG_64BIT
+ /* Wide mode user process? */
+ bb,<,n %sp, 31, atomic_store_begin
+
+ /* Clip the input registers for 32-bit processes. We don't
+ need to clip %r23 as we only use it for word operations */
+ depdi 0, 31, 32, %r26
+ depdi 0, 31, 32, %r25
+ depdi 0, 31, 32, %r24
#endif
+atomic_store_begin:
+ /* Check the validity of the size pointer */
+ subi,>>= 3, %r25, %r0
+ b,n lws_exit_nosys
+
+ shlw %r25, 1, %r1
+ blr %r1, %r0
+ nop
+
+ /* Perform exception checks */
+
+ /* 8-bit store */
+1: ldb 0(%r24), %r20
+ b,n atomic_store_start
+ nop
+ nop
+
+ /* 16-bit store */
+2: ldh 0(%r24), %r20
+ b,n atomic_store_start
+ nop
+ nop
+
+ /* 32-bit store */
+3: ldw 0(%r24), %r20
+ b,n atomic_store_start
+ nop
+ nop
+
+ /* 64-bit store */
+#ifdef CONFIG_64BIT
+4: ldd 0(%r24), %r20
+#else
+4: ldw 0(%r24), %r20
+5: ldw 4(%r24), %r20
+#endif
+
+atomic_store_start:
+ /* Trigger memory reference interruptions without writing to memory */
+ copy %r26, %r28
+ depi_safe 0, 31, 2, %r28
+6: ldw 0(%r28), %r1
+7: stbys,e %r0, 0(%r28)
+
+ /* Calculate 8-bit hash index from virtual address */
+ extru_safe %r26, 27, 8, %r20
+
+ /* Load start of lock table */
+ ldil L%lws_lock_start, %r28
+ ldo R%lws_lock_start(%r28), %r28
+
+ /* Find lock to use, the hash index is one of 0 to
+ 255, multiplied by 16 (keep it 16-byte aligned)
+ and add to the lock table offset. */
+ shlw %r20, 4, %r20
+ add %r20, %r28, %r20
+
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
+
+ /* Try to acquire the lock */
+ LDCW 0(%sr2,%r20), %r28
+ comclr,<> %r0, %r28, %r0
+ b,n lws_wouldblock
+
+ /* Disable page faults to prevent sleeping in critical region */
+ lws_pagefault_disable %r21,%r28
+
+ /* NOTES:
+ This all works because intr_do_signal
+ and schedule both check the return iasq
+ and see that we are on the kernel page
+ so this process is never scheduled off
+ or is ever sent any signal of any sort,
+ thus it is wholly atomic from userspace's
+ perspective
+ */
+
+ /* Jump to the correct function */
+ blr %r1, %r0
+ /* Set %r28 as non-zero for now */
+ ldo 1(%r0),%r28
+
+ /* 8-bit store */
+9: ldb 0(%r24), %r1
+10: stb %r1, 0(%r26)
+ b lws_exit_noerror
+ copy %r0, %r28
+
+ /* 16-bit store */
+11: ldh 0(%r24), %r1
+12: sth %r1, 0(%r26)
+ b lws_exit_noerror
+ copy %r0, %r28
+
+ /* 32-bit store */
+13: ldw 0(%r24), %r1
+14: stw %r1, 0(%r26)
+ b lws_exit_noerror
+ copy %r0, %r28
+
+ /* 64-bit store */
+#ifdef CONFIG_64BIT
+15: ldd 0(%r24), %r1
+16: std %r1, 0(%r26)
+#else
+15: flddx 0(%r24), %fr4
+16: fstdx %fr4, 0(%r26)
+#endif
+ b lws_exit_noerror
+ copy %r0, %r28
+
+ /* A fault occurred on load or stbys,e store */
+30: b,n lws_fault
+ ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+ ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
+#endif
+
+ ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
+
+ /* A page fault occurred in critical region */
+31: b,n lws_pagefault
+ ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
+
/* Make sure nothing else is placed on this page */
.align PAGE_SIZE
END(linux_gateway_page)
@@ -899,7 +1291,9 @@ ENTRY(end_linux_gateway_page)
ENTRY(lws_table)
LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */
- LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */
+ LWS_ENTRY(compare_and_swap_2) /* 2 - Atomic 64bit CAS */
+ LWS_ENTRY(atomic_xchg) /* 3 - Atomic Exchange */
+ LWS_ENTRY(atomic_store) /* 4 - Atomic Store */
END(lws_table)
/* End of lws table */
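For context, userspace enters a light-weight syscall by branching to offset 0xb0 in the gateway page with the function number in %r20, and gets the error back in %r21 (with %r28 distinguishing the EAGAIN cases documented above). A rough 32-bit sketch of calling the new atomic exchange (LWS function 3), modeled on the glibc LWS convention; the clobber list and the retry-on-EAGAIN loop are assumptions, not part of this patch:

#include <errno.h>

/* Exchange a 32-bit word: %r26 = address, %r25 = size index (2 -> 32 bit),
 * %r24 = address of the new value, %r23 = where the old value is stored. */
static long lws_xchg32(unsigned int *addr, unsigned int *newv,
		       unsigned int *oldv)
{
	register unsigned long r26 __asm__("r26") = (unsigned long)addr;
	register unsigned long r25 __asm__("r25") = 2;
	register unsigned long r24 __asm__("r24") = (unsigned long)newv;
	register unsigned long r23 __asm__("r23") = (unsigned long)oldv;
	register long err __asm__("r21");

	do {
		__asm__ __volatile__(
			"ble	0xb0(%%sr2, %%r0)\n\t"
			"ldi	3, %%r20"	/* function number, delay slot */
			: "=r" (err), "+r" (r26), "+r" (r25),
			  "+r" (r24), "+r" (r23)
			:
			: "r1", "r20", "r22", "r28", "r29", "r31", "memory");
	} while (err == -EAGAIN);	/* lock contended or page fault */

	return err;			/* 0 on success, else -EFAULT/-ENOSYS */
}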
diff --git a/arch/parisc/kernel/toc.c b/arch/parisc/kernel/toc.c
index 18327611cf8f..be9a0bebe61e 100644
--- a/arch/parisc/kernel/toc.c
+++ b/arch/parisc/kernel/toc.c
@@ -9,8 +9,10 @@
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
+#include <asm/ldcw.h>
-unsigned int __aligned(16) toc_lock = 1;
+static unsigned int __aligned(16) toc_lock = 1;
+DEFINE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack);
static void toc20_to_pt_regs(struct pt_regs *regs, struct pdc_toc_pim_20 *toc)
{
@@ -63,7 +65,8 @@ void notrace __noreturn __cold toc_intr(struct pt_regs *regs)
struct pdc_toc_pim_20 pim_data20;
struct pdc_toc_pim_11 pim_data11;
- nmi_enter();
+ /* verify we wrote regs to the correct stack */
+ BUG_ON(regs != (struct pt_regs *)&per_cpu(toc_stack, raw_smp_processor_id()));
if (boot_cpu_data.cpu_type >= pcxu) {
if (pdc_pim_toc20(&pim_data20))
@@ -76,14 +79,25 @@ void notrace __noreturn __cold toc_intr(struct pt_regs *regs)
}
#ifdef CONFIG_KGDB
+ nmi_enter();
+
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(raw_smp_processor_id(), regs);
kgdb_handle_exception(9, SIGTRAP, 0, regs);
#endif
+
+ /* serialize output, otherwise all CPUs write backtrace at once */
+ while (__ldcw(&toc_lock) == 0)
+ ; /* wait */
show_regs(regs);
+ toc_lock = 1; /* release lock for next CPU */
+
+ if (raw_smp_processor_id() != 0)
+ while (1) ; /* all but the monarch CPU wait endlessly. */
/* give other CPUs time to show their backtrace */
mdelay(2000);
+
machine_restart("TOC");
/* should never reach this */
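The serialization above relies on PA-RISC's one atomic primitive: ldcw loads a word and atomically writes zero to it, so a nonzero result means the lock was free and is now held, and a plain store of 1 releases it. That is why toc_lock starts out as 1 and carries __aligned(16) (ldcw requires a 16-byte-aligned word). A hedged sketch of roughly what the kernel's __ldcw() expands to:

/* Load word and clear: returns the previous value and leaves 0 behind.
 * The word must be 16-byte aligned. Illustrative only. */
static inline unsigned int ldcw(volatile unsigned int *word)
{
	unsigned int old;

	__asm__ __volatile__("ldcw 0(%1),%0"
			     : "=r" (old)
			     : "r" (word)
			     : "memory");
	return old;
}

/* Usage, mirroring toc_intr():
 *	while (ldcw(&toc_lock) == 0)
 *		;		   spin until we win the lock
 *	...
 *	toc_lock = 1;		   release for the next CPU
 */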
diff --git a/arch/parisc/kernel/toc_asm.S b/arch/parisc/kernel/toc_asm.S
index e94ba8044190..570f5cef526e 100644
--- a/arch/parisc/kernel/toc_asm.S
+++ b/arch/parisc/kernel/toc_asm.S
@@ -5,34 +5,25 @@
.level 1.1
#include <asm/assembly.h>
-#include <asm/psw.h>
#include <linux/threads.h>
#include <linux/linkage.h>
.text
.import toc_intr,code
- .import toc_lock,data
+ .import toc_stack,data
.align 16
ENTRY_CFI(toc_handler)
- /*
- * synchronize CPUs and obtain offset
- * for stack setup.
- */
- load32 PA(toc_lock),%r1
-0: ldcw,co 0(%r1),%r2
- cmpib,= 0,%r2,0b
- nop
- addi 1,%r2,%r4
- stw %r4,0(%r1)
- addi -1,%r2,%r4
-
load32 PA(toc_stack),%sp
- /*
- * deposit CPU number into stack address,
- * so every CPU will have its own stack.
- */
- SHLREG %r4,14,%r4
+
+#ifdef CONFIG_SMP
+ /* get per-cpu toc_stack address. */
+ mfctl %cr30, %r1
+ tophys %r1,%r2 /* task_struct */
+ LDREG TASK_TI_CPU(%r2),%r4 /* cpu */
+ load32 PA(__per_cpu_offset),%r1
+ LDREGX %r4(%r1),%r4
add %r4,%sp,%sp
+#endif
/*
* setup pt_regs on stack and save the
@@ -82,7 +73,3 @@ ENDPROC_CFI(toc_handler)
*/
SYM_DATA(toc_handler_csum, .long 0)
SYM_DATA(toc_handler_size, .long . - toc_handler)
-
- __PAGE_ALIGNED_BSS
- .align 64
-SYM_DATA(toc_stack, .block 16384*NR_CPUS)
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 892b7fc8f3c4..eb41fece1910 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -785,7 +785,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
* unless pagefault_disable() was called before.
*/
- if (fault_space == 0 && !faulthandler_disabled())
+ if (faulthandler_disabled() || fault_space == 0)
{
/* Clean up and return if in exception table. */
if (fixup_exception(regs))
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 4a6221b869fd..147868427b7c 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -148,11 +148,11 @@ int fixup_exception(struct pt_regs *regs)
* Fix up get_user() and put_user().
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
* bit in the relative address of the fixup routine to indicate
- * that %r8 should be loaded with -EFAULT to report a userspace
- * access error.
+ * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
+ * -EFAULT to report a userspace access error.
*/
if (fix->fixup & 1) {
- regs->gr[8] = -EFAULT;
+ regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
/* zero target register for get_user() */
if (parisc_acctyp(0, regs->iir) == VM_READ) {
@@ -266,14 +266,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long acc_type;
vm_fault_t fault = 0;
unsigned int flags;
-
- if (faulthandler_disabled())
- goto no_context;
+ char *msg;
tsk = current;
mm = tsk->mm;
- if (!mm)
+ if (!mm) {
+ msg = "Page fault: no context";
goto no_context;
+ }
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
@@ -409,6 +409,7 @@ bad_area:
force_sig_fault(signo, si_code, (void __user *) address);
return;
}
+ msg = "Page fault: bad address";
no_context:
@@ -416,11 +417,13 @@ no_context:
return;
}
- parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
+ parisc_terminate(msg, regs, code, address);
- out_of_memory:
+out_of_memory:
mmap_read_unlock(mm);
- if (!user_mode(regs))
+ if (!user_mode(regs)) {
+ msg = "Page fault: out of memory";
goto no_context;
+ }
pagefault_out_of_memory();
}