Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/Kconfig                        4
-rw-r--r--  arch/parisc/Kconfig.debug                 11
-rw-r--r--  arch/parisc/include/asm/assembly.h         4
-rw-r--r--  arch/parisc/include/asm/cacheflush.h       4
-rw-r--r--  arch/parisc/include/asm/spinlock.h        39
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h   8
-rw-r--r--  arch/parisc/kernel/alternative.c           2
-rw-r--r--  arch/parisc/kernel/cache.c                 5
-rw-r--r--  arch/parisc/kernel/pci-dma.c              18
-rw-r--r--  arch/parisc/kernel/process.c              11
-rw-r--r--  arch/parisc/kernel/traps.c                18
11 files changed, 102 insertions, 22 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 466a25525364..967bde65dd0e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -130,6 +130,10 @@ config PM
 config STACKTRACE_SUPPORT
 	def_bool y
 
+config LOCKDEP_SUPPORT
+	bool
+	default y
+
 config ISA_DMA_API
 	bool
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index f66554cd5c45..3a059cb5e112 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -1 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
+#
+config LIGHTWEIGHT_SPINLOCK_CHECK
+	bool "Enable lightweight spinlock checks"
+	depends on SMP && !DEBUG_SPINLOCK
+	default y
+	help
+	  Add checks with low performance impact to the spinlock functions
+	  to catch memory overwrites at runtime. For more advanced
+	  spinlock debugging you should choose the DEBUG_SPINLOCK option
+	  which will detect uninitialized spinlocks too.
+	  If unsure say Y here.
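
For anyone who wants to try the new check: arch Kconfig.debug entries normally
surface under "Kernel hacking" in menuconfig. A hypothetical .config fragment
satisfying the dependencies above (SMP on, full spinlock debugging off):

    CONFIG_SMP=y
    # CONFIG_DEBUG_SPINLOCK is not set
    CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK=y
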
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 0f0d4a496fef..75677b526b2b 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -90,10 +90,6 @@
 #include <asm/asmregs.h>
 #include <asm/psw.h>
 
-	sp	=	30
-	gp	=	27
-	ipsw	=	22
-
 /*
  * We provide two versions of each macro to convert from physical
  * to virtual and vice versa. The "_r1" versions take one argument
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 0bdee6724132..c8b6928cee1e 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -48,6 +48,10 @@ void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags) \
+ xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \
+ xa_unlock_irqrestore(&mapping->i_pages, flags)
#define flush_icache_page(vma,page) do { \
flush_kernel_dcache_page_addr(page_address(page)); \
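
The two wrappers above mirror the existing flush_dcache_mmap_lock()/unlock()
pair but save and restore the caller's interrupt state instead of
unconditionally disabling and re-enabling IRQs; the cache.c hunk below converts
flush_dcache_page() to them. A compilable toy model of the difference (plain C
standing in for the xa_lock family; nothing here is kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_on;  /* models the CPU interrupt-enable flag */

    static void lock_irq(void)            { irqs_on = false; }
    static void unlock_irq(void)          { irqs_on = true; }  /* unconditional */
    static void lock_irqsave(bool *f)     { *f = irqs_on; irqs_on = false; }
    static void unlock_irqrestore(bool f) { irqs_on = f; }

    int main(void)
    {
        bool flags;

        irqs_on = false;  /* caller entered with IRQs already off */
        lock_irq();
        unlock_irq();
        printf("irq pair:     IRQs %s (wrongly re-enabled)\n",
               irqs_on ? "on" : "off");

        irqs_on = false;
        lock_irqsave(&flags);
        unlock_irqrestore(flags);
        printf("irqsave pair: IRQs %s (caller state preserved)\n",
               irqs_on ? "on" : "off");
        return 0;
    }
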
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index a6e5d66a7656..edfcb9858bcb 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -7,10 +7,26 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
+#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */
+
+static inline void arch_spin_val_check(int lock_val)
+{
+	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+		asm volatile(	"andcm,=	%0,%1,%%r0\n"
+				".word %2\n"
+		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
+			"i" (SPINLOCK_BREAK_INSN));
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
-	volatile unsigned int *a = __ldcw_align(x);
-	return READ_ONCE(*a) == 0;
+	volatile unsigned int *a;
+	int lock_val;
+
+	a = __ldcw_align(x);
+	lock_val = READ_ONCE(*a);
+	arch_spin_val_check(lock_val);
+	return (lock_val == 0);
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *x)
@@ -18,9 +34,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
+	do {
+		int lock_val_old;
+
+		lock_val_old = __ldcw(a);
+		arch_spin_val_check(lock_val_old);
+		if (lock_val_old)
+			return;	/* got lock */
+
+		/* wait until we should try to get lock again */
 		while (*a == 0)
 			continue;
+	} while (1);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
@@ -29,15 +54,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 	a = __ldcw_align(x);
 
 	/* Release with ordered store. */
-	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+	__asm__ __volatile__("stw,ma %0,0(%1)"
+		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
+	int lock_val;
 
 	a = __ldcw_align(x);
-	return __ldcw(a) != 0;
+	lock_val = __ldcw(a);
+	arch_spin_val_check(lock_val);
+	return lock_val != 0;
 }
 
 /*
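
Taken together, the three hunks feed every value observed on the lock word
through arch_spin_val_check(). The asm is the whole trick: "andcm,=" computes
lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL and nullifies the following ".word"
(the break 6,6 trap) when the result is zero. That holds for both legal
states, 0 while held (ldcw zeroes the word on acquire) and 0x1a46 while free,
but not for typical overwrite garbage. A compilable userspace model of the
check, with abort() standing in for the break trap:

    #include <stdio.h>
    #include <stdlib.h>

    #define __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46

    static void spin_val_check(unsigned int lock_val)
    {
        /* pass iff every set bit of lock_val is also set in the magic,
         * i.e. for the two legal states 0 (held) and 0x1a46 (free) */
        if (lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL) {
            fprintf(stderr, "Spinlock was trashed: 0x%08x\n", lock_val);
            abort();    /* kernel: break 6,6, caught in handle_break() */
        }
    }

    int main(void)
    {
        spin_val_check(0x0000);      /* locked: ok */
        spin_val_check(0x1a46);      /* unlocked: ok */
        spin_val_check(0xdeadbeef);  /* stray write: trips the check */
        return 0;
    }

Note the check is deliberately cheap: a corrupted value whose set bits happen
to fall entirely inside 0x1a46 would still slip through, but random overwrites
almost never do, and the two-instruction sequence costs nearly nothing on the
hot path.
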
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index ca39ee350c3f..d65934079ebd 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -2,13 +2,17 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
+#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46
+
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
 #else
 	volatile unsigned int lock[4];
-# define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED	\
+	{ { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+	    __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
 #endif
 } arch_spinlock_t;
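
The 0x1a46 magic replaces 1 so that a healthy lock word can be told apart from
the small integers a stray store is likely to leave behind. As for why the
pre-PA2.0 variant is "unsigned int lock[4]" at all: ldcw requires a 16-byte
aligned operand, so the kernel reserves 16 bytes and __ldcw_align() (in
asm/ldcw.h) picks the aligned word inside them, which is also why all four
words must carry the magic. A small model of that alignment step (my
reconstruction of the macro's effect, not the kernel source):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        volatile unsigned int lock[4];
    } arch_spinlock_t;

    static volatile unsigned int *ldcw_align(arch_spinlock_t *x)
    {
        uintptr_t a = (uintptr_t)&x->lock[0];

        a = (a + 15) & ~(uintptr_t)15;  /* round up to 16-byte boundary */
        return (volatile unsigned int *)a;
    }

    int main(void)
    {
        arch_spinlock_t lk;
        volatile unsigned int *a = ldcw_align(&lk);

        assert(((uintptr_t)a & 15) == 0);           /* aligned for ldcw */
        assert(a >= lk.lock && a < lk.lock + 4);    /* inside the struct */
        printf("lock[4] at %p, ldcw word at %p\n", (void *)lk.lock, (void *)a);
        return 0;
    }
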
diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
index 66f5672c70bd..25c4d6c3375d 100644
--- a/arch/parisc/kernel/alternative.c
+++ b/arch/parisc/kernel/alternative.c
@@ -25,7 +25,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 {
 	struct alt_instr *entry;
 	int index = 0, applied = 0;
-	int num_cpus = num_online_cpus();
+	int num_cpus = num_present_cpus();
 	u16 cond_check;
 
 	cond_check = ALT_COND_ALWAYS |
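
The one-line change above matters because apply_alternatives() runs early in
boot, before the secondary CPUs have been brought online: counted with
num_online_cpus(), an SMP machine momentarily looks uniprocessor and would get
its SMP-only code patched away. A toy model of the decision (only the
cond_check construction mirrors the function above; the flag values here are
made up):

    #include <stdio.h>

    #define ALT_COND_ALWAYS 0x01  /* illustrative values, not the kernel's */
    #define ALT_COND_NO_SMP 0x02

    static unsigned int make_cond_check(int num_cpus)
    {
        /* UP-only patches are considered only when one CPU exists */
        return ALT_COND_ALWAYS | ((num_cpus == 1) ? ALT_COND_NO_SMP : 0);
    }

    int main(void)
    {
        /* four CPUs present, but only the boot CPU online while patching */
        printf("online-based  (1): cond_check=%#x -> UP patches applied\n",
               make_cond_check(1));
        printf("present-based (4): cond_check=%#x -> SMP code kept\n",
               make_cond_check(4));
        return 0;
    }
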
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 1d3b8bc8a623..ca4a302d4365 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -399,6 +399,7 @@ void flush_dcache_page(struct page *page)
 	unsigned long offset;
 	unsigned long addr, old_addr = 0;
 	unsigned long count = 0;
+	unsigned long flags;
 	pgoff_t pgoff;
 
 	if (mapping && !mapping_mapped(mapping)) {
@@ -420,7 +421,7 @@ void flush_dcache_page(struct page *page)
 	 * to flush one address here for them all to become coherent
 	 * on machines that support equivalent aliasing
 	 */
-	flush_dcache_mmap_lock(mapping);
+	flush_dcache_mmap_lock_irqsave(mapping, flags);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
@@ -460,7 +461,7 @@ void flush_dcache_page(struct page *page)
 		}
 		WARN_ON(++count == 4096);
 	}
-	flush_dcache_mmap_unlock(mapping);
+	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
 EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index ba87f791323b..71ed5391f29d 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -446,11 +446,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
+	/*
+	 * fdc: The data cache line is written back to memory, if and only if
+	 * it is dirty, and then invalidated from the data cache.
+	 */
 	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
-	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+	unsigned long addr = (unsigned long) phys_to_virt(paddr);
+
+	switch (dir) {
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		flush_kernel_dcache_range(addr, size);
+		return;
+	case DMA_FROM_DEVICE:
+		purge_kernel_dcache_range_asm(addr, addr + size);
+		return;
+	default:
+		BUG();
+	}
 }
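
The asymmetry is the point of this hunk. Before the device touches memory,
dirty cache lines must be written back (fdc, per the comment above). But after
a device-to-memory transfer, writing back a dirty line would clobber the data
the device just stored, so for DMA_FROM_DEVICE the lines are only purged:
invalidated without writeback. A compilable sketch of that mapping, mirroring
the switch above (enum values follow linux/dma-direction.h):

    #include <stdio.h>

    enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE = 1,
        DMA_FROM_DEVICE = 2,
        DMA_NONE = 3,
    };

    static const char *sync_for_cpu_op(enum dma_data_direction dir)
    {
        switch (dir) {
        case DMA_TO_DEVICE:
        case DMA_BIDIRECTIONAL:
            return "flush: write back dirty lines, then invalidate";
        case DMA_FROM_DEVICE:
            return "purge: invalidate only, never write back";
        default:
            return "BUG()";
        }
    }

    int main(void)
    {
        printf("DMA_FROM_DEVICE -> %s\n", sync_for_cpu_op(DMA_FROM_DEVICE));
        printf("DMA_TO_DEVICE   -> %s\n", sync_for_cpu_op(DMA_TO_DEVICE));
        return 0;
    }
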
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 97c6f875bd0e..24411ab79c30 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -122,13 +122,18 @@ void machine_power_off(void)
 	/* It seems we have no way to power the system off via
 	 * software. The user has to press the button himself. */
 
-	printk(KERN_EMERG "System shut down completed.\n"
-	       "Please power this system off now.");
+	printk("Power off or press RETURN to reboot.\n");
 
 	/* prevent soft lockup/stalled CPU messages for endless loop. */
 	rcu_sysrq_start();
 	lockup_detector_soft_poweroff();
-	for (;;);
+	while (1) {
+		/* reboot if user presses RETURN key */
+		if (pdc_iodc_getc() == 13) {
+			printk("Rebooting...\n");
+			machine_restart(NULL);
+		}
+	}
 }
 
 void (*pm_power_off)(void);
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f9696fbf646c..304eebd1c83e 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -47,6 +47,10 @@
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
 
+#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
+#include <asm/spinlock.h>
+#endif
+
 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
 
 static void parisc_show_stack(struct task_struct *task,
@@ -291,24 +295,30 @@ static void handle_break(struct pt_regs *regs)
 	}
 
 #ifdef CONFIG_KPROBES
-	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
+	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
 		parisc_kprobe_break_handler(regs);
 		return;
 	}
-	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) {
+	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
 		parisc_kprobe_ss_handler(regs);
 		return;
 	}
 #endif
 
 #ifdef CONFIG_KGDB
-	if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
-		iir == PARISC_KGDB_BREAK_INSN)) {
+	if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
+		iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
 		kgdb_handle_exception(9, SIGTRAP, 0, regs);
 		return;
 	}
 #endif
 
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+	if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
+		die_if_kernel("Spinlock was trashed", regs, 1);
+	}
+#endif
+
 	if (unlikely(iir != GDB_BREAK_INSN))
 		parisc_printk_ratelimited(0, regs,
 			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
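
A closing note on the numbers in handle_break(): the IIR register holds the
trapping instruction word, and for BREAK the two immediates occupy separate
fields, which is how "break 6,6" becomes 0x0000c006. A small worked check of
that encoding; the field positions (im13 shifted left by 13, im5 in the low
five bits) are my reading of the BREAK format and should be treated as an
assumption:

    #include <assert.h>
    #include <stdio.h>

    #define SPINLOCK_BREAK_INSN 0x0000c006  /* break 6,6 */

    static unsigned int break_insn(unsigned int im13, unsigned int im5)
    {
        /* assumed field layout: opcode 0, im13 at bit 13, im5 at bit 0 */
        return (im13 << 13) | (im5 & 0x1f);
    }

    int main(void)
    {
        assert(break_insn(6, 6) == SPINLOCK_BREAK_INSN);
        printf("break 6,6 -> IIR %#010x\n", break_insn(6, 6));
        return 0;
    }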