author		Tony Lindgren <tony@atomide.com>	2017-11-28 17:12:32 +0100
committer	Tony Lindgren <tony@atomide.com>	2017-11-28 17:12:32 +0100
commit		bc686442f8a601bccac1f22506ecdb4b0d62cadd (patch)
tree		b224ab4aa2350b233da640f5850f48bc6bfeb2d0 /arch/openrisc/kernel
parent		ARM: dts: Add remote-wakeup-connected for omap OHCI (diff)
parent		ARM: dts: am437x-cm-t43: Correct the dmas property of spi0 (diff)
Merge branch 'dts-fixes' into omap-for-v4.15/fixes-dt
Diffstat (limited to 'arch/openrisc/kernel')
-rw-r--r--	arch/openrisc/kernel/Makefile		5
-rw-r--r--	arch/openrisc/kernel/dma.c		14
-rw-r--r--	arch/openrisc/kernel/entry.S		74
-rw-r--r--	arch/openrisc/kernel/head.S		239
-rw-r--r--	arch/openrisc/kernel/setup.c		165
-rw-r--r--	arch/openrisc/kernel/smp.c		259
-rw-r--r--	arch/openrisc/kernel/stacktrace.c	86
-rw-r--r--	arch/openrisc/kernel/sync-timer.c	120
-rw-r--r--	arch/openrisc/kernel/time.c		66
-rw-r--r--	arch/openrisc/kernel/traps.c		54
-rw-r--r--	arch/openrisc/kernel/unwinder.c		105
-rw-r--r--	arch/openrisc/kernel/vmlinux.h		1
12 files changed, 1021 insertions, 167 deletions
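
The recurring refactor in this series: the single global struct cpuinfo becomes the per-CPU array cpuinfo_or1k[NR_CPUS], indexed by smp_processor_id(), because each core may report different cache geometry. A minimal sketch of the access pattern, for orientation before reading the diff (illustrative only and not part of the commit; the helper name is made up, while cpuinfo_or1k, dcache_block_size, mtspr and SPR_DCBFR all appear in the diff below):

    /* Illustrative sketch, not part of the commit: the per-CPU cpuinfo
     * lookup that dma.c, setup.c and time.c switch to in this merge. */
    #include <linux/smp.h>
    #include <asm/cpuinfo.h>
    #include <asm/spr.h>
    #include <asm/spr_defs.h>

    static void example_flush_dcache_range(unsigned long start,
                                           unsigned long end)
    {
            /* Look up this core's entry instead of the old global 'cpuinfo';
             * line sizes can differ between cores. */
            struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
            unsigned long cl;

            /* Write each cache line back with a data-cache-block-flush */
            for (cl = start; cl < end; cl += cpuinfo->dcache_block_size)
                    mtspr(SPR_DCBFR, cl);
    }
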
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
index ec6d9d37cefd..2d172e79f58d 100644
--- a/arch/openrisc/kernel/Makefile
+++ b/arch/openrisc/kernel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the linux kernel.
 #
@@ -6,8 +7,10 @@ extra-y := head.o vmlinux.lds
 
 obj-y	:= setup.o or32_ksyms.o process.o dma.o \
 	   traps.o time.o irq.o entry.o ptrace.o signal.o \
-	   sys_call_table.o
+	   sys_call_table.o unwinder.o
 
+obj-$(CONFIG_SMP)		+= smp.o sync-timer.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_OF)		+= prom.o
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index b10369b7e31b..a945f00011b4 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -32,6 +32,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
 		 unsigned long next, struct mm_walk *walk)
 {
 	unsigned long cl;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
 	pte_val(*pte) |= _PAGE_CI;
 
@@ -42,7 +43,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
 	flush_tlb_page(NULL, addr);
 
 	/* Flush page out of dcache */
-	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
+	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
 		mtspr(SPR_DCBFR, cl);
 
 	return 0;
@@ -140,6 +141,7 @@ or1k_map_page(struct device *dev, struct page *page,
 {
 	unsigned long cl;
 	dma_addr_t addr = page_to_phys(page) + offset;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
 	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
 		return addr;
@@ -148,13 +150,13 @@ or1k_map_page(struct device *dev, struct page *page,
 	case DMA_TO_DEVICE:
 		/* Flush the dcache for the requested range */
 		for (cl = addr; cl < addr + size;
-		     cl += cpuinfo.dcache_block_size)
+		     cl += cpuinfo->dcache_block_size)
 			mtspr(SPR_DCBFR, cl);
 		break;
 	case DMA_FROM_DEVICE:
 		/* Invalidate the dcache for the requested range */
 		for (cl = addr; cl < addr + size;
-		     cl += cpuinfo.dcache_block_size)
+		     cl += cpuinfo->dcache_block_size)
 			mtspr(SPR_DCBIR, cl);
 		break;
 	default:
@@ -213,9 +215,10 @@ or1k_sync_single_for_cpu(struct device *dev,
 {
 	unsigned long cl;
 	dma_addr_t addr = dma_handle;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
 	/* Invalidate the dcache for the requested range */
-	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
 		mtspr(SPR_DCBIR, cl);
 }
 
@@ -226,9 +229,10 @@ or1k_sync_single_for_device(struct device *dev,
 {
 	unsigned long cl;
 	dma_addr_t addr = dma_handle;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
 	/* Flush the dcache for the requested range */
-	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
 		mtspr(SPR_DCBFR, cl);
 }
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 1b7160c79646..690d55272ba6 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -42,6 +42,61 @@
 
 /* =========================================================[ macros ]=== */
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * Trace irq on/off creating a stack frame.
+ */
+#define TRACE_IRQS_OP(trace_op)					\
+	l.sw	-8(r1),r2	/* store frame pointer */	;\
+	l.sw	-4(r1),r9	/* store return address */	;\
+	l.addi	r2,r1,0		/* move sp to fp */		;\
+	l.jal	trace_op					;\
+	 l.addi	r1,r1,-8					;\
+	l.ori	r1,r2,0		/* restore sp */		;\
+	l.lwz	r9,-4(r1)	/* restore return address */	;\
+	l.lwz	r2,-8(r1)	/* restore fp */		;\
+/*
+ * Trace irq on/off and save registers we need that would otherwise be
+ * clobbered.
+ */
+#define TRACE_IRQS_SAVE(t1,trace_op)				\
+	l.sw	-12(r1),t1	/* save extra reg */		;\
+	l.sw	-8(r1),r2	/* store frame pointer */	;\
+	l.sw	-4(r1),r9	/* store return address */	;\
+	l.addi	r2,r1,0		/* move sp to fp */		;\
+	l.jal	trace_op					;\
+	 l.addi	r1,r1,-12					;\
+	l.ori	r1,r2,0		/* restore sp */		;\
+	l.lwz	r9,-4(r1)	/* restore return address */	;\
+	l.lwz	r2,-8(r1)	/* restore fp */		;\
+	l.lwz	t1,-12(r1)	/* restore extra reg */
+
+#define TRACE_IRQS_OFF	TRACE_IRQS_OP(trace_hardirqs_off)
+#define TRACE_IRQS_ON	TRACE_IRQS_OP(trace_hardirqs_on)
+#define TRACE_IRQS_ON_SYSCALL					\
+	TRACE_IRQS_SAVE(r10,trace_hardirqs_on)			;\
+	l.lwz	r3,PT_GPR3(r1)					;\
+	l.lwz	r4,PT_GPR4(r1)					;\
+	l.lwz	r5,PT_GPR5(r1)					;\
+	l.lwz	r6,PT_GPR6(r1)					;\
+	l.lwz	r7,PT_GPR7(r1)					;\
+	l.lwz	r8,PT_GPR8(r1)					;\
+	l.lwz	r11,PT_GPR11(r1)
+#define TRACE_IRQS_OFF_ENTRY					\
+	l.lwz	r5,PT_SR(r1)					;\
+	l.andi	r3,r5,(SPR_SR_IEE|SPR_SR_TEE)			;\
+	l.sfeq	r5,r0	/* skip trace if irqs were already off */;\
+	l.bf	1f						;\
+	 l.nop							;\
+	TRACE_IRQS_SAVE(r4,trace_hardirqs_off)			;\
+1:
+#else
+#define TRACE_IRQS_OFF
+#define TRACE_IRQS_ON
+#define TRACE_IRQS_OFF_ENTRY
+#define TRACE_IRQS_ON_SYSCALL
+#endif
+
 /*
  * We need to disable interrupts at beginning of RESTORE_ALL
  * since interrupt might come in after we've loaded EPC return address
@@ -124,6 +179,7 @@ handler:							;\
 	/* r30 already save */					;\
 /*	l.sw    PT_GPR30(r1),r30*/				;\
 	l.sw    PT_GPR31(r1),r31				;\
+	TRACE_IRQS_OFF_ENTRY					;\
 	/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
 	l.addi	r30,r0,-1					;\
 	l.sw	PT_ORIG_GPR11(r1),r30
@@ -557,9 +613,6 @@ _string_syscall_return:
 	.align 4
 
 ENTRY(_sys_call_handler)
-	/* syscalls run with interrupts enabled */
-	ENABLE_INTERRUPTS(r29)		// enable interrupts, r29 is temp
-
 	/* r1, EPCR, ESR a already saved */
 	l.sw	PT_GPR2(r1),r2
 	/* r3-r8 must be saved because syscall restart relies
@@ -597,6 +650,10 @@ ENTRY(_sys_call_handler)
 /*	l.sw    PT_GPR30(r1),r30 */
 
 _syscall_check_trace_enter:
+	/* syscalls run with interrupts enabled */
+	TRACE_IRQS_ON_SYSCALL
+	ENABLE_INTERRUPTS(r29)		// enable interrupts, r29 is temp
+
 	/* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */
 	l.lwz	r30,TI_FLAGS(r10)
 	l.andi	r30,r30,_TIF_SYSCALL_TRACE
@@ -657,6 +714,7 @@ _syscall_check_trace_leave:
 _syscall_check_work:
 	/* Here we need to disable interrupts */
 	DISABLE_INTERRUPTS(r27,r29)
+	TRACE_IRQS_OFF
 	l.lwz	r30,TI_FLAGS(r10)
 	l.andi	r30,r30,_TIF_WORK_MASK
 	l.sfne	r30,r0
@@ -871,6 +929,7 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
 
 _resume_userspace:
 	DISABLE_INTERRUPTS(r3,r4)
+	TRACE_IRQS_OFF
 	l.lwz	r4,TI_FLAGS(r10)
 	l.andi	r13,r4,_TIF_WORK_MASK
 	l.sfeqi	r13,0
@@ -909,6 +968,15 @@ _work_pending:
 	l.lwz	r8,PT_GPR8(r1)
 
 _restore_all:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	l.lwz	r4,PT_SR(r1)
+	l.andi	r3,r4,(SPR_SR_IEE|SPR_SR_TEE)
+	l.sfeq	r3,r0		/* skip trace if irqs were off */
+	l.bf	skip_hardirqs_on
+	 l.nop
+	TRACE_IRQS_ON
+skip_hardirqs_on:
+#endif
 	RESTORE_ALL
 	/* This returns to userspace code */
 
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 1e87913576e3..fb02b2a1d6f2 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -49,9 +49,31 @@
 
 /* ============================================[ tmp store locations ]=== */
 
+#define SPR_SHADOW_GPR(x)	((x) + SPR_GPR_BASE + 32)
+
 /*
  * emergency_print temporary stores
  */
+#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
+#define EMERGENCY_PRINT_STORE_GPR4	l.mtspr r0,r4,SPR_SHADOW_GPR(14)
+#define EMERGENCY_PRINT_LOAD_GPR4	l.mfspr r4,r0,SPR_SHADOW_GPR(14)
+
+#define EMERGENCY_PRINT_STORE_GPR5	l.mtspr r0,r5,SPR_SHADOW_GPR(15)
+#define EMERGENCY_PRINT_LOAD_GPR5	l.mfspr r5,r0,SPR_SHADOW_GPR(15)
+
+#define EMERGENCY_PRINT_STORE_GPR6	l.mtspr r0,r6,SPR_SHADOW_GPR(16)
+#define EMERGENCY_PRINT_LOAD_GPR6	l.mfspr r6,r0,SPR_SHADOW_GPR(16)
+
+#define EMERGENCY_PRINT_STORE_GPR7	l.mtspr r0,r7,SPR_SHADOW_GPR(7)
+#define EMERGENCY_PRINT_LOAD_GPR7	l.mfspr r7,r0,SPR_SHADOW_GPR(7)
+
+#define EMERGENCY_PRINT_STORE_GPR8	l.mtspr r0,r8,SPR_SHADOW_GPR(8)
+#define EMERGENCY_PRINT_LOAD_GPR8	l.mfspr r8,r0,SPR_SHADOW_GPR(8)
+
+#define EMERGENCY_PRINT_STORE_GPR9	l.mtspr r0,r9,SPR_SHADOW_GPR(9)
+#define EMERGENCY_PRINT_LOAD_GPR9	l.mfspr r9,r0,SPR_SHADOW_GPR(9)
+
+#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
 #define EMERGENCY_PRINT_STORE_GPR4	l.sw    0x20(r0),r4
 #define EMERGENCY_PRINT_LOAD_GPR4	l.lwz   r4,0x20(r0)
 
@@ -70,13 +92,28 @@
 #define EMERGENCY_PRINT_STORE_GPR9	l.sw    0x34(r0),r9
 #define EMERGENCY_PRINT_LOAD_GPR9	l.lwz   r9,0x34(r0)
 
+#endif
 
 /*
  * TLB miss handlers temorary stores
  */
-#define EXCEPTION_STORE_GPR9		l.sw    0x10(r0),r9
-#define EXCEPTION_LOAD_GPR9		l.lwz   r9,0x10(r0)
+#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
+#define EXCEPTION_STORE_GPR2		l.mtspr r0,r2,SPR_SHADOW_GPR(2)
+#define EXCEPTION_LOAD_GPR2		l.mfspr r2,r0,SPR_SHADOW_GPR(2)
+
+#define EXCEPTION_STORE_GPR3		l.mtspr r0,r3,SPR_SHADOW_GPR(3)
+#define EXCEPTION_LOAD_GPR3		l.mfspr r3,r0,SPR_SHADOW_GPR(3)
+
+#define EXCEPTION_STORE_GPR4		l.mtspr r0,r4,SPR_SHADOW_GPR(4)
+#define EXCEPTION_LOAD_GPR4		l.mfspr r4,r0,SPR_SHADOW_GPR(4)
+
+#define EXCEPTION_STORE_GPR5		l.mtspr r0,r5,SPR_SHADOW_GPR(5)
+#define EXCEPTION_LOAD_GPR5		l.mfspr r5,r0,SPR_SHADOW_GPR(5)
+
+#define EXCEPTION_STORE_GPR6		l.mtspr r0,r6,SPR_SHADOW_GPR(6)
+#define EXCEPTION_LOAD_GPR6		l.mfspr r6,r0,SPR_SHADOW_GPR(6)
+#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
 #define EXCEPTION_STORE_GPR2		l.sw    0x64(r0),r2
 #define EXCEPTION_LOAD_GPR2		l.lwz   r2,0x64(r0)
 
@@ -92,35 +129,67 @@
 #define EXCEPTION_STORE_GPR6		l.sw    0x74(r0),r6
 #define EXCEPTION_LOAD_GPR6		l.lwz   r6,0x74(r0)
 
+#endif
 
 /*
  * EXCEPTION_HANDLE temporary stores
  */
 
+#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
+#define EXCEPTION_T_STORE_GPR30		l.mtspr r0,r30,SPR_SHADOW_GPR(30)
+#define EXCEPTION_T_LOAD_GPR30(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(30)
+
+#define EXCEPTION_T_STORE_GPR10		l.mtspr r0,r10,SPR_SHADOW_GPR(10)
+#define EXCEPTION_T_LOAD_GPR10(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(10)
+
+#define EXCEPTION_T_STORE_SP		l.mtspr r0,r1,SPR_SHADOW_GPR(1)
+#define EXCEPTION_T_LOAD_SP(reg)	l.mfspr reg,r0,SPR_SHADOW_GPR(1)
+
+#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
 #define EXCEPTION_T_STORE_GPR30		l.sw    0x78(r0),r30
 #define EXCEPTION_T_LOAD_GPR30(reg)	l.lwz   reg,0x78(r0)
 
 #define EXCEPTION_T_STORE_GPR10		l.sw    0x7c(r0),r10
 #define EXCEPTION_T_LOAD_GPR10(reg)	l.lwz   reg,0x7c(r0)
 
-#define EXCEPTION_T_STORE_SP		l.sw 0x80(r0),r1
+#define EXCEPTION_T_STORE_SP		l.sw    0x80(r0),r1
 #define EXCEPTION_T_LOAD_SP(reg)	l.lwz   reg,0x80(r0)
-
-/*
- * For UNHANLDED_EXCEPTION
- */
-
-#define EXCEPTION_T_STORE_GPR31		l.sw    0x84(r0),r31
-#define EXCEPTION_T_LOAD_GPR31(reg)	l.lwz   reg,0x84(r0)
+#endif
 
 /* =========================================================[ macros ]=== */
-
+#ifdef CONFIG_SMP
+#define GET_CURRENT_PGD(reg,t1)					\
+	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
+	l.mfspr	t1,r0,SPR_COREID				;\
+	l.slli	t1,t1,2						;\
+	l.add	reg,reg,t1					;\
+	tophys  (t1,reg)					;\
+	l.lwz   reg,0(t1)
+#else
 #define GET_CURRENT_PGD(reg,t1)					\
 	LOAD_SYMBOL_2_GPR(reg,current_pgd)			;\
 	tophys  (t1,reg)					;\
 	l.lwz   reg,0(t1)
+#endif
 
+/* Load r10 from current_thread_info_set - clobbers r1 and r30 */
+#ifdef CONFIG_SMP
+#define GET_CURRENT_THREAD_INFO					\
+	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
+	tophys  (r30,r1)					;\
+	l.mfspr	r10,r0,SPR_COREID				;\
+	l.slli	r10,r10,2					;\
+	l.add	r30,r30,r10					;\
+	/* r10: current_thread_info */				;\
+	l.lwz   r10,0(r30)
+#else
+#define GET_CURRENT_THREAD_INFO					\
+	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
+	tophys  (r30,r1)					;\
+	/* r10: current_thread_info */				;\
+	l.lwz   r10,0(r30)
+#endif
 
 /*
  * DSCR: this is a common hook for handling exceptions. it will save
@@ -163,10 +232,7 @@
 	l.bnf	2f			/* kernel_mode */	;\
 	 EXCEPTION_T_STORE_SP		/* delay slot */	;\
 1: /* user_mode: */						;\
-	LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)		;\
-	tophys  (r30,r1)					;\
-	/* r10: current_thread_info */				;\
-	l.lwz   r10,0(r30)					;\
+	GET_CURRENT_THREAD_INFO					;\
 	tophys  (r30,r10)					;\
 	l.lwz   r1,(TI_KSP)(r30)				;\
 	/* fall through */					;\
@@ -226,7 +292,7 @@
  *
  */
 #define UNHANDLED_EXCEPTION(handler)				\
-	EXCEPTION_T_STORE_GPR31					;\
+	EXCEPTION_T_STORE_GPR30					;\
 	EXCEPTION_T_STORE_GPR10					;\
 	EXCEPTION_T_STORE_SP					;\
 	/* temporary store r3, r9 into r1, r10 */		;\
@@ -255,35 +321,35 @@
 	/* r1: KSP, r10: current, r31: __pa(KSP) */		;\
 	/* r12: temp, syscall indicator, r13 temp */		;\
 	l.addi	r1,r1,-(INT_FRAME_SIZE)				;\
-	/* r1 is KSP, r31 is __pa(KSP) */			;\
-	tophys  (r31,r1)					;\
-	l.sw	PT_GPR12(r31),r12				;\
+	/* r1 is KSP, r30 is __pa(KSP) */			;\
+	tophys  (r30,r1)					;\
+	l.sw	PT_GPR12(r30),r12				;\
 	l.mfspr r12,r0,SPR_EPCR_BASE				;\
-	l.sw	PT_PC(r31),r12					;\
+	l.sw	PT_PC(r30),r12					;\
 	l.mfspr r12,r0,SPR_ESR_BASE				;\
-	l.sw	PT_SR(r31),r12					;\
+	l.sw	PT_SR(r30),r12					;\
 	/* save r31 */						;\
-	EXCEPTION_T_LOAD_GPR31(r12)				;\
-	l.sw	PT_GPR31(r31),r12				;\
+	EXCEPTION_T_LOAD_GPR30(r12)				;\
+	l.sw	PT_GPR30(r30),r12				;\
 	/* save r10 as was prior to exception */		;\
 	EXCEPTION_T_LOAD_GPR10(r12)				;\
-	l.sw	PT_GPR10(r31),r12				;\
+	l.sw	PT_GPR10(r30),r12				;\
 	/* save PT_SP as was prior to exception */		;\
 	EXCEPTION_T_LOAD_SP(r12)				;\
-	l.sw	PT_SP(r31),r12					;\
-	l.sw	PT_GPR13(r31),r13				;\
+	l.sw	PT_SP(r30),r12					;\
+	l.sw	PT_GPR13(r30),r13				;\
 	/* --> */						;\
 	/* save exception r4, set r4 = EA */			;\
-	l.sw	PT_GPR4(r31),r4					;\
+	l.sw	PT_GPR4(r30),r4					;\
 	l.mfspr r4,r0,SPR_EEAR_BASE				;\
 	/* r12 == 1 if we come from syscall */			;\
 	CLEAR_GPR(r12)						;\
 	/* ----- play a MMU trick ----- */			;\
-	l.ori	r31,r0,(EXCEPTION_SR)				;\
-	l.mtspr	r0,r31,SPR_ESR_BASE				;\
+	l.ori	r30,r0,(EXCEPTION_SR)				;\
+	l.mtspr	r0,r30,SPR_ESR_BASE				;\
 	/* r31: EA address of handler */			;\
-	LOAD_SYMBOL_2_GPR(r31,handler)				;\
-	l.mtspr	r0,r31,SPR_EPCR_BASE				;\
+	LOAD_SYMBOL_2_GPR(r30,handler)				;\
+	l.mtspr	r0,r30,SPR_EPCR_BASE				;\
 	l.rfe
 
 /* =====================================================[ exceptions] === */
@@ -487,6 +553,12 @@ _start:
 	CLEAR_GPR(r30)
 	CLEAR_GPR(r31)
 
+#ifdef CONFIG_SMP
+	l.mfspr	r26,r0,SPR_COREID
+	l.sfeq	r26,r0
+	l.bnf	secondary_wait
+	 l.nop
+#endif
 	/*
 	 * set up initial ksp and current
 	 */
@@ -638,6 +710,100 @@ _flush_tlb:
 	l.jr	r9
 	 l.nop
 
+#ifdef CONFIG_SMP
+secondary_wait:
+	/* Doze the cpu until we are asked to run */
+	/* If we dont have power management skip doze */
+	l.mfspr	r25,r0,SPR_UPR
+	l.andi	r25,r25,SPR_UPR_PMP
+	l.sfeq	r25,r0
+	l.bf	secondary_check_release
+	 l.nop
+
+	/* Setup special secondary exception handler */
+	LOAD_SYMBOL_2_GPR(r3, _secondary_evbar)
+	tophys(r25,r3)
+	l.mtspr	r0,r25,SPR_EVBAR
+
+	/* Enable Interrupts */
+	l.mfspr	r25,r0,SPR_SR
+	l.ori	r25,r25,SPR_SR_IEE
+	l.mtspr	r0,r25,SPR_SR
+
+	/* Unmask interrupts interrupts */
+	l.mfspr	r25,r0,SPR_PICMR
+	l.ori	r25,r25,0xffff
+	l.mtspr	r0,r25,SPR_PICMR
+
+	/* Doze */
+	l.mfspr	r25,r0,SPR_PMR
+	LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME)
+	l.or	r25,r25,r3
+	l.mtspr	r0,r25,SPR_PMR
+
+	/* Wakeup - Restore exception handler */
+	l.mtspr	r0,r0,SPR_EVBAR
+
+secondary_check_release:
+	/*
+	 * Check if we actually got the release signal, if not go-back to
+	 * sleep.
+	 */
+	l.mfspr	r25,r0,SPR_COREID
+	LOAD_SYMBOL_2_GPR(r3, secondary_release)
+	tophys(r4, r3)
+	l.lwz	r3,0(r4)
+	l.sfeq	r25,r3
+	l.bnf	secondary_wait
+	 l.nop
+	/* fall through to secondary_init */
+
+secondary_init:
+	/*
+	 * set up initial ksp and current
+	 */
+	LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
+	tophys	(r30,r10)
+	l.lwz	r10,0(r30)
+	l.addi	r1,r10,THREAD_SIZE
+	tophys	(r30,r10)
+	l.sw	TI_KSP(r30),r1
+
+	l.jal	_ic_enable
+	 l.nop
+
+	l.jal	_dc_enable
+	 l.nop
+
+	l.jal	_flush_tlb
+	 l.nop
+
+	/*
+	 * enable dmmu & immu
+	 */
+	l.mfspr	r30,r0,SPR_SR
+	l.movhi	r28,hi(SPR_SR_DME | SPR_SR_IME)
+	l.ori	r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
+	l.or	r30,r30,r28
+	/*
+	 * This is a bit tricky, we need to switch over from physical addresses
+	 * to virtual addresses on the fly.
+	 * To do that, we first set up ESR with the IME and DME bits set.
+	 * Then EPCR is set to secondary_start and then a l.rfe is issued to
+	 * "jump" to that.
+	 */
+	l.mtspr	r0,r30,SPR_ESR_BASE
+	LOAD_SYMBOL_2_GPR(r30, secondary_start)
+	l.mtspr	r0,r30,SPR_EPCR_BASE
+	l.rfe
+
+secondary_start:
+	LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
+	l.jr	r30
+	 l.nop
+
+#endif
+
 /* ========================================[ cache ]=== */
 
 	/* alignment here so we don't change memory offsets with
@@ -1533,6 +1699,17 @@ ENTRY(_early_uart_init)
 	l.jr	r9
 	 l.nop
 
+	.align	0x1000
+	.global _secondary_evbar
+_secondary_evbar:
+
+	.space 0x800
+	/* Just disable interrupts and Return */
+	l.ori	r3,r0,SPR_SR_SM
+	l.mtspr	r0,r3,SPR_ESR_BASE
+	l.rfe
+
+
 .section .rodata
 _string_unhandled_exception:
 	.string "\n\rRunarunaround: Unhandled exception 0x\0"
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index dbf5ee95a0d5..9d28ab14d139 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -93,7 +93,7 @@ static void __init setup_memory(void)
 	memblock_dump_all();
 }
 
-struct cpuinfo cpuinfo;
+struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
 
 static void print_cpuinfo(void)
 {
@@ -101,12 +101,13 @@ static void print_cpuinfo(void)
 	unsigned long vr = mfspr(SPR_VR);
 	unsigned int version;
 	unsigned int revision;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
 	version = (vr & SPR_VR_VER) >> 24;
 	revision = (vr & SPR_VR_REV);
 
 	printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n",
-	       version, revision, cpuinfo.clock_frequency / 1000000);
+	       version, revision, cpuinfo->clock_frequency / 1000000);
 
 	if (!(upr & SPR_UPR_UP)) {
 		printk(KERN_INFO
@@ -117,15 +118,15 @@ static void print_cpuinfo(void)
 	if (upr & SPR_UPR_DCP)
 		printk(KERN_INFO
 		       "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
-		       cpuinfo.dcache_size, cpuinfo.dcache_block_size,
-		       cpuinfo.dcache_ways);
+		       cpuinfo->dcache_size, cpuinfo->dcache_block_size,
+		       cpuinfo->dcache_ways);
 	else
 		printk(KERN_INFO "-- dcache disabled\n");
 	if (upr & SPR_UPR_ICP)
 		printk(KERN_INFO
 		       "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
-		       cpuinfo.icache_size, cpuinfo.icache_block_size,
-		       cpuinfo.icache_ways);
+		       cpuinfo->icache_size, cpuinfo->icache_block_size,
+		       cpuinfo->icache_ways);
 	else
 		printk(KERN_INFO "-- icache disabled\n");
 
@@ -153,38 +154,58 @@ static void print_cpuinfo(void)
 		printk(KERN_INFO "-- custom unit(s)\n");
 }
 
+static struct device_node *setup_find_cpu_node(int cpu)
+{
+	u32 hwid;
+	struct device_node *cpun;
+	struct device_node *cpus = of_find_node_by_path("/cpus");
+
+	for_each_available_child_of_node(cpus, cpun) {
+		if (of_property_read_u32(cpun, "reg", &hwid))
+			continue;
+		if (hwid == cpu)
+			return cpun;
+	}
+
+	return NULL;
+}
+
 void __init setup_cpuinfo(void)
 {
 	struct device_node *cpu;
 	unsigned long iccfgr, dccfgr;
 	unsigned long cache_set_size;
+	int cpu_id = smp_processor_id();
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id];
 
-	cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+	cpu = setup_find_cpu_node(cpu_id);
 	if (!cpu)
-		panic("No compatible CPU found in device tree...\n");
+		panic("Couldn't find CPU%d in device tree...\n", cpu_id);
 
 	iccfgr = mfspr(SPR_ICCFGR);
-	cpuinfo.icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
+	cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
 	cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
-	cpuinfo.icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
-	cpuinfo.icache_size =
-	    cache_set_size * cpuinfo.icache_ways * cpuinfo.icache_block_size;
+	cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
+	cpuinfo->icache_size =
+	    cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size;
 
 	dccfgr = mfspr(SPR_DCCFGR);
-	cpuinfo.dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
+	cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
 	cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
-	cpuinfo.dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
-	cpuinfo.dcache_size =
-	    cache_set_size * cpuinfo.dcache_ways * cpuinfo.dcache_block_size;
+	cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
+	cpuinfo->dcache_size =
+	    cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size;
 
 	if (of_property_read_u32(cpu, "clock-frequency",
-				 &cpuinfo.clock_frequency)) {
+				 &cpuinfo->clock_frequency)) {
 		printk(KERN_WARNING
 		       "Device tree missing CPU 'clock-frequency' parameter."
 		       "Assuming frequency 25MHZ"
 		       "This is probably not what you want.");
 	}
 
+	cpuinfo->coreid = mfspr(SPR_COREID);
+
 	of_node_put(cpu);
 
 	print_cpuinfo();
@@ -251,8 +272,8 @@ void __init detect_unit_config(unsigned long upr, unsigned long mask,
 void calibrate_delay(void)
 {
 	const int *val;
-	struct device_node *cpu = NULL;
-	cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+	struct device_node *cpu = setup_find_cpu_node(smp_processor_id());
+
 	val = of_get_property(cpu, "clock-frequency", NULL);
 	if (!val)
 		panic("no cpu 'clock-frequency' parameter in device tree");
@@ -268,6 +289,10 @@ void __init setup_arch(char **cmdline_p)
 
 	setup_cpuinfo();
 
+#ifdef CONFIG_SMP
+	smp_init_cpus();
+#endif
+
 	/* process 1's initial memory region is the kernel code/data */
 	init_mm.start_code = (unsigned long)_stext;
 	init_mm.end_code = (unsigned long)_etext;
@@ -302,54 +327,78 @@ void __init setup_arch(char **cmdline_p)
 
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
-	unsigned long vr;
-	int version, revision;
+	unsigned int vr, cpucfgr;
+	unsigned int avr;
+	unsigned int version;
+	struct cpuinfo_or1k *cpuinfo = v;
 
 	vr = mfspr(SPR_VR);
-	version = (vr & SPR_VR_VER) >> 24;
-	revision = vr & SPR_VR_REV;
-
-	seq_printf(m,
-		   "cpu\t\t: OpenRISC-%x\n"
-		   "revision\t: %d\n"
-		   "frequency\t: %ld\n"
-		   "dcache size\t: %d bytes\n"
-		   "dcache block size\t: %d bytes\n"
-		   "dcache ways\t: %d\n"
-		   "icache size\t: %d bytes\n"
-		   "icache block size\t: %d bytes\n"
-		   "icache ways\t: %d\n"
-		   "immu\t\t: %d entries, %lu ways\n"
-		   "dmmu\t\t: %d entries, %lu ways\n"
-		   "bogomips\t: %lu.%02lu\n",
-		   version,
-		   revision,
-		   loops_per_jiffy * HZ,
-		   cpuinfo.dcache_size,
-		   cpuinfo.dcache_block_size,
-		   cpuinfo.dcache_ways,
-		   cpuinfo.icache_size,
-		   cpuinfo.icache_block_size,
-		   cpuinfo.icache_ways,
-		   1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
-		   1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW),
-		   1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
-		   1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW),
-		   (loops_per_jiffy * HZ) / 500000,
-		   ((loops_per_jiffy * HZ) / 5000) % 100);
+	cpucfgr = mfspr(SPR_CPUCFGR);
+
+#ifdef CONFIG_SMP
+	seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
+#endif
+	if (vr & SPR_VR_UVRP) {
+		vr = mfspr(SPR_VR2);
+		version = vr & SPR_VR2_VER;
+		avr = mfspr(SPR_AVR);
+		seq_printf(m, "cpu architecture\t: "
+			   "OpenRISC 1000 (%d.%d-rev%d)\n",
+			   (avr >> 24) & 0xff,
+			   (avr >> 16) & 0xff,
+			   (avr >> 8) & 0xff);
+		seq_printf(m, "cpu implementation id\t: 0x%x\n",
+			   (vr & SPR_VR2_CPUID) >> 24);
+		seq_printf(m, "cpu version\t\t: 0x%x\n", version);
+	} else {
+		version = (vr & SPR_VR_VER) >> 24;
+		seq_printf(m, "cpu\t\t\t: OpenRISC-%x\n", version);
+		seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV);
+	}
+	seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ);
+	seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size);
+	seq_printf(m, "dcache block size\t: %d bytes\n",
+		   cpuinfo->dcache_block_size);
+	seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways);
+	seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size);
+	seq_printf(m, "icache block size\t: %d bytes\n",
+		   cpuinfo->icache_block_size);
+	seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways);
+	seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n",
+		   1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+		   1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
+	seq_printf(m, "dmmu\t\t\t: %d entries, %lu ways\n",
+		   1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+		   1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW));
+	seq_printf(m, "bogomips\t\t: %lu.%02lu\n",
+		   (loops_per_jiffy * HZ) / 500000,
+		   ((loops_per_jiffy * HZ) / 5000) % 100);
+
+	seq_puts(m, "features\t\t: ");
+	seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB32S ? "orbis32" : "");
+	seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB64S ? "orbis64" : "");
+	seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF32S ? "orfpx32" : "");
+	seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF64S ? "orfpx64" : "");
+	seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OV64S ? "orvdx64" : "");
+	seq_puts(m, "\n");
+
+	seq_puts(m, "\n");
+
 	return 0;
 }
 
-static void *c_start(struct seq_file *m, loff_t * pos)
+static void *c_start(struct seq_file *m, loff_t *pos)
 {
-	/* We only have one CPU... */
-	return *pos < 1 ? (void *)1 : NULL;
+	*pos = cpumask_next(*pos - 1, cpu_online_mask);
+	if ((*pos) < nr_cpu_ids)
+		return &cpuinfo_or1k[*pos];
+	return NULL;
 }
 
-static void *c_next(struct seq_file *m, void *v, loff_t * pos)
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	++*pos;
-	return NULL;
+	(*pos)++;
+	return c_start(m, pos);
 }
 
 static void c_stop(struct seq_file *m, void *v)
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
new file mode 100644
index 000000000000..7d518ee8bddc
--- /dev/null
+++ b/arch/openrisc/kernel/smp.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * Based on arm64 and arc implementations
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <asm/cpuinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/time.h>
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+
+unsigned long secondary_release = -1;
+struct thread_info *secondary_thread_info;
+
+enum ipi_msg_type {
+	IPI_WAKEUP,
+	IPI_RESCHEDULE,
+	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
+};
+
+static DEFINE_SPINLOCK(boot_lock);
+
+static void boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	/*
+	 * set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	spin_lock(&boot_lock);
+
+	secondary_release = cpu;
+	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);
+
+	/*
+	 * now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	spin_unlock(&boot_lock);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+void __init smp_init_cpus(void)
+{
+	int i;
+
+	for (i = 0; i < NR_CPUS; i++)
+		set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	int i;
+
+	/*
+	 * Initialise the present map, which describes the set of CPUs
+	 * actually populated at the present time.
+	 */
+	for (i = 0; i < max_cpus; i++)
+		set_cpu_present(i, true);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static DECLARE_COMPLETION(cpu_running);
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+	if (smp_cross_call == NULL) {
+		pr_warn("CPU%u: failed to start, IPI controller missing",
+			cpu);
+		return -EIO;
+	}
+
+	secondary_thread_info = task_thread_info(idle);
+	current_pgd[cpu] = init_mm.pgd;
+
+	boot_secondary(cpu, idle);
+	if (!wait_for_completion_timeout(&cpu_running,
+					msecs_to_jiffies(1000))) {
+		pr_crit("CPU%u: failed to start\n", cpu);
+		return -EIO;
+	}
+	synchronise_count_master(cpu);
+
+	return 0;
+}
+
+asmlinkage __init void secondary_start_kernel(void)
+{
+	struct mm_struct *mm = &init_mm;
+	unsigned int cpu = smp_processor_id();
+	/*
+	 * All kernel threads share the same mm context; grab a
+	 * reference and switch to it.
+	 */
+	atomic_inc(&mm->mm_count);
+	current->active_mm = mm;
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+
+	pr_info("CPU%u: Booted secondary processor\n", cpu);
+
+	setup_cpuinfo();
+	openrisc_clockevent_init();
+
+	notify_cpu_starting(cpu);
+
+	/*
+	 * OK, now it's safe to let the boot CPU continue
+	 */
+	complete(&cpu_running);
+
+	synchronise_count_slave(cpu);
+	set_cpu_online(cpu, true);
+
+	local_irq_enable();
+
+	preempt_disable();
+	/*
+	 * OK, it's off to the idle thread for us
+	 */
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+void handle_IPI(unsigned int ipi_msg)
+{
+	unsigned int cpu = smp_processor_id();
+
+	switch (ipi_msg) {
+	case IPI_WAKEUP:
+		break;
+
+	case IPI_RESCHEDULE:
+		scheduler_ipi();
+		break;
+
+	case IPI_CALL_FUNC:
+		generic_smp_call_function_interrupt();
+		break;
+
+	case IPI_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;
+
+	default:
+		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
+		break;
+	}
+}
+
+void smp_send_reschedule(int cpu)
+{
+	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+static void stop_this_cpu(void *dummy)
+{
+	/* Remove this CPU */
+	set_cpu_online(smp_processor_id(), false);
+
+	local_irq_disable();
+	/* CPU Doze */
+	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
+		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
+	/* If that didn't work, infinite loop */
+	while (1)
+		;
+}
+
+void smp_send_stop(void)
+{
+	smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+/* not supported, yet */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
+
+void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
+{
+	smp_cross_call = fn;
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+/* TLB flush operations - Performed on each CPU*/
+static inline void ipi_flush_tlb_all(void *ignored)
+{
+	local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+/*
+ * FIXME: implement proper functionality instead of flush_tlb_all.
+ * *But*, as things currently stands, the local_tlb_flush_* functions will
+ * all boil down to local_tlb_flush_all anyway.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+		     unsigned long start, unsigned long end)
+{
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+/* Instruction cache invalidate - performed on each cpu */
+static void ipi_icache_page_inv(void *arg)
+{
+	struct page *page = arg;
+
+	local_icache_page_inv(page);
+}
+
+void smp_icache_page_inv(struct page *page)
+{
+	on_each_cpu(ipi_icache_page_inv, page, 1);
+}
+EXPORT_SYMBOL(smp_icache_page_inv);
diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c
new file mode 100644
index 000000000000..43f140a28bc7
--- /dev/null
+++ b/arch/openrisc/kernel/stacktrace.c
@@ -0,0 +1,86 @@
+/*
+ * Stack trace utility for OpenRISC
+ *
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Losely based on work from sh and powerpc.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/stacktrace.h>
+
+#include <asm/processor.h>
+#include <asm/unwinder.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+static void
+save_stack_address(void *data, unsigned long addr, int reliable)
+{
+	struct stack_trace *trace = data;
+
+	if (!reliable)
+		return;
+
+	if (trace->skip > 0) {
+		trace->skip--;
+		return;
+	}
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = addr;
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	unwind_stack(trace, (unsigned long *) &trace, save_stack_address);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+{
+	struct stack_trace *trace = (struct stack_trace *)data;
+
+	if (!reliable)
+		return;
+
+	if (in_sched_functions(addr))
+		return;
+
+	if (trace->skip > 0) {
+		trace->skip--;
+		return;
+	}
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = addr;
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	unsigned long *sp = NULL;
+
+	if (tsk == current)
+		sp = (unsigned long *) &sp;
+	else
+		sp = (unsigned long *) KSTK_ESP(tsk);
+
+	unwind_stack(trace, sp, save_stack_address_nosched);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	unwind_stack(trace, (unsigned long *) regs->sp,
+		     save_stack_address_nosched);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/openrisc/kernel/sync-timer.c b/arch/openrisc/kernel/sync-timer.c
new file mode 100644
index 000000000000..ed8d835caca1
--- /dev/null
+++ b/arch/openrisc/kernel/sync-timer.c
@@ -0,0 +1,120 @@
+/*
+ * OR1K timer synchronisation
+ *
+ * Based on work from MIPS implementation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 next time
+ * value. This can cause a small timewarp for CPU0. All other CPU's should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <linux/cpumask.h>
+
+#include <asm/time.h>
+#include <asm/timex.h>
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+
+#include <asm/spr.h>
+
+static unsigned int initcount;
+static atomic_t count_count_start = ATOMIC_INIT(0);
+static atomic_t count_count_stop = ATOMIC_INIT(0);
+
+#define COUNTON 100
+#define NR_LOOPS 3
+
+void synchronise_count_master(int cpu)
+{
+	int i;
+	unsigned long flags;
+
+	pr_info("Synchronize counters for CPU %u: ", cpu);
+
+	local_irq_save(flags);
+
+	/*
+	 * We loop a few times to get a primed instruction cache,
+	 * then the last pass is more or less synchronised and
+	 * the master and slaves each set their cycle counters to a known
+	 * value all at once. This reduces the chance of having random offsets
+	 * between the processors, and guarantees that the maximum
+	 * delay between the cycle counters is never bigger than
+	 * the latency of information-passing (cachelines) between
+	 * two CPUs.
+	 */
+
+	for (i = 0; i < NR_LOOPS; i++) {
+		/* slaves loop on '!= 2' */
+		while (atomic_read(&count_count_start) != 1)
+			mb();
+		atomic_set(&count_count_stop, 0);
+		smp_wmb();
+
+		/* Let the slave writes its count register */
+		atomic_inc(&count_count_start);
+
+		/* Count will be initialised to current timer */
+		if (i == 1)
+			initcount = get_cycles();
+
+		/*
+		 * Everyone initialises count in the last loop:
+		 */
+		if (i == NR_LOOPS-1)
+			openrisc_timer_set(initcount);
+
+		/*
+		 * Wait for slave to leave the synchronization point:
+		 */
+		while (atomic_read(&count_count_stop) != 1)
+			mb();
+		atomic_set(&count_count_start, 0);
+		smp_wmb();
+		atomic_inc(&count_count_stop);
+	}
+	/* Arrange for an interrupt in a short while */
+	openrisc_timer_set_next(COUNTON);
+
+	local_irq_restore(flags);
+
+	/*
+	 * i386 code reported the skew here, but the
+	 * count registers were almost certainly out of sync
+	 * so no point in alarming people
+	 */
+	pr_cont("done.\n");
+}
+
+void synchronise_count_slave(int cpu)
+{
+	int i;
+
+	/*
+	 * Not every cpu is online at the time this gets called,
+	 * so we first wait for the master to say everyone is ready
+	 */
+
+	for (i = 0; i < NR_LOOPS; i++) {
+		atomic_inc(&count_count_start);
+		while (atomic_read(&count_count_start) != 2)
+			mb();
+
+		/*
+		 * Everyone initialises count in the last loop:
+		 */
+		if (i == NR_LOOPS-1)
+			openrisc_timer_set(initcount);
+
+		atomic_inc(&count_count_stop);
+		while (atomic_read(&count_count_stop) != 2)
+			mb();
+	}
+	/* Arrange for an interrupt in a short while */
+	openrisc_timer_set_next(COUNTON);
+}
+#undef NR_LOOPS
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
index 687c11d048d7..6baecea27080 100644
--- a/arch/openrisc/kernel/time.c
+++ b/arch/openrisc/kernel/time.c
@@ -27,8 +27,14 @@
 
 #include <asm/cpuinfo.h>
 
-static int openrisc_timer_set_next_event(unsigned long delta,
-					 struct clock_event_device *dev)
+/* Test the timer ticks to count, used in sync routine */
+inline void openrisc_timer_set(unsigned long count)
+{
+	mtspr(SPR_TTCR, count);
+}
+
+/* Set the timer to trigger in delta cycles */
+inline void openrisc_timer_set_next(unsigned long delta)
 {
 	u32 c;
 
@@ -44,7 +50,12 @@ static int openrisc_timer_set_next_event(unsigned long delta,
 	 * Keep timer in continuous mode always.
 	 */
 	mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c);
+}
 
+static int openrisc_timer_set_next_event(unsigned long delta,
+					 struct clock_event_device *dev)
+{
+	openrisc_timer_set_next(delta);
 	return 0;
 }
 
@@ -53,13 +64,32 @@ static int openrisc_timer_set_next_event(unsigned long delta,
 * timers) we cannot enable the PERIODIC feature.  The tick timer can run using
 * one-shot events, so no problem.
 */
+DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer);
 
-static struct clock_event_device clockevent_openrisc_timer = {
-	.name = "openrisc_timer_clockevent",
-	.features = CLOCK_EVT_FEAT_ONESHOT,
-	.rating = 300,
-	.set_next_event = openrisc_timer_set_next_event,
-};
+void openrisc_clockevent_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt =
+		&per_cpu(clockevent_openrisc_timer, cpu);
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu];
+
+	mtspr(SPR_TTMR, SPR_TTMR_CR);
+
+#ifdef CONFIG_SMP
+	evt->broadcast = tick_broadcast;
+#endif
+	evt->name = "openrisc_timer_clockevent",
+	evt->features = CLOCK_EVT_FEAT_ONESHOT,
+	evt->rating = 300,
+	evt->set_next_event = openrisc_timer_set_next_event,
+
+	evt->cpumask = cpumask_of(cpu);
+
+	/* We only have 28 bits */
+	clockevents_config_and_register(evt, cpuinfo->clock_frequency,
+					100, 0x0fffffff);
+
+}
 
 static inline void timer_ack(void)
 {
@@ -83,7 +113,9 @@ static inline void timer_ack(void)
 irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct clock_event_device *evt = &clockevent_openrisc_timer;
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt =
+		&per_cpu(clockevent_openrisc_timer, cpu);
 
 	timer_ack();
 
@@ -99,24 +131,12 @@ irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
 	return IRQ_HANDLED;
 }
 
-static __init void openrisc_clockevent_init(void)
-{
-	clockevent_openrisc_timer.cpumask = cpumask_of(0);
-
-	/* We only have 28 bits */
-	clockevents_config_and_register(&clockevent_openrisc_timer,
-					cpuinfo.clock_frequency,
-					100, 0x0fffffff);
-
-}
-
 /**
 * Clocksource: Based on OpenRISC timer/counter
 *
 * This sets up the OpenRISC Tick Timer as a clock source.  The tick timer
 * is 32 bits wide and runs at the CPU clock frequency.
 */
-
 static u64 openrisc_timer_read(struct clocksource *cs)
 {
 	return (u64) mfspr(SPR_TTCR);
@@ -132,7 +152,9 @@ static struct clocksource openrisc_timer = {
 
 static int __init openrisc_timer_init(void)
 {
-	if (clocksource_register_hz(&openrisc_timer, cpuinfo.clock_frequency))
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+
+	if (clocksource_register_hz(&openrisc_timer, cpuinfo->clock_frequency))
 		panic("failed to register clocksource");
 
 	/* Enable the incrementer: 'continuous' mode with interrupt disabled */
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 803e9e756f77..4085d72fa5ae 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -38,6 +38,7 @@
 #include <asm/segment.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
+#include <asm/unwinder.h>
 
 extern char _etext, _stext;
 
@@ -45,61 +46,20 @@ int kstack_depth_to_print = 0x180;
 int lwa_flag;
 unsigned long __user *lwa_addr;
 
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+void print_trace(void *data, unsigned long addr, int reliable)
 {
-	return p > (void *)tinfo && p < (void *)tinfo + THREAD_SIZE - 3;
-}
-
-void show_trace(struct task_struct *task, unsigned long *stack)
-{
-	struct thread_info *context;
-	unsigned long addr;
-
-	context = (struct thread_info *)
-	    ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-
-	while (valid_stack_ptr(context, stack)) {
-		addr = *stack++;
-		if (__kernel_text_address(addr)) {
-			printk(" [<%08lx>]", addr);
-			print_symbol(" %s", addr);
-			printk("\n");
-		}
-	}
-	printk(" =======================\n");
+	pr_emerg("[<%p>] %s%pS\n", (void *) addr, reliable ? "" : "? ",
+		 (void *) addr);
 }
 
 /* displays a short stack trace */
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
-	unsigned long addr, *stack;
-	int i;
-
 	if (esp == NULL)
 		esp = (unsigned long *)&esp;
 
-	stack = esp;
-
-	printk("Stack dump [0x%08lx]:\n", (unsigned long)esp);
-	for (i = 0; i < kstack_depth_to_print; i++) {
-		if (kstack_end(stack))
-			break;
-		if (__get_user(addr, stack)) {
-			/* This message matches "failing address" marked
-			   s390 in ksymoops, so lines containing it will
-			   not be filtered out by ksymoops.  */
-			printk("Failing address 0x%lx\n", (unsigned long)stack);
-			break;
-		}
-		stack++;
-
-		printk("sp + %02d: 0x%08lx\n", i * 4, addr);
-	}
-	printk("\n");
-
-	show_trace(task, esp);
-
-	return;
+	pr_emerg("Call trace:\n");
+	unwind_stack(NULL, esp, print_trace);
 }
 
 void show_trace_task(struct task_struct *tsk)
@@ -115,7 +75,7 @@ void show_registers(struct pt_regs *regs)
 	int in_kernel = 1;
 	unsigned long esp;
 
-	esp = (unsigned long)(&regs->sp);
+	esp = (unsigned long)(regs->sp);
 
 	if (user_mode(regs))
 		in_kernel = 0;
diff --git a/arch/openrisc/kernel/unwinder.c b/arch/openrisc/kernel/unwinder.c
new file mode 100644
index 000000000000..8ae15c2c1845
--- /dev/null
+++ b/arch/openrisc/kernel/unwinder.c
@@ -0,0 +1,105 @@
+/*
+ * OpenRISC unwinder.c
+ *
+ * Reusable arch specific api for unwinding stacks.
+ *
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/sched/task_stack.h>
+#include <linux/kernel.h>
+
+#include <asm/unwinder.h>
+
+#ifdef CONFIG_FRAME_POINTER
+struct or1k_frameinfo {
+	unsigned long *fp;
+	unsigned long ra;
+	unsigned long top;
+};
+
+/*
+ * Verify a frameinfo structure.  The return address should be a valid text
The return address should be a valid text + * address. The frame pointer may be null if its the last frame, otherwise + * the frame pointer should point to a location in the stack after the the + * top of the next frame up. + */ +static inline int or1k_frameinfo_valid(struct or1k_frameinfo *frameinfo) +{ + return (frameinfo->fp == NULL || + (!kstack_end(frameinfo->fp) && + frameinfo->fp > &frameinfo->top)) && + __kernel_text_address(frameinfo->ra); +} + +/* + * Create a stack trace doing scanning which is frame pointer aware. We can + * get reliable stack traces by matching the previously found frame + * pointer with the top of the stack address every time we find a valid + * or1k_frameinfo. + * + * Ideally the stack parameter will be passed as FP, but it can not be + * guaranteed. Therefore we scan each address looking for the first sign + * of a return address. + * + * The OpenRISC stack frame looks something like the following. The + * location SP is held in r1 and location FP is held in r2 when frame pointers + * enabled. + * + * SP -> (top of stack) + * - (callee saved registers) + * - (local variables) + * FP-8 -> previous FP \ + * FP-4 -> return address |- or1k_frameinfo + * FP -> (previous top of stack) / + */ +void unwind_stack(void *data, unsigned long *stack, + void (*trace)(void *data, unsigned long addr, int reliable)) +{ + unsigned long *next_fp = NULL; + struct or1k_frameinfo *frameinfo = NULL; + int reliable = 0; + + while (!kstack_end(stack)) { + frameinfo = container_of(stack, + struct or1k_frameinfo, + top); + + if (__kernel_text_address(frameinfo->ra)) { + if (or1k_frameinfo_valid(frameinfo) && + (next_fp == NULL || + next_fp == &frameinfo->top)) { + reliable = 1; + next_fp = frameinfo->fp; + } else + reliable = 0; + + trace(data, frameinfo->ra, reliable); + } + stack++; + } +} + +#else /* CONFIG_FRAME_POINTER */ + +/* + * Create a stack trace by doing a simple scan treating all text addresses + * as return addresses. + */ +void unwind_stack(void *data, unsigned long *stack, + void (*trace)(void *data, unsigned long addr, int reliable)) +{ + unsigned long addr; + + while (!kstack_end(stack)) { + addr = *stack++; + if (__kernel_text_address(addr)) + trace(data, addr, 0); + } +} +#endif /* CONFIG_FRAME_POINTER */ + diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h index bbcdf21b0b35..bdea46c617c7 100644 --- a/arch/openrisc/kernel/vmlinux.h +++ b/arch/openrisc/kernel/vmlinux.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __OPENRISC_VMLINUX_H_ #define __OPENRISC_VMLINUX_H_ |