Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--   arch/arc/kernel/entry-arcv2.S    |  2
-rw-r--r--   arch/arc/kernel/entry-compact.S  | 43
-rw-r--r--   arch/arc/kernel/head.S           | 49
-rw-r--r--   arch/arc/kernel/intc-compact.c   | 90
-rw-r--r--   arch/arc/kernel/irq.c            | 20
-rw-r--r--   arch/arc/kernel/mcip.c           | 46
-rw-r--r--   arch/arc/kernel/setup.c          |  7
-rw-r--r--   arch/arc/kernel/smp.c            | 66
-rw-r--r--   arch/arc/kernel/time.c           |  3
-rw-r--r--   arch/arc/kernel/vmlinux.lds.S    |  2
10 files changed, 127 insertions(+), 201 deletions(-)
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 8fa76567e402..445e63a10754 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -24,7 +24,7 @@
 	.align 4

 # Initial 16 slots are Exception Vectors
-VECTOR	stext			; Restart Vector (jump to entry point)
+VECTOR	res_service		; Reset Vector
 VECTOR	mem_service		; Mem exception
 VECTOR	instr_service		; Instrn Error
 VECTOR	EV_MachineCheck		; Fatal Machine check
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 15d457b4403a..59f52035b4ea 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -86,7 +86,7 @@
  */

 ; ********* Critical System Events **********************
-VECTOR	res_service		; 0x0, Restart Vector	(0x0)
+VECTOR	res_service		; 0x0, Reset Vector	(0x0)
 VECTOR	mem_service		; 0x8, Mem exception	(0x1)
 VECTOR	instr_service		; 0x10, Instrn Error	(0x2)
@@ -155,13 +155,9 @@ int2_saved_reg:
 ; ---------------------------------------------
 	.section .text, "ax",@progbits

-res_service:			; processor restart
-	flag	0x1		; not implemented
-	nop
-	nop
-
-reserved:			; processor restart
-	rtie			; jump to processor initializations
+reserved:
+	flag 1			; Unexpected event, halt

 ;##################### Interrupt Handling ##############################
@@ -175,12 +171,25 @@ ENTRY(handle_interrupt_level2)

 	;------------------------------------------------------
 	; if L2 IRQ interrupted a L1 ISR, disable preemption
+	;
+	; This is to avoid a potential L1-L2-L1 scenario
+	;  -L1 IRQ taken
+	;  -L2 interrupts L1 (before L1 ISR could run)
+	;  -preemption off IRQ, user task in syscall picked to run
+	;  -RTIE to userspace
+	;       Returns from L2 context fine
+	;       But both L1 and L2 re-enabled, so another L1 can be taken
+	;       while prev L1 is still unserviced
+	;
 	;------------------------------------------------------

+	; L2 interrupting L1 implies both L2 and L1 active
+	; However both A2 and A1 are NOT set in STATUS32, thus
+	; need to check STATUS32_L2 to determine if L1 was active
+
 	ld r9, [sp, PT_status32]	; get statu32_l2 (saved in pt_regs)
 	bbit0 r9, STATUS_A1_BIT, 1f	; L1 not active when L2 IRQ, so normal

-	; A1 is set in status32_l2
 	; bump thread_info->preempt_count (Disable preemption)
 	GET_CURR_THR_INFO_FROM_SP   r10
 	ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -320,11 +329,10 @@ END(call_do_page_fault)
 	; Note that we use realtime STATUS32 (not pt_regs->status32) to
 	; decide that.

-	; if Returning from Exception
-	btst	r10, STATUS_AE_BIT
-	bnz	.Lexcep_ret
+	and.f	0, r10, (STATUS_A1_MASK|STATUS_A2_MASK)
+	bz	.Lexcep_or_pure_K_ret

-	; Not Exception so maybe Interrupts (Level 1 or 2)
+	; Returning from Interrupts (Level 1 or 2)

 #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS

@@ -365,8 +373,7 @@ END(call_do_page_fault)
 	st	r9, [r10, THREAD_INFO_PREEMPT_COUNT]

 149:
-	;return from level 2
-	INTERRUPT_EPILOGUE 2
+	INTERRUPT_EPILOGUE 2	; return from level 2 interrupt
 debug_marker_l2:
 	rtie

@@ -374,15 +381,11 @@ not_level2_interrupt:

 #endif

-	bbit0	r10, STATUS_A1_BIT, .Lpure_k_mode_ret
-
-	;return from level 1
-	INTERRUPT_EPILOGUE 1
+	INTERRUPT_EPILOGUE 1	; return from level 1 interrupt
 debug_marker_l1:
 	rtie

-.Lexcep_ret:
-.Lpure_k_mode_ret:
+.Lexcep_or_pure_K_ret:

 	;this case is for syscalls or Exceptions or pure kernel mode
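In C terms, the preempt_count dance added above amounts to the following. This is a minimal runnable userspace model, not kernel code: the STATUS_A1 bit position and the helper names are assumptions made purely for illustration.

#include <stdio.h>

#define STATUS_A1_BIT  3                      /* assumed ARCompact STATUS32 layout */
#define STATUS_A1_MASK (1u << STATUS_A1_BIT)

static int preempt_count;

/* models the prologue hunk: L2 IRQ found an L1 ISR in flight */
static void l2_irq_entry(unsigned int status32_l2)
{
	if (status32_l2 & STATUS_A1_MASK)
		preempt_count++;              /* hold off preemption across the L2 ISR */
}

/* models the matching decrement on the epilogue path (near the 149: label) */
static void l2_irq_exit(unsigned int status32_l2)
{
	if (status32_l2 & STATUS_A1_MASK)
		preempt_count--;              /* interrupted L1 ISR can now finish */
}

int main(void)
{
	unsigned int saved = STATUS_A1_MASK;  /* pretend L1 was active at L2 entry */

	l2_irq_entry(saved);
	printf("in L2 ISR: preempt_count=%d, scheduler must not pick a task\n",
	       preempt_count);
	l2_irq_exit(saved);
	printf("L2 done:   preempt_count=%d\n", preempt_count);
	return 0;
}

With preemption held, the return path cannot schedule a user task while the interrupted L1 handler is still pending, which is exactly the L1-L2-L1 hole the new comment block describes.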
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 812f95e6ae69..689dd867fdff 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -50,28 +50,37 @@
 .endm

 	.section .init.text, "ax",@progbits
-	.type stext, @function
-	.globl stext
-stext:
-	;-------------------------------------------------------------------
-	; Don't clobber r0-r2 yet. It might have bootloader provided info
-	;-------------------------------------------------------------------
+
+;----------------------------------------------------------------
+; Default Reset Handler (jumped into from Reset vector)
+; - Don't clobber r0,r1,r2 as they might have u-boot provided args
+; - Platforms can override this weak version if needed
+;----------------------------------------------------------------
+WEAK(res_service)
+	j	stext
+END(res_service)
+
+;----------------------------------------------------------------
+; Kernel Entry point
+;----------------------------------------------------------------
+ENTRY(stext)

 	CPU_EARLY_SETUP

 #ifdef CONFIG_SMP
-	; Ensure Boot (Master) proceeds. Others wait in platform dependent way
-	;	IDENTITY Reg [ 3  2  1  0 ]
-	;	(cpu-id)             ^^^	=> Zero for UP ARC700
-	;					=> #Core-ID if SMP (Master 0)
-	; Note that non-boot CPUs might not land here if halt-on-reset and
-	; instead breath life from @first_lines_of_secondary, but we still
-	; need to make sure only boot cpu takes this path.
 	GET_CPU_ID  r5
 	cmp	r5, 0
-	mov.ne	r0, r5
-	jne	arc_platform_smp_wait_to_boot
+	mov.nz	r0, r5
+#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
+	; Non-Master can proceed as system would be booted sufficiently
+	jnz	first_lines_of_secondary
+#else
+	; Non-Masters wait for Master to boot enough and bring them up
+	jnz	arc_platform_smp_wait_to_boot
 #endif
+	; Master falls thru
+#endif
+
 	; Clear BSS before updating any globals
 	; XXX: use ZOL here
 	mov	r5, __bss_start
@@ -102,18 +111,14 @@ stext:
 	GET_TSK_STACK_BASE r9, sp	; r9 = tsk, sp = stack base(output)

 	j	start_kernel	; "C" entry point
+END(stext)

 #ifdef CONFIG_SMP
 ;----------------------------------------------------------------
 ;     First lines of code run by secondary before jumping to 'C'
 ;----------------------------------------------------------------
 	.section .text, "ax",@progbits
-	.type first_lines_of_secondary, @function
-	.globl first_lines_of_secondary
-
-first_lines_of_secondary:
-
-	CPU_EARLY_SETUP
+ENTRY(first_lines_of_secondary)

 	; setup per-cpu idle task as "current" on this CPU
 	ld	r0, [@secondary_idle_tsk]
@@ -126,5 +131,5 @@ first_lines_of_secondary:
 	GET_TSK_STACK_BASE r0, sp

 	j	start_kernel_secondary
-
+END(first_lines_of_secondary)
 #endif
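The boot dispatch head.S now implements reads naturally as C. Below is a runnable userspace sketch under stated assumptions: HALT_ON_RESET stands in for CONFIG_ARC_SMP_HALT_ON_RESET, and the three leaf functions are placeholders for the real start_kernel, first_lines_of_secondary and arc_platform_smp_wait_to_boot.

#include <stdio.h>

#define HALT_ON_RESET 1    /* models the CONFIG_ARC_SMP_HALT_ON_RESET choice */

static void start_kernel(void)              { puts("cpu 0: start_kernel()"); }
static void first_lines_of_secondary(int c) { printf("cpu %d: secondary entry\n", c); }
static void wait_to_boot(int c)             { printf("cpu %d: spin until kicked\n", c); }

/* models ENTRY(stext) after CPU_EARLY_SETUP */
static void stext(int cpu_id)
{
	if (cpu_id == 0) {                  /* Master falls thru */
		start_kernel();
		return;
	}
#if HALT_ON_RESET
	/* cores were halted at reset; master has already booted enough */
	first_lines_of_secondary(cpu_id);
#else
	/* run-on-reset: every core raced through reset; non-masters wait */
	wait_to_boot(cpu_id);
#endif
}

int main(void)
{
	stext(0);
	stext(1);
	return 0;
}

The WEAK(res_service) stub is what lets both modes coexist: the reset vector lands in a default handler that simply jumps to stext, and a platform needing something fancier overrides the weak symbol.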
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 039fac30b5c1..06bcedf19b62 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -79,17 +79,16 @@ static struct irq_chip onchip_intc = {
 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
 			       irq_hw_number_t hw)
 {
-	/*
-	 * XXX: the IPI IRQ needs to be handled like TIMER too. However ARC core
-	 *	code doesn't own it (like TIMER0). ISS IDU / ezchip define it
-	 *	in platform header which can't be included here as it goes
-	 *	against multi-platform image philisophy
-	 */
-	if (irq == TIMER0_IRQ)
+	switch (irq) {
+	case TIMER0_IRQ:
+#ifdef CONFIG_SMP
+	case IPI_IRQ:
+#endif
 		irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
-	else
+		break;
+	default:
 		irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
-
+	}
 	return 0;
 }

@@ -148,78 +147,15 @@ IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);

 void arch_local_irq_enable(void)
 {
-
 	unsigned long flags = arch_local_save_flags();

-	/* Allow both L1 and L2 at the onset */
-	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-
-	/* Called from hard ISR (between irq_enter and irq_exit) */
-	if (in_irq()) {
-
-		/* If in L2 ISR, don't re-enable any further IRQs as this can
-		 * cause IRQ priorities to get upside down. e.g. it could allow
-		 * L1 be taken while in L2 hard ISR which is wrong not only in
-		 * theory, it can also cause the dreaded L1-L2-L1 scenario
-		 */
-		if (flags & STATUS_A2_MASK)
-			flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
-
-		/* Even if in L1 ISR, allowe Higher prio L2 IRQs */
-		else if (flags & STATUS_A1_MASK)
-			flags &= ~(STATUS_E1_MASK);
-	}
-
-	/* called from soft IRQ, ideally we want to re-enable all levels */
-
-	else if (in_softirq()) {
-
-		/* However if this is case of L1 interrupted by L2,
-		 * re-enabling both may cause whaco L1-L2-L1 scenario
-		 * because ARC700 allows level 1 to interrupt an active L2 ISR
-		 * Thus we disable both
-		 * However some code, executing in soft ISR wants some IRQs
-		 * to be enabled so we re-enable L2 only
-		 *
-		 * How do we determine L1 intr by L2
-		 *  -A2 is set (means in L2 ISR)
-		 *  -E1 is set in this ISR's pt_regs->status32 which is
-		 *	saved copy of status32_l2 when l2 ISR happened
-		 */
-		struct pt_regs *pt = get_irq_regs();
-
-		if ((flags & STATUS_A2_MASK) && pt &&
-		    (pt->status32 & STATUS_A1_MASK)) {
-			/*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
-			flags &= ~(STATUS_E1_MASK);
-		}
-	}
+	if (flags & STATUS_A2_MASK)
+		flags |= STATUS_E2_MASK;
+	else if (flags & STATUS_A1_MASK)
+		flags |= STATUS_E1_MASK;

 	arch_local_irq_restore(flags);
 }

-#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
-
-/*
- * Simpler version for only 1 level of interrupt
- * Here we only Worry about Level 1 Bits
- */
-void arch_local_irq_enable(void)
-{
-	unsigned long flags;
-
-	/*
-	 * ARC IDE Drivers tries to re-enable interrupts from hard-isr
-	 * context which is simply wrong
-	 */
-	if (in_irq()) {
-		WARN_ONCE(1, "IRQ enabled from hard-isr");
-		return;
-	}
-
-	flags = arch_local_save_flags();
-	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
-	arch_local_irq_restore(flags);
-}
-#endif
 EXPORT_SYMBOL(arch_local_irq_enable);
+#endif
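The replacement arch_local_irq_enable() is small enough to model directly; the sketch below is runnable userspace C mirroring the two branches in the hunk above. The bit positions (E1=1, E2=2, A1=3, A2=4) follow the ARCompact STATUS32 layout but should be treated as assumptions here.

#include <stdio.h>

#define STATUS_E1_MASK (1UL << 1)   /* L1 enable (assumed bit positions) */
#define STATUS_E2_MASK (1UL << 2)   /* L2 enable */
#define STATUS_A1_MASK (1UL << 3)   /* L1 ISR active */
#define STATUS_A2_MASK (1UL << 4)   /* L2 ISR active */

static unsigned long irq_enable_model(unsigned long flags)
{
	/*
	 * Re-enable only the level matching the active context, so a
	 * low-prio L1 source can never be let in from an L2 handler;
	 * the A bits keep the same level from re-triggering until RTIE.
	 */
	if (flags & STATUS_A2_MASK)
		flags |= STATUS_E2_MASK;
	else if (flags & STATUS_A1_MASK)
		flags |= STATUS_E1_MASK;

	return flags;
}

int main(void)
{
	printf("from L2 ISR: %#lx\n", irq_enable_model(STATUS_A2_MASK));
	printf("from L1 ISR: %#lx\n", irq_enable_model(STATUS_A1_MASK));
	printf("L2 over L1:  %#lx\n",
	       irq_enable_model(STATUS_A1_MASK | STATUS_A2_MASK));
	return 0;
}

Compared to the deleted in_irq()/in_softirq() maze, the hardware A1/A2 bits alone now decide what may be re-enabled.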
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 2989a7bcf8a8..2ee226546c6a 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/irqchip.h>
 #include <asm/mach_desc.h>
+#include <asm/smp.h>

 /*
  * Late Interrupt system init called from start_kernel for Boot CPU only
@@ -19,17 +20,20 @@
  */
 void __init init_IRQ(void)
 {
-	/* Any external intc can be setup here */
-	if (machine_desc->init_irq)
-		machine_desc->init_irq();
-
-	/* process the entire interrupt tree in one go */
+	/*
+	 * process the entire interrupt tree in one go
+	 * Any external intc will be setup provided DT chains them
+	 * properly
+	 */
 	irqchip_init();

 #ifdef CONFIG_SMP
-	/* Master CPU can initialize it's side of IPI */
-	if (machine_desc->init_smp)
-		machine_desc->init_smp(smp_processor_id());
+	/* a SMP H/w block could do IPI IRQ request here */
+	if (plat_smp_ops.init_irq_cpu)
+		plat_smp_ops.init_irq_cpu(smp_processor_id());
+
+	if (machine_desc->init_cpu_smp)
+		machine_desc->init_cpu_smp(smp_processor_id());
 #endif
 }
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 4ffd1855f1bd..74a9b074ac3e 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -12,20 +12,14 @@
 #include <linux/irq.h>
 #include <linux/spinlock.h>
 #include <asm/mcip.h>
+#include <asm/setup.h>

 static char smp_cpuinfo_buf[128];
 static int idu_detected;

 static DEFINE_RAW_SPINLOCK(mcip_lock);

-/*
- * Any SMP specific init any CPU does when it comes up.
- * Here we setup the CPU to enable Inter-Processor-Interrupts
- * Called for each CPU
- * -Master     : init_IRQ()
- * -Other(s)   : start_kernel_secondary()
- */
-void mcip_init_smp(unsigned int cpu)
+static void mcip_setup_per_cpu(int cpu)
 {
 	smp_ipi_irq_setup(cpu, IPI_IRQ);
 }
@@ -96,34 +90,8 @@ static void mcip_ipi_clear(int irq)
 #endif
 }

-volatile int wake_flag;
-
-static void mcip_wakeup_cpu(int cpu, unsigned long pc)
-{
-	BUG_ON(cpu == 0);
-	wake_flag = cpu;
-}
-
-void arc_platform_smp_wait_to_boot(int cpu)
+static void mcip_probe_n_setup(void)
 {
-	while (wake_flag != cpu)
-		;
-
-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
-}
-
-struct plat_smp_ops plat_smp_ops = {
-	.info = smp_cpuinfo_buf,
-	.cpu_kick = mcip_wakeup_cpu,
-	.ipi_send = mcip_ipi_send,
-	.ipi_clear = mcip_ipi_clear,
-};
-
-void mcip_init_early_smp(void)
-{
-#define IS_AVAIL1(var, str)	((var) ? str : "")
-
 	struct mcip_bcr {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 		unsigned int pad3:8,
@@ -161,6 +129,14 @@ void mcip_init_early_smp(void)
 		panic("kernel trying to use non-existent GRTC\n");
 }

+struct plat_smp_ops plat_smp_ops = {
+	.info = smp_cpuinfo_buf,
+	.init_early_smp = mcip_probe_n_setup,
+	.init_irq_cpu = mcip_setup_per_cpu,
+	.ipi_send = mcip_ipi_send,
+	.ipi_clear = mcip_ipi_clear,
+};
+
 /***************************************************************************
  * ARCv2 Interrupt Distribution Unit (IDU)
 *
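The interesting part of the irq.c and mcip.c changes is the new hook split in plat_smp_ops: init_early_smp runs once on the master (from smp_init_cpus()) to probe the SMP hardware, while init_irq_cpu runs on every CPU as it comes up to request its IPI IRQ. Below is a small runnable C model of that ops-table pattern, with simplified stand-in names rather than the kernel's definitions.

#include <stdio.h>

struct smp_ops_model {
	void (*init_early_smp)(void);   /* once, boot CPU, from smp_init_cpus() */
	void (*init_irq_cpu)(int cpu);  /* per CPU: master in init_IRQ(),
					 * others in start_kernel_secondary() */
};

static void probe_block(void)  { puts("probe MCIP block (once)"); }
static void setup_ipi(int cpu) { printf("cpu %d: request IPI IRQ\n", cpu); }

static struct smp_ops_model ops = {
	.init_early_smp = probe_block,
	.init_irq_cpu   = setup_ipi,
};

int main(void)
{
	if (ops.init_early_smp)         /* optional, like the kernel's checks */
		ops.init_early_smp();

	for (int cpu = 0; cpu < 2; cpu++)
		if (ops.init_irq_cpu)
			ops.init_irq_cpu(cpu);
	return 0;
}

Hanging these callbacks off plat_smp_ops (instead of machine_desc->init_smp) lets a multi-core IP block like MCIP be shared across platforms without each one re-declaring the wiring.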
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index cabde9dc0696..c33e77c0ad3e 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -160,10 +160,6 @@ static const struct cpuinfo_data arc_cpu_tbl[] = {
 	{ {0x00, NULL		} }
 };

-#define IS_AVAIL1(v, s)		((v) ? s : "")
-#define IS_USED_RUN(v)		((v) ? "" : "(not used) ")
-#define IS_USED_CFG(cfg)	IS_USED_RUN(IS_ENABLED(cfg))
-#define IS_AVAIL2(v, s, cfg)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))

 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
@@ -415,8 +411,9 @@ void __init setup_arch(char **cmdline_p)
 	if (machine_desc->init_early)
 		machine_desc->init_early();

-	setup_processor();
 	smp_init_cpus();
+
+	setup_processor();
 	setup_arch_memory();

 	/* copy flat DT out of .init and then unflatten it */
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index be13d12420ba..580587805fa3 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -42,8 +42,13 @@ void __init smp_prepare_boot_cpu(void)
 }

 /*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
+ * Called from setup_arch() before calling setup_processor()
+ *
+ * - Initialise the CPU possible map early - this describes the CPUs
+ *   which may be present or become present in the system.
+ * - Call early smp init hook. This can initialize a specific multi-core
+ *   IP which is say common to several platforms (hence not part of
+ *   platform specific int_early() hook)
 */
 void __init smp_init_cpus(void)
 {
@@ -51,6 +56,9 @@ void __init smp_init_cpus(void)

 	for (i = 0; i < NR_CPUS; i++)
 		set_cpu_possible(i, true);
+
+	if (plat_smp_ops.init_early_smp)
+		plat_smp_ops.init_early_smp();
 }

 /* called from init ( ) => process 1 */
@@ -72,35 +80,29 @@ void __init smp_cpus_done(unsigned int max_cpus)
 }

 /*
- * After power-up, a non Master CPU needs to wait for Master to kick start it
- *
- * The default implementation halts
- *
- * This relies on platform specific support allowing Master to directly set
- * this CPU's PC (to be @first_lines_of_secondary() and kick start it.
- *
- * In lack of such h/w assist, platforms can override this function
- * - make this function busy-spin on a token, eventually set by Master
- *   (from arc_platform_smp_wakeup_cpu())
- * - Once token is available, jump to @first_lines_of_secondary
- *   (using inline asm).
- *
- * Alert: can NOT use stack here as it has not been determined/setup for CPU.
- *	  If it turns out to be elaborate, it's better to code it in assembly
- *
+ * Default smp boot helper for Run-on-reset case where all cores start off
+ * together. Non-masters need to wait for Master to start running.
+ * This is implemented using a flag in memory, which Non-masters spin-wait on.
+ * Master sets it to cpu-id of core to "ungate" it.
 */
-void __weak arc_platform_smp_wait_to_boot(int cpu)
+static volatile int wake_flag;
+
+static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
-	/*
-	 * As a hack for debugging - since debugger will single-step over the
-	 * FLAG insn - wrap the halt itself it in a self loop
-	 */
-	__asm__ __volatile__(
-	"1:		\n"
-	"	flag 1	\n"
-	"	b 1b	\n");
+	BUG_ON(cpu == 0);
+	wake_flag = cpu;
+}
+
+void arc_platform_smp_wait_to_boot(int cpu)
+{
+	while (wake_flag != cpu)
+		;
+
+	wake_flag = 0;
+	__asm__ __volatile__("j @first_lines_of_secondary	\n");
 }

+
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";
@@ -129,8 +131,12 @@ void start_kernel_secondary(void)

 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

-	if (machine_desc->init_smp)
-		machine_desc->init_smp(cpu);
+	/* Some SMP H/w setup - for each cpu */
+	if (plat_smp_ops.init_irq_cpu)
+		plat_smp_ops.init_irq_cpu(cpu);
+
+	if (machine_desc->init_cpu_smp)
+		machine_desc->init_cpu_smp(cpu);

 	arc_local_timer_setup();

@@ -161,6 +167,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	if (plat_smp_ops.cpu_kick)
 		plat_smp_ops.cpu_kick(cpu,
 				(unsigned long)first_lines_of_secondary);
+	else
+		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

 	/* wait for 1 sec after kicking the secondary */
 	wait_till = jiffies + HZ;
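The wake_flag helper that moved from mcip.c into smp.c as the default kick mechanism is a simple token protocol: non-masters spin on a flag in memory, and the master writes a cpu-id into it to ungate exactly one core. A runnable userspace approximation with pthreads is below; it keeps the kernel's volatile flag for fidelity (portable userspace code would use C11 atomics), and the jump to first_lines_of_secondary is modeled as a return.

#include <pthread.h>
#include <stdio.h>

static volatile int wake_flag;     /* mirrors the kernel's wake_flag */

static void *secondary(void *arg)
{
	int cpu = (int)(long)arg;

	while (wake_flag != cpu)   /* arc_platform_smp_wait_to_boot(): spin on token */
		;
	wake_flag = 0;             /* consume token so it can gate another core */
	printf("cpu %d: would jump to first_lines_of_secondary\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, (void *)1L);
	wake_flag = 1;             /* arc_default_smp_cpu_kick(): master ungates cpu 1 */
	pthread_join(t, NULL);
	return 0;
}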
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 4294761a2b3e..dfad287f1db1 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -285,7 +285,4 @@ void __init time_init(void)

 	/* sets up the periodic event timer */
 	arc_local_timer_setup();
-
-	if (machine_desc->init_time)
-		machine_desc->init_time();
 }
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index dd35bde39f69..894e696bddaa 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -12,7 +12,7 @@
 #include <asm/thread_info.h>

 OUTPUT_ARCH(arc)
-ENTRY(_stext)
+ENTRY(res_service)

 #ifdef CONFIG_CPU_BIG_ENDIAN
 jiffies = jiffies_64 + 4;