Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--  arch/arc/kernel/entry-arcv2.S  |  30
-rw-r--r--  arch/arc/kernel/intc-arcv2.c   |  41
-rw-r--r--  arch/arc/kernel/intc-compact.c |   3
-rw-r--r--  arch/arc/kernel/mcip.c         |  70
-rw-r--r--  arch/arc/kernel/setup.c        | 100
-rw-r--r--  arch/arc/kernel/smp.c          |   5
-rw-r--r--  arch/arc/kernel/time.c         |   8
7 files changed, 150 insertions, 107 deletions
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cbfec79137bf..c1264607bbff 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
VECTOR handle_interrupt ; (16) Timer0
VECTOR handle_interrupt ; unused (Timer1)
VECTOR handle_interrupt ; unused (WDT)
-VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt ; (23) End of fixed IRQs
+VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
+VECTOR handle_interrupt ; (20) perf Interrupt
+VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
+VECTOR handle_interrupt ; unused
+VECTOR handle_interrupt ; (23) unused
+# End of fixed IRQs
.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
VECTOR handle_interrupt
@@ -211,7 +212,11 @@ debug_marker_syscall:
; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
; entry was via Exception in DS which got preempted in kernel).
;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
+;
+; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
+; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
+
.Lintr_ret_to_delay_slot:
debug_marker_ds:
@@ -222,18 +227,23 @@ debug_marker_ds:
ld r2, [sp, PT_ret]
ld r3, [sp, PT_status32]
+ ; STAT32 for Int return created from scratch
+ ; (No delay dlot, disable Further intr in trampoline)
+
bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
st r0, [sp, PT_status32]
mov r1, .Lintr_ret_to_delay_slot_2
st r1, [sp, PT_ret]
+ ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
st r2, [sp, 0]
st r3, [sp, 4]
b .Lisr_ret_fast_path
.Lintr_ret_to_delay_slot_2:
+ ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
sub sp, sp, SZ_PT_REGS
st r9, [sp, -4]
@@ -243,11 +253,19 @@ debug_marker_ds:
ld r9, [sp, 4]
sr r9, [erstatus]
+ ; restore AUX_USER_SP if returning to U mode
+ bbit0 r9, STATUS_U_BIT, 1f
+ ld r9, [sp, PT_sp]
+ sr r9, [AUX_USER_SP]
+
+1:
ld r9, [sp, 8]
sr r9, [erbta]
ld r9, [sp, -4]
add sp, sp, SZ_PT_REGS
+
+ ; return from pure kernel mode to delay slot
rtie
END(ret_from_exception)
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 0394f9f61b46..942526322ae7 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -14,6 +14,8 @@
#include <linux/irqchip.h>
#include <asm/irq.h>
+static int irq_prio;
+
/*
* Early Hardware specific Interrupt setup
* -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,6 +26,14 @@ void arc_init_IRQ(void)
{
unsigned int tmp;
+ struct irq_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
+#else
+ unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
+#endif
+ } irq_bcr;
+
struct aux_irq_ctrl {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int res3:18, save_idx_regs:1, res2:1,
@@ -46,28 +56,25 @@ void arc_init_IRQ(void)
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
- /* setup status32, don't enable intr yet as kernel doesn't want */
- tmp = read_aux_reg(0xa);
- tmp |= ISA_INIT_STATUS_BITS;
- tmp &= ~STATUS_IE_MASK;
- asm volatile("flag %0 \n"::"r"(tmp));
-
/*
* ARCv2 core intc provides multiple interrupt priorities (upto 16).
* Typical builds though have only two levels (0-high, 1-low)
* Linux by default uses lower prio 1 for most irqs, reserving 0 for
* NMI style interrupts in future (say perf)
- *
- * Read the intc BCR to confirm that Linux default priority is avail
- * in h/w
- *
- * Note:
- * IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
- * is 0 based.
*/
- tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF;
- if (ARCV2_IRQ_DEF_PRIO > tmp)
- panic("Linux default irq prio incorrect\n");
+
+ READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+
+ irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */
+ pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
+ irq_prio + 1, irq_prio,
+ irq_bcr.firq ? " FIRQ (not used)":"");
+
+ /* setup status32, don't enable intr yet as kernel doesn't want */
+ tmp = read_aux_reg(0xa);
+ tmp |= STATUS_AD_MASK | (irq_prio << 1);
+ tmp &= ~STATUS_IE_MASK;
+ asm volatile("flag %0 \n"::"r"(tmp));
}
static void arcv2_irq_mask(struct irq_data *data)
@@ -86,7 +93,7 @@ void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
write_aux_reg(AUX_IRQ_SELECT, data->irq);
- write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+ write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
/*
* hw auto enables (linux unmask) all by default
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 06bcedf19b62..224d1c3aa9c4 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -81,9 +81,6 @@ static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
{
switch (irq) {
case TIMER0_IRQ:
-#ifdef CONFIG_SMP
- case IPI_IRQ:
-#endif
irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
break;
default:
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index bd237acdf4f2..c41c364b926c 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,9 +11,13 @@
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <asm/irqflags-arcv2.h>
#include <asm/mcip.h>
#include <asm/setup.h>
+#define IPI_IRQ 19
+#define SOFTIRQ_IRQ 21
+
static char smp_cpuinfo_buf[128];
static int idu_detected;
@@ -22,6 +26,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
+ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}
static void mcip_ipi_send(int cpu)
@@ -29,46 +34,44 @@ static void mcip_ipi_send(int cpu)
unsigned long flags;
int ipi_was_pending;
+ /* ARConnect can only send IPI to others */
+ if (unlikely(cpu == raw_smp_processor_id())) {
+ arc_softirq_trigger(SOFTIRQ_IRQ);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
/*
- * NOTE: We must spin here if the other cpu hasn't yet
- * serviced a previous message. This can burn lots
- * of time, but we MUST follows this protocol or
- * ipi messages can be lost!!!
- * Also, we must release the lock in this loop because
- * the other side may get to this same loop and not
- * be able to ack -- thus causing deadlock.
+ * If receiver already has a pending interrupt, elide sending this one.
+ * Linux cross core calling works well with concurrent IPIs
+ * coalesced into one
+ * see arch/arc/kernel/smp.c: ipi_send_msg_one()
*/
+ __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
+ ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
+ if (!ipi_was_pending)
+ __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
- do {
- raw_spin_lock_irqsave(&mcip_lock, flags);
- __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
- ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
- if (ipi_was_pending == 0)
- break; /* break out but keep lock */
- raw_spin_unlock_irqrestore(&mcip_lock, flags);
- } while (1);
-
- __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
-
-#ifdef CONFIG_ARC_IPI_DBG
- if (ipi_was_pending)
- pr_info("IPI ACK delayed from cpu %d\n", cpu);
-#endif
}
static void mcip_ipi_clear(int irq)
{
unsigned int cpu, c;
unsigned long flags;
- unsigned int __maybe_unused copy;
+
+ if (unlikely(irq == SOFTIRQ_IRQ)) {
+ arc_softirq_clear(irq);
+ return;
+ }
raw_spin_lock_irqsave(&mcip_lock, flags);
/* Who sent the IPI */
__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
- copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
+ cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
/*
* In rare case, multiple concurrent IPIs sent to same target can
@@ -82,12 +85,6 @@ static void mcip_ipi_clear(int irq)
} while (cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
-
-#ifdef CONFIG_ARC_IPI_DBG
- if (c != __ffs(copy))
- pr_info("IPIs from %x coalesced to %x\n",
- copy, raw_smp_processor_id());
-#endif
}
static void mcip_probe_n_setup(void)
{
@@ -96,13 +93,13 @@
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8, idu:1, llm:1, num_cores:6,
- iocoh:1, grtc:1, dbg:1, pad2:1,
+ iocoh:1, gfrc:1, dbg:1, pad2:1,
msg:1, sem:1, ipi:1, pad:1,
ver:8;
#else
unsigned int ver:8, pad:1, ipi:1, sem:1, msg:1,
- pad2:1, dbg:1, grtc:1, iocoh:1,
+ pad2:1, dbg:1, gfrc:1, iocoh:1,
num_cores:6, llm:1, idu:1, pad3:8;
#endif
@@ -111,12 +108,13 @@ static void mcip_probe_n_setup(void)
READ_BCR(ARC_REG_MCIP_BCR, mp);
sprintf(smp_cpuinfo_buf,
- "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
+ "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
mp.ver, mp.num_cores,
IS_AVAIL1(mp.ipi, "IPI "),
IS_AVAIL1(mp.idu, "IDU "),
+ IS_AVAIL1(mp.llm, "LLM "),
IS_AVAIL1(mp.dbg, "DEBUG "),
- IS_AVAIL1(mp.grtc, "GRTC"));
+ IS_AVAIL1(mp.gfrc, "GFRC"));
idu_detected = mp.idu;
@@ -125,8 +123,8 @@ static void mcip_probe_n_setup(void)
__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
}
- if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
- panic("kernel trying to use non-existent GRTC\n");
+ if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
+ panic("kernel trying to use non-existent GFRC\n");
}
struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index e1b87444ea9a..cdc821df1809 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -42,9 +42,57 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
+{
+ if (is_isa_arcompact()) {
+ struct bcr_iccm_arcompact iccm;
+ struct bcr_dccm_arcompact dccm;
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
+ cpu->iccm.base_addr = iccm.base << 16;
+ }
+
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ unsigned long base;
+ cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
+
+ base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
+ cpu->dccm.base_addr = base & ~0xF;
+ }
+ } else {
+ struct bcr_iccm_arcv2 iccm;
+ struct bcr_dccm_arcv2 dccm;
+ unsigned long region;
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
+ if (iccm.sz00 == 0xF && iccm.sz01 > 0)
+ cpu->iccm.sz <<= iccm.sz01;
+
+ region = read_aux_reg(ARC_REG_AUX_ICCM);
+ cpu->iccm.base_addr = region & 0xF0000000;
+ }
+
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ cpu->dccm.sz = 256 << dccm.sz0;
+ if (dccm.sz0 == 0xF && dccm.sz1 > 0)
+ cpu->dccm.sz <<= dccm.sz1;
+
+ region = read_aux_reg(ARC_REG_AUX_DCCM);
+ cpu->dccm.base_addr = region & 0xF0000000;
+ }
+ }
+}
+
static void read_arc_build_cfg_regs(void)
{
struct bcr_perip uncached_space;
+ struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
unsigned long perip_space;
@@ -53,7 +101,11 @@ static void read_arc_build_cfg_regs(void)
READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
- READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
+ READ_BCR(ARC_REG_TIMERS_BCR, timer);
+ cpu->extn.timer0 = timer.t0;
+ cpu->extn.timer1 = timer.t1;
+ cpu->extn.rtc = timer.rtc;
+
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
@@ -71,36 +123,11 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
-
- /* Note that we read the CCM BCRs independent of kernel config
- * This is to catch the cases where user doesn't know that
- * CCMs are present in hardware build
- */
- {
- struct bcr_iccm iccm;
- struct bcr_dccm dccm;
- struct bcr_dccm_base dccm_base;
- unsigned int bcr_32bit_val;
-
- bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
- if (bcr_32bit_val) {
- iccm = *((struct bcr_iccm *)&bcr_32bit_val);
- cpu->iccm.base_addr = iccm.base << 16;
- cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
- }
-
- bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
- if (bcr_32bit_val) {
- dccm = *((struct bcr_dccm *)&bcr_32bit_val);
- cpu->dccm.sz = 0x800 << (dccm.sz);
-
- READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
- cpu->dccm.base_addr = dccm_base.addr << 8;
- }
- }
-
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
+ /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
+ read_decode_ccm_bcr(cpu);
+
read_decode_mmu_bcr();
read_decode_cache_bcr();
@@ -208,9 +235,9 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
(unsigned int)(arc_get_core_freq() / 10000) % 100);
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
- IS_AVAIL1(cpu->timers.t0, "Timer0 "),
- IS_AVAIL1(cpu->timers.t1, "Timer1 "),
- IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ",
+ IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
+ IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
+ IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
CONFIG_ARC_HAS_RTC));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
@@ -232,8 +259,6 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
}
- n += scnprintf(buf + n, len - n, "%s",
- IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
}
n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
@@ -293,13 +318,13 @@ static void arc_chk_core_config(void)
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
int fpu_enabled;
- if (!cpu->timers.t0)
+ if (!cpu->extn.timer0)
panic("Timer0 is not present!\n");
- if (!cpu->timers.t1)
+ if (!cpu->extn.timer1)
panic("Timer1 is not present!\n");
- if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc)
+ if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
panic("RTC is not present\n");
#ifdef CONFIG_ARC_HAS_DCCM
@@ -334,6 +359,7 @@ static void arc_chk_core_config(void)
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+ IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
!IS_ENABLED(CONFIG_ARC_STAR_9000923308))
panic("llock/scond livelock workaround missing\n");
}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index ef6e9e15b82a..4cb3add77c75 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -142,7 +142,7 @@ void start_kernel_secondary(void)
local_irq_enable();
preempt_disable();
- cpu_startup_entry(CPUHP_ONLINE);
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/*
@@ -336,11 +336,8 @@ irqreturn_t do_IPI(int irq, void *dev_id)
int rc;
rc = __do_IPI(msg);
-#ifdef CONFIG_ARC_IPI_DBG
- /* IPI received but no valid @msg */
if (rc)
pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
-#endif
pending &= ~(1U << msg);
} while (pending);
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index dfad287f1db1..156d9833ff84 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -62,7 +62,7 @@
/********** Clock Source Device *********/
-#ifdef CONFIG_ARC_HAS_GRTC
+#ifdef CONFIG_ARC_HAS_GFRC
static int arc_counter_setup(void)
{
@@ -83,10 +83,10 @@ static cycle_t arc_counter_read(struct clocksource *cs)
local_irq_save(flags);
- __mcip_cmd(CMD_GRTC_READ_LO, 0);
+ __mcip_cmd(CMD_GFRC_READ_LO, 0);
stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
- __mcip_cmd(CMD_GRTC_READ_HI, 0);
+ __mcip_cmd(CMD_GFRC_READ_HI, 0);
stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
local_irq_restore(flags);
@@ -95,7 +95,7 @@ static cycle_t arc_counter_read(struct clocksource *cs)
}
static struct clocksource arc_counter = {
- .name = "ARConnect GRTC",
+ .name = "ARConnect GFRC",
.rating = 400,
.read = arc_counter_read,
.mask = CLOCKSOURCE_MASK(64),
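The decode helpers added in this commit all follow the same BCR pattern: a packed bitfield struct is laid over a 32-bit build-configuration register (struct irq_build in intc-arcv2.c, the ICCM/DCCM BCR structs in setup.c) and reported values are derived from the fields, e.g. the priority count that the hardware encodes as N-1. The stand-alone C sketch below illustrates only that decode step on a made-up register value; the union, the sample value 0x01201001 and the printout are illustrative assumptions and not part of the patch (the kernel fills the struct straight from the auxiliary register via READ_BCR).

#include <stdio.h>
#include <stdint.h>

/*
 * Little-endian field order, mirroring the patch's struct irq_build.
 * Bitfield layout is compiler/endian dependent, which is why the kernel
 * carries a separate CONFIG_CPU_BIG_ENDIAN variant of the struct.
 */
struct irq_build {
	unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
};

union bcr_word {
	uint32_t word;		/* raw 32-bit BCR value */
	struct irq_build f;	/* decoded view of the same bits */
};

int main(void)
{
	/* Hypothetical IRQ_BUILD contents: ver=1, 16 irqs, prio field=1, no FIRQ */
	union bcr_word b = { .word = 0x01201001 };

	/* prio holds N-1 for N priority levels, as arc_init_IRQ() assumes */
	printf("intc v%u: %u interrupts, %u priority levels%s\n",
	       (unsigned)b.f.ver, (unsigned)b.f.irqs,
	       (unsigned)b.f.prio + 1,
	       b.f.firq ? ", FIRQ" : "");
	return 0;
}

Built with any hosted C compiler, this prints "intc v1: 16 interrupts, 2 priority levels", matching the two-level (0-high, 1-low) configuration described in the arc_init_IRQ() comment above.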