Diffstat (limited to 'arch/arc/kernel')
-rw-r--r-- | arch/arc/kernel/entry-arcv2.S  |  7
-rw-r--r-- | arch/arc/kernel/head.S         | 14
-rw-r--r-- | arch/arc/kernel/intc-arcv2.c   | 48
-rw-r--r-- | arch/arc/kernel/intc-compact.c |  5
-rw-r--r-- | arch/arc/kernel/mcip.c         | 95
-rw-r--r-- | arch/arc/kernel/module.c       |  4
-rw-r--r-- | arch/arc/kernel/smp.c          | 25
-rw-r--r-- | arch/arc/kernel/unaligned.c    |  3
8 files changed, 115 insertions, 86 deletions
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 0b6388a5f0b8..2585632eaa68 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -14,6 +14,11 @@
 #include <asm/arcregs.h>
 #include <asm/irqflags.h>
 
+; A maximum number of supported interrupts in the core interrupt controller.
+; This number is not equal to the maximum interrupt number (256) because
+; first 16 lines are reserved for exceptions and are not configurable.
+#define NR_CPU_IRQS	240
+
 	.cpu HS
 
 #define VECTOR	.word
@@ -52,7 +57,7 @@ VECTOR	handle_interrupt	; unused
 VECTOR	handle_interrupt	; (23) unused
 # End of fixed IRQs
 
-.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
+.rept NR_CPU_IRQS - 8
 	VECTOR	handle_interrupt
 .endr
 
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff..8b90d25a15cc 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@ ENTRY(stext)
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
 	; Clear BSS before updating any globals
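
For reference, the arithmetic behind the NR_CPU_IRQS constant introduced in the entry-arcv2.S hunk above, as a standalone C sketch (not part of the patch; apart from NR_CPU_IRQS itself, the NR_* names are illustrative only): the ARCv2 vector table has 256 entries, the first 16 are exception vectors, and the 8 fixed per-core IRQ vectors (16..23) are emitted explicitly before the .rept block.

    /* Illustrative sketch only -- not kernel code. */
    #define NR_VECTORS      256     /* total entries in the ARCv2 vector table */
    #define NR_EXCEPTIONS   16      /* vectors 0..15: exceptions, not configurable */
    #define NR_CPU_IRQS     (NR_VECTORS - NR_EXCEPTIONS)    /* 240, as in the patch */
    #define NR_FIXED_IRQS   8       /* vectors 16..23: timers, WDT, IPI, PCT */

    /* the .rept in entry-arcv2.S emits the remaining, non-fixed vectors */
    _Static_assert(NR_CPU_IRQS == 240, "240 interrupt vectors follow the exceptions");
    _Static_assert(NR_CPU_IRQS - NR_FIXED_IRQS == 232, ".rept emits 232 entries");
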
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 994dca7014db..f928795fd07a 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -14,6 +14,16 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
+#define NR_EXCEPTIONS	16
+
+struct bcr_irq_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
+#else
+	unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
+#endif
+};
+
 /*
  * Early Hardware specific Interrupt setup
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -22,15 +32,8 @@
  */
 void arc_init_IRQ(void)
 {
-	unsigned int tmp, irq_prio;
-
-	struct irq_build {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-		unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
-#else
-		unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
-#endif
-	} irq_bcr;
+	unsigned int tmp, irq_prio, i;
+	struct bcr_irq_arcv2 irq_bcr;
 
 	struct aux_irq_ctrl {
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -68,8 +71,18 @@ void arc_init_IRQ(void)
 	       irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
 	       irq_bcr.firq ? " FIRQ (not used)":"");
 
+	/*
+	 * Set a default priority for all available interrupts to prevent
+	 * switching of register banks if Fast IRQ and multiple register banks
+	 * are supported by CPU.
+	 */
+	for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
+		write_aux_reg(AUX_IRQ_SELECT, i);
+		write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+	}
+
 	/* setup status32, don't enable intr yet as kernel doesn't want */
-	tmp = read_aux_reg(0xa);
+	tmp = read_aux_reg(ARC_REG_STATUS32);
 	tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
 	tmp &= ~STATUS_IE_MASK;
 	asm volatile("kflag %0	\n"::"r"(tmp));
@@ -77,20 +90,20 @@ void arc_init_IRQ(void)
 
 static void arcv2_irq_mask(struct irq_data *data)
 {
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_ENABLE, 0);
 }
 
 static void arcv2_irq_unmask(struct irq_data *data)
 {
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_ENABLE, 1);
 }
 
 void arcv2_irq_enable(struct irq_data *data)
 {
 	/* set default priority */
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
 	/*
@@ -115,7 +128,7 @@ static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
 	 * core intc IRQs [16, 23]:
 	 * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
 	 */
-	if (hw < 24) {
+	if (hw < FIRST_EXT_IRQ) {
 		/*
 		 * A subsequent request_percpu_irq() fails if percpu_devid is
 		 * not set. That in turns sets NOAUTOEN, meaning each core needs
@@ -140,11 +153,16 @@ static int __init
 init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
 {
 	struct irq_domain *root_domain;
+	struct bcr_irq_arcv2 irq_bcr;
+	unsigned int nr_cpu_irqs;
+
+	READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+	nr_cpu_irqs = irq_bcr.irqs + NR_EXCEPTIONS;
 
 	if (parent)
 		panic("DeviceTree incore intc not a root irq controller\n");
 
-	root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS, &arcv2_irq_ops, NULL);
+	root_domain = irq_domain_add_linear(intc, nr_cpu_irqs, &arcv2_irq_ops, NULL);
 	if (!root_domain)
 		panic("root irq domain not avail\n");
 
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index ce9deb953ca9..7e608c6b0a01 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -14,6 +14,7 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
+#define NR_CPU_IRQS	32  /* number of irq lines coming in */
 #define TIMER0_IRQ	3	/* Fixed by ISA */
 
 /*
@@ -57,7 +58,7 @@ static void arc_irq_mask(struct irq_data *data)
 	unsigned int ienb;
 
 	ienb = read_aux_reg(AUX_IENABLE);
-	ienb &= ~(1 << data->irq);
+	ienb &= ~(1 << data->hwirq);
 	write_aux_reg(AUX_IENABLE, ienb);
 }
 
@@ -66,7 +67,7 @@ static void arc_irq_unmask(struct irq_data *data)
 	unsigned int ienb;
 
 	ienb = read_aux_reg(AUX_IENABLE);
-	ienb |= (1 << data->irq);
+	ienb |= (1 << data->hwirq);
 	write_aux_reg(AUX_IENABLE, ienb);
 }
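
Both core-intc drivers above switch their irq_chip callbacks from data->irq (the kernel-global virtual IRQ number) to data->hwirq (the line number local to the controller). A minimal sketch of that pattern, using a hypothetical memory-mapped enable register rather than the ARC aux registers, to show why hardware-facing callbacks key off hwirq once virq and hwirq are no longer identical:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/irq.h>

    /* Sketch only: demo_enable_reg is a made-up per-line enable register. */
    static void __iomem *demo_enable_reg;

    static void demo_irq_mask(struct irq_data *d)
    {
            u32 val = readl(demo_enable_reg);

            /* d->hwirq selects this controller's line; d->irq is the global virq */
            val &= ~BIT(irqd_to_hwirq(d));
            writel(val, demo_enable_reg);
    }

    static void demo_irq_unmask(struct irq_data *d)
    {
            u32 val = readl(demo_enable_reg);

            val |= BIT(irqd_to_hwirq(d));
            writel(val, demo_enable_reg);
    }
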
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 560c4afc2af4..f61a52b01625 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -10,6 +10,7 @@
 
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/spinlock.h>
 #include <soc/arc/mcip.h>
 #include <asm/irqflags-arcv2.h>
@@ -92,11 +93,10 @@ static void mcip_probe_n_setup(void)
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 
 	sprintf(smp_cpuinfo_buf,
-		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
 		mp.ver, mp.num_cores,
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
-		IS_AVAIL1(mp.llm, "LLM "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
@@ -156,15 +156,20 @@ static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
 	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
 }
 
-static void idu_irq_mask(struct irq_data *data)
+static void idu_irq_mask_raw(irq_hw_number_t hwirq)
 {
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&mcip_lock, flags);
-	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
+	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
+static void idu_irq_mask(struct irq_data *data)
+{
+	idu_irq_mask_raw(data->hwirq);
+}
+
 static void idu_irq_unmask(struct irq_data *data)
 {
 	unsigned long flags;
@@ -174,7 +179,6 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -204,27 +208,43 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in IDU must be set manually since
+	 * in some cases the kernel will not call irq_set_affinity() by itself:
+	 *   1. When the kernel is not configured with support of SMP.
+	 *   2. When the kernel is configured with support of SMP but upper
+	 *      interrupt controllers does not support setting of the affinity
+	 *      and cannot propagate it to IDU.
+	 */
+	idu_irq_set_affinity(data, cpu_online_mask, false);
+	idu_irq_unmask(data);
+}
 
 static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_enable		= idu_irq_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity       = idu_irq_set_affinity,
 #endif
 
 };
 
-static irq_hw_number_t idu_first_hwirq;
-
 static void idu_cascade_isr(struct irq_desc *desc)
 {
 	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	struct irq_chip *core_chip = irq_desc_get_chip(desc);
 	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
-	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
+	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;
 
+	chained_irq_enter(core_chip, desc);
 	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+	chained_irq_exit(core_chip, desc);
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -235,45 +255,8 @@ static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t
 	return 0;
 }
 
-static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
-			 const u32 *intspec, unsigned int intsize,
-			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
-{
-	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-	int distri = intspec[1];
-	unsigned long flags;
-
-	*out_type = IRQ_TYPE_NONE;
-
-	/* XXX: validate distribution scheme again online cpu mask */
-	if (distri == 0) {
-		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	} else {
-		/*
-		 * DEST based distribution for Level Triggered intr can only
-		 * have 1 CPU, so generalize it to always contain 1 cpu
-		 */
-		int cpu = ffs(distri);
-
-		if (cpu != fls(distri))
-			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-				hwirq, cpu);
-
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, cpu);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	}
-
-	return 0;
-}
-
 static const struct irq_domain_ops idu_irq_ops = {
-	.xlate	= idu_irq_xlate,
+	.xlate	= irq_domain_xlate_onecell,
 	.map	= idu_irq_map,
 };
 
@@ -288,33 +271,37 @@ static int __init
 idu_of_init(struct device_node *intc, struct device_node *parent)
 {
 	struct irq_domain *domain;
-	/* Read IDU BCR to confirm nr_irqs */
-	int nr_irqs = of_irq_count(intc);
+	int nr_irqs;
 	int i, virq;
 	struct mcip_bcr mp;
+	struct mcip_idu_bcr idu_bcr;
 
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 	if (!mp.idu)
 		panic("IDU not detected, but DeviceTree using it");
 
-	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
+	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
+	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);
+
+	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);
 
 	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
 
 	/* Parent interrupts (core-intc) are already mapped */
 	for (i = 0; i < nr_irqs; i++) {
+		/* Mask all common interrupts by default */
+		idu_irq_mask_raw(i);
+
 		/*
 		 * Return parent uplink IRQs (towards core intc) 24,25,.....
 		 * this step has been done before already
 		 * however we need it to get the parent virq and set IDU handler
 		 * as first level isr
 		 */
-		virq = irq_of_parse_and_map(intc, i);
-		if (!i)
-			idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
-
+		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
+		BUG_ON(!virq);
 		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
 	}
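
The idu_cascade_isr() hunk above wraps the second-level dispatch in chained_irq_enter()/chained_irq_exit(), the usual pattern for a cascaded interrupt controller: the parent irq_chip is masked/acked around handling of the child domain's interrupt. A generic sketch of that flow (the demo_* names are illustrative, not the MCIP code):

    #include <linux/irq.h>
    #include <linux/irqdesc.h>
    #include <linux/irqdomain.h>
    #include <linux/irqchip/chained_irq.h>

    /*
     * Sketch of a generic cascade handler: the child irq domain is passed in
     * as handler data, and the parent chip is driven via chained_irq_enter()/
     * chained_irq_exit() so the parent line is acked/masked/eoi'd correctly.
     */
    static void demo_cascade_isr(struct irq_desc *desc)
    {
            struct irq_domain *child_domain = irq_desc_get_handler_data(desc);
            struct irq_chip *parent_chip = irq_desc_get_chip(desc);
            irq_hw_number_t child_hwirq;

            chained_irq_enter(parent_chip, desc);

            /* assumption: the child line number is derived from the parent hwirq */
            child_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
            generic_handle_irq(irq_find_mapping(child_domain, child_hwirq));

            chained_irq_exit(parent_chip, desc);
    }

Such a handler is installed with irq_set_chained_handler_and_data(parent_virq, demo_cascade_isr, child_domain), which is exactly how idu_of_init() wires up the IDU above.
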
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 42e964db2967..3d99a6091332 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 #ifdef CONFIG_ARC_DW2_UNWIND
 	mod->arch.unw_sec_idx = 0;
 	mod->arch.unw_info = NULL;
-	mod->arch.secstr = secstr;
 #endif
+	mod->arch.secstr = secstr;
 
 	return 0;
 }
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
 	}
 
+#ifdef CONFIG_ARC_DW2_UNWIND
 	if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
 		module->arch.unw_sec_idx = tgtsec;
+#endif
 
 	return 0;
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d972c9d..2afbafadb6ab 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
-	while (wake_flag != cpu)
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
+	while (__boot_read(wake_flag) != cpu)
 		;
 
-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+	__boot_write(wake_flag, 0);
 }
 
-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";
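
The smp.c hunk above hides every access to wake_flag behind __boot_read()/__boot_write(): ARCompact builds keep plain accesses, while ARCv2 builds go through arc_read_uncached_32()/arc_write_uncached_32(). A compilable sketch of the same accessor-macro pattern and of how the kick/wait sides pair up (the demo_* names and the DEMO_PLAIN_ACCESS switch are stand-ins, not kernel APIs; the real ARC helpers bypass the data cache, which is only modelled here):

    /* Illustrative stand-ins, modelled as plain volatile accesses. */
    static inline int demo_read_uncached_32(volatile int *p)          { return *p; }
    static inline void demo_write_uncached_32(volatile int *p, int v) { *p = v; }

    static volatile int wake_flag_demo;

    #ifdef DEMO_PLAIN_ACCESS                /* ARCompact-like build */
    #define boot_read(f)            (f)
    #define boot_write(f, v)        ((f) = (v))
    #else                                   /* ARCv2-like build: uncached helpers */
    #define boot_read(f)            demo_read_uncached_32(&(f))
    #define boot_write(f, v)        demo_write_uncached_32(&(f), (v))
    #endif

    /* master side: name the one secondary that may proceed */
    static void demo_cpu_kick(int cpu)
    {
            boot_write(wake_flag_demo, cpu);
    }

    /* secondary side: spin until named, clear the flag, then return so the
     * caller can tail-call the real entry point (cf. the head.S hunk above) */
    static void demo_wait_to_boot(int cpu)
    {
            while (boot_read(wake_flag_demo) != cpu)
                    ;
            boot_write(wake_flag_demo, 0);
    }
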
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index abd961f3e763..5f69c3bd59bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;
 
+	/* clear any remanants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;