Diffstat (limited to 'drivers/irqchip/irq-sifive-plic.c')
-rw-r--r-- | drivers/irqchip/irq-sifive-plic.c | 116
1 file changed, 77 insertions, 39 deletions
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..cf755964f2f8 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,62 +59,83 @@ static void __iomem *plic_regs;
 
 struct plic_handler {
 	bool			present;
-	int			ctxid;
+	void __iomem		*hart_base;
+	/*
+	 * Protect mask operations on the registers given that we can't
+	 * assume atomic memory operations work on them.
+	 */
+	raw_spinlock_t		enable_lock;
+	void __iomem		*enable_base;
 };
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
-	return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
-	return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+				int hwirq, int enable)
 {
-	u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
 	u32 hwirq_mask = 1 << (hwirq % 32);
 
-	raw_spin_lock(&plic_toggle_lock);
+	raw_spin_lock(&handler->enable_lock);
 	if (enable)
 		writel(readl(reg) | hwirq_mask, reg);
 	else
 		writel(readl(reg) & ~hwirq_mask, reg);
-	raw_spin_unlock(&plic_toggle_lock);
+	raw_spin_unlock(&handler->enable_lock);
 }
 
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+					int hwirq, int enable)
 {
 	int cpu;
 
-	writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
-	for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+	for_each_cpu(cpu, mask) {
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
 		if (handler->present)
-			plic_toggle(handler->ctxid, d->hwirq, enable);
+			plic_toggle(handler, hwirq, enable);
 	}
 }
 
 static void plic_irq_enable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 1);
+	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+					   cpu_online_mask);
+	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+		return;
+	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
 static void plic_irq_disable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 0);
+	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
 
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+			     const struct cpumask *mask_val, bool force)
+{
+	unsigned int cpu;
+
+	if (force)
+		cpu = cpumask_first(mask_val);
+	else
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (!irqd_irq_disabled(d)) {
+		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
 static struct irq_chip plic_chip = {
 	.name		= "SiFive PLIC",
 	/*
@@ -123,6 +144,9 @@ static struct irq_chip plic_chip = {
 	 */
 	.irq_enable	= plic_irq_enable,
 	.irq_disable	= plic_irq_disable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = plic_set_affinity,
+#endif
 };
 
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
@@ -150,7 +174,7 @@ static struct irq_domain *plic_irqdomain;
 static void plic_handle_irq(struct pt_regs *regs)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
 	irq_hw_number_t hwirq;
 
 	WARN_ON_ONCE(!handler->present);
@@ -186,7 +210,7 @@ static int plic_find_hart_id(struct device_node *node)
 static int __init plic_init(struct device_node *node,
 		struct device_node *parent)
 {
-	int error = 0, nr_handlers, nr_mapped = 0, i;
+	int error = 0, nr_contexts, nr_handlers = 0, i;
 	u32 nr_irqs;
 
 	if (plic_regs) {
@@ -203,10 +227,10 @@ static int __init plic_init(struct device_node *node,
 	if (WARN_ON(!nr_irqs))
 		goto out_iounmap;
 
-	nr_handlers = of_irq_count(node);
-	if (WARN_ON(!nr_handlers))
+	nr_contexts = of_irq_count(node);
+	if (WARN_ON(!nr_contexts))
 		goto out_iounmap;
-	if (WARN_ON(nr_handlers < num_possible_cpus()))
+	if (WARN_ON(nr_contexts < num_possible_cpus()))
 		goto out_iounmap;
 
 	error = -ENOMEM;
@@ -215,7 +239,7 @@ static int __init plic_init(struct device_node *node,
 	if (WARN_ON(!plic_irqdomain))
 		goto out_iounmap;
 
-	for (i = 0; i < nr_handlers; i++) {
+	for (i = 0; i < nr_contexts; i++) {
 		struct of_phandle_args parent;
 		struct plic_handler *handler;
 		irq_hw_number_t hwirq;
@@ -237,19 +261,33 @@ static int __init plic_init(struct device_node *node,
 		}
 
 		cpu = riscv_hartid_to_cpuid(hartid);
+		if (cpu < 0) {
+			pr_warn("Invalid cpuid for context %d\n", i);
+			continue;
+		}
+
 		handler = per_cpu_ptr(&plic_handlers, cpu);
+		if (handler->present) {
+			pr_warn("handler already present for context %d.\n", i);
+			continue;
+		}
+
 		handler->present = true;
-		handler->ctxid = i;
+		handler->hart_base =
+			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+		raw_spin_lock_init(&handler->enable_lock);
+		handler->enable_base =
+			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
 
 		/* priority must be > threshold to trigger an interrupt */
-		writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+		writel(0, handler->hart_base + CONTEXT_THRESHOLD);
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
-			plic_toggle(i, hwirq, 0);
-		nr_mapped++;
+			plic_toggle(handler, hwirq, 0);
+		nr_handlers++;
 	}
 
-	pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
-		nr_irqs, nr_mapped, nr_handlers);
+	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
+		nr_irqs, nr_handlers, nr_contexts);
	set_handle_irq(plic_handle_irq);
	return 0;
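
One subtle detail in the new plic_toggle() above: the old code indexed a u32 __iomem * directly, so "+ (hwirq / 32)" already advanced in 4-byte steps, while the new struct plic_handler stores enable_base as void __iomem *, so the stride must be spelled out as "(hwirq / 32) * sizeof(u32)". The following is a standalone sketch (ordinary userspace C, not kernel code; the bank array and loop bound are invented for the illustration) showing that both computations land on the same byte address:

/*
 * Sketch only: verify that u32-pointer indexing (old code) and explicit
 * byte-offset arithmetic (new code) select the same enable word.
 * "bank" is a stand-in for the memory-mapped enable registers.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bank[16];				/* invented register bank */
	uint32_t *u32_base = bank;
	unsigned char *byte_base = (unsigned char *)bank;

	for (int hwirq = 0; hwirq < 128; hwirq++) {
		/* old: plic_enable_base(ctxid) + (hwirq / 32) -- u32 stride */
		void *old_reg = u32_base + (hwirq / 32);
		/* new: handler->enable_base + (hwirq / 32) * sizeof(u32) */
		void *new_reg = byte_base + (hwirq / 32) * sizeof(uint32_t);
		assert(old_reg == new_reg);
	}
	printf("offsets agree for all hwirqs\n");
	return 0;
}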
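The affinity path also changes behavior: an interrupt is now enabled on exactly one context, and plic_set_affinity() moves it by clearing the enable bit in every possible context before setting it for the chosen CPU, so one hwirq is never routed to two harts at once. Below is a minimal userspace model of that move logic (NR_CTX, the flat enable[] array, and the example hwirq are all invented for the sketch; the real driver writes per-context enable registers under handler->enable_lock):

/*
 * Sketch only: models the disable-everywhere-then-enable-one sequence
 * from plic_set_affinity(), using plain integers instead of MMIO.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CTX 4

static uint32_t enable[NR_CTX];		/* one 32-bit enable word per context */

static void toggle(int ctx, int hwirq, bool on)
{
	if (on)
		enable[ctx] |= 1u << (hwirq % 32);
	else
		enable[ctx] &= ~(1u << (hwirq % 32));
}

static void set_affinity(int hwirq, int target_ctx)
{
	/* mirrors plic_irq_toggle(cpu_possible_mask, d->hwirq, 0) ... */
	for (int ctx = 0; ctx < NR_CTX; ctx++)
		toggle(ctx, hwirq, false);
	/* ... followed by plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1) */
	toggle(target_ctx, hwirq, true);
}

int main(void)
{
	set_affinity(5, 0);
	set_affinity(5, 2);	/* move hwirq 5 from context 0 to context 2 */
	for (int ctx = 0; ctx < NR_CTX; ctx++)
		printf("ctx %d: %s\n", ctx,
		       (enable[ctx] & (1u << 5)) ? "enabled" : "disabled");
	return 0;
}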