Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/cpuhotplug.c   21
-rw-r--r--   kernel/irq/irqdesc.c       1
-rw-r--r--   kernel/irq/irqdomain.c    18
-rw-r--r--   kernel/irq/manage.c       52
-rw-r--r--   kernel/irq/proc.c         42
-rw-r--r--   kernel/irq/spurious.c      1
6 files changed, 109 insertions, 26 deletions
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 6c7ca2e983a5..02236b13b359 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -12,6 +12,7 @@
 #include <linux/interrupt.h>
 #include <linux/ratelimit.h>
 #include <linux/irq.h>
+#include <linux/sched/isolation.h>
 
 #include "internals.h"
 
@@ -171,6 +172,20 @@ void irq_migrate_all_off_this_cpu(void)
 	}
 }
 
+static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
+{
+	const struct cpumask *hk_mask;
+
+	if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
+		return false;
+
+	hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
+	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
+		return false;
+
+	return cpumask_test_cpu(cpu, hk_mask);
+}
+
 static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -188,9 +203,11 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
 	/*
 	 * If the interrupt can only be directed to a single target
 	 * CPU then it is already assigned to a CPU in the affinity
-	 * mask. No point in trying to move it around.
+	 * mask. No point in trying to move it around unless the
+	 * isolation mechanism requests to move it to an upcoming
+	 * housekeeping CPU.
 	 */
-	if (!irqd_is_single_target(data))
+	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
 		irq_set_affinity_locked(data, affinity, false);
 }
 
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 5b8fdd659e54..98a5f10d1900 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -891,6 +891,7 @@ __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
 }
 
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
+	__releases(&desc->lock)
 {
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (bus)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index dd822fd8a7d5..7527e5ef6fe5 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -987,6 +987,23 @@ const struct irq_domain_ops irq_domain_simple_ops = {
 EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
 
 /**
+ * irq_domain_translate_onecell() - Generic translate for direct one cell
+ * bindings
+ */
+int irq_domain_translate_onecell(struct irq_domain *d,
+				 struct irq_fwspec *fwspec,
+				 unsigned long *out_hwirq,
+				 unsigned int *out_type)
+{
+	if (WARN_ON(fwspec->param_count < 1))
+		return -EINVAL;
+	*out_hwirq = fwspec->param[0];
+	*out_type = IRQ_TYPE_NONE;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_translate_onecell);
+
+/**
  * irq_domain_translate_twocell() - Generic translate for direct two cell
  * bindings
  *
@@ -1459,6 +1476,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
 	if (rv) {
 		/* Restore the original irq_data. */
 		*root_irq_data = *child_irq_data;
+		kfree(child_irq_data);
 		goto error;
 	}
 
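The new irq_domain_translate_onecell() helper added in irqdomain.c above mirrors the existing twocell variant for controllers whose firmware bindings carry a single cell: just the hwirq number, with no trigger-type cell, which is why it reports IRQ_TYPE_NONE. A hypothetical irqchip driver could plug it into its hierarchy domain ops like this; foo_domain_alloc is an assumed driver callback, not part of this patch:

    #include <linux/irqdomain.h>

    static const struct irq_domain_ops foo_domain_ops = {
    	/* Decode the one-cell fwspec into (hwirq, IRQ_TYPE_NONE). */
    	.translate	= irq_domain_translate_onecell,
    	/* Assumed driver-specific allocation callback (not in this patch). */
    	.alloc		= foo_domain_alloc,
    	.free		= irq_domain_free_irqs_common,
    };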
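Back in the cpuhotplug.c hunk at the top of this diff, hk_should_isolate() decides whether a CPU coming online should pull a managed interrupt back off an isolated CPU: only if the effective affinity has leaked outside the housekeeping set and the upcoming CPU is itself a housekeeping CPU. The decision reduces to two bitmask checks, sketched here in plain userspace C (illustrative only, not kernel code; bit N stands for CPU N):

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_isolate(unsigned long effective, unsigned long hk_mask,
    			   unsigned int cpu)
    {
    	/* Effective affinity already confined to housekeeping CPUs: nothing to do. */
    	if ((effective & ~hk_mask) == 0)
    		return false;
    	/* Move the IRQ only when the upcoming CPU is a housekeeping one. */
    	return (hk_mask >> cpu) & 1;
    }

    int main(void)
    {
    	/* CPUs 0-1 housekeeping; IRQ currently targets isolated CPU 3. */
    	printf("%d\n", should_isolate(1UL << 3, 0x3, 1)); /* 1: pull it back */
    	printf("%d\n", should_isolate(1UL << 0, 0x3, 1)); /* 0: already confined */
    	return 0;
    }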
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1753486b440c..3089a60ea8f9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/task.h>
+#include <linux/sched/isolation.h>
 #include <uapi/linux/sched/types.h>
 #include <linux/task_work.h>
 
@@ -217,7 +218,45 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	ret = chip->irq_set_affinity(data, mask, force);
+	/*
+	 * If this is a managed interrupt and housekeeping is enabled on
+	 * it check whether the requested affinity mask intersects with
+	 * a housekeeping CPU. If so, then remove the isolated CPUs from
+	 * the mask and just keep the housekeeping CPU(s). This prevents
+	 * the affinity setter from routing the interrupt to an isolated
+	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
+	 * interrupts on an isolated one.
+	 *
+	 * If the masks do not intersect or include online CPU(s) then
+	 * keep the requested mask. The isolated target CPUs are only
+	 * receiving interrupts when the I/O operation was submitted
+	 * directly from them.
+	 *
+	 * If all housekeeping CPUs in the affinity mask are offline, the
+	 * interrupt will be migrated by the CPU hotplug code once a
+	 * housekeeping CPU which belongs to the affinity mask comes
+	 * online.
+	 */
+	if (irqd_affinity_is_managed(data) &&
+	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
+		const struct cpumask *hk_mask, *prog_mask;
+
+		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
+		static struct cpumask tmp_mask;
+
+		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
+
+		raw_spin_lock(&tmp_mask_lock);
+		cpumask_and(&tmp_mask, mask, hk_mask);
+		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
+			prog_mask = mask;
+		else
+			prog_mask = &tmp_mask;
+		ret = chip->irq_set_affinity(data, prog_mask, force);
+		raw_spin_unlock(&tmp_mask_lock);
+	} else {
+		ret = chip->irq_set_affinity(data, mask, force);
+	}
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
@@ -692,6 +731,13 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
  *
  *	Wakeup mode lets this IRQ wake the system from sleep
  *	states like "suspend to RAM".
+ *
+ *	Note: irq enable/disable state is completely orthogonal
+ *	to the enable/disable state of irq wake. An irq can be
+ *	disabled with disable_irq() and still wake the system as
+ *	long as the irq has wake enabled. If this does not hold,
+ *	then the underlying irq chip and the related driver need
+ *	to be investigated.
  */
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
@@ -1500,8 +1546,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			 * has. The type flags are unreliable as the
 			 * underlying chip implementation can override them.
 			 */
-			pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
-			       irq);
+			pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
+			       new->name, irq);
 			ret = -EINVAL;
 			goto out_unlock;
 		}
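The irq_do_set_affinity() change above applies one rule: shrink the requested affinity mask to its housekeeping subset, unless that subset contains no online CPU, in which case the request passes through unchanged. A plain-C sketch of that selection (illustrative only, not kernel code; bit N stands for CPU N):

    #include <stdio.h>

    static unsigned long pick_affinity(unsigned long requested,
    				   unsigned long housekeeping,
    				   unsigned long online)
    {
    	unsigned long masked = requested & housekeeping;

    	/* No online housekeeping CPU in the request: keep it unchanged. */
    	if ((masked & online) == 0)
    		return requested;
    	return masked;
    }

    int main(void)
    {
    	/* CPUs 0-1 housekeeping, CPUs 0-3 online, request = CPUs 1-3. */
    	printf("%#lx\n", pick_affinity(0xe, 0x3, 0xf)); /* 0x2: CPU 1 only */
    	/* Request covers only isolated CPUs 2-3: left as-is. */
    	printf("%#lx\n", pick_affinity(0xc, 0x3, 0xf)); /* 0xc */
    	return 0;
    }

In the kernel code, tmp_mask is static and serialized by a raw spinlock rather than living on the stack, since a struct cpumask can be too large for the stack on big NR_CPUS configurations and this path must not allocate.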
*/ - pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", - irq); + pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n", + new->name, irq); ret = -EINVAL; goto out_unlock; } diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index cfc4f088a0e7..9e5783d98033 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -176,20 +176,20 @@ static int irq_affinity_list_proc_open(struct inode *inode, struct file *file) return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode)); } -static const struct file_operations irq_affinity_proc_fops = { - .open = irq_affinity_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = irq_affinity_proc_write, +static const struct proc_ops irq_affinity_proc_ops = { + .proc_open = irq_affinity_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, + .proc_write = irq_affinity_proc_write, }; -static const struct file_operations irq_affinity_list_proc_fops = { - .open = irq_affinity_list_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = irq_affinity_list_proc_write, +static const struct proc_ops irq_affinity_list_proc_ops = { + .proc_open = irq_affinity_list_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, + .proc_write = irq_affinity_list_proc_write, }; #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK @@ -246,12 +246,12 @@ static int default_affinity_open(struct inode *inode, struct file *file) return single_open(file, default_affinity_show, PDE_DATA(inode)); } -static const struct file_operations default_affinity_proc_fops = { - .open = default_affinity_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = default_affinity_write, +static const struct proc_ops default_affinity_proc_ops = { + .proc_open = default_affinity_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, + .proc_write = default_affinity_write, }; static int irq_node_proc_show(struct seq_file *m, void *v) @@ -342,7 +342,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) #ifdef CONFIG_SMP /* create /proc/irq/<irq>/smp_affinity */ proc_create_data("smp_affinity", 0644, desc->dir, - &irq_affinity_proc_fops, irqp); + &irq_affinity_proc_ops, irqp); /* create /proc/irq/<irq>/affinity_hint */ proc_create_single_data("affinity_hint", 0444, desc->dir, @@ -350,7 +350,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) /* create /proc/irq/<irq>/smp_affinity_list */ proc_create_data("smp_affinity_list", 0644, desc->dir, - &irq_affinity_list_proc_fops, irqp); + &irq_affinity_list_proc_ops, irqp); proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, irqp); @@ -401,7 +401,7 @@ static void register_default_affinity_proc(void) { #ifdef CONFIG_SMP proc_create("irq/default_smp_affinity", 0644, NULL, - &default_affinity_proc_fops); + &default_affinity_proc_ops); #endif } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 2ed97a7c9b2a..f865e5f4d382 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -34,6 +34,7 @@ static atomic_t irq_poll_active; * true and let the handler run. */ bool irq_wait_for_poll(struct irq_desc *desc) + __must_hold(&desc->lock) { if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), "irq poll in progress on cpu %d for irq %d\n", |