From 212bd846223c718b6577d4df16fd8d05a55ad914 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 8 Nov 2016 17:15:02 -0800
Subject: genirq/affinity: Handle pre/post vectors in irq_calc_affinity_vectors()

Only calculate the affinity for the main I/O vectors, and skip the pre or
post vectors specified by struct irq_affinity.

Also remove the irq_affinity cpumask argument that has never been used.
If we ever need it in the future we can pass it through struct
irq_affinity.

Signed-off-by: Christoph Hellwig
Reviewed-by: Hannes Reinecke
Acked-by: Jens Axboe
Cc: linux-block@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Link: http://lkml.kernel.org/r/1478654107-7384-3-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner
---
 kernel/irq/affinity.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 17f51d63da56..8d9259727cb4 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -131,24 +131,20 @@ out:
 }
 
 /**
- * irq_calc_affinity_vectors - Calculate to optimal number of vectors for a given affinity mask
- * @affinity:	The affinity mask to spread. If NULL cpu_online_mask
- *		is used
- * @maxvec:	The maximum number of vectors available
+ * irq_calc_affinity_vectors - Calculate the optimal number of vectors
+ * @maxvec:	The maximum number of vectors available
+ * @affd:	Description of the affinity requirements
  */
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
 {
-	int cpus, ret;
+	int resv = affd->pre_vectors + affd->post_vectors;
+	int vecs = maxvec - resv;
+	int cpus;
 
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	/* If the supplied affinity mask is NULL, use cpu online mask */
-	if (!affinity)
-		affinity = cpu_online_mask;
-
-	cpus = cpumask_weight(affinity);
-	ret = (cpus < maxvec) ? cpus : maxvec;
-
+	cpus = cpumask_weight(cpu_online_mask);
 	put_online_cpus();
-	return ret;
+
+	return min(cpus, vecs) + resv;
 }
--
cgit v1.2.3


From 67c93c218dc5d1b45d547771f1fdb44a381e1faf Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 8 Nov 2016 17:15:03 -0800
Subject: genirq/affinity: Handle pre/post vectors in irq_create_affinity_masks()

Only calculate the affinity for the main I/O vectors, and skip the pre or
post vectors specified by struct irq_affinity.

Also remove the irq_affinity cpumask argument that has never been used.
If we ever need it in the future we can pass it through struct
irq_affinity.
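As an editorial illustration (not part of the original patch), a hypothetical
driver that wants to keep its first vector out of the automatic spreading,
e.g. for an admin or configuration interrupt, could describe its requirements
roughly like this; the numbers, names and error handling are made up for the
example:

	/* Illustration only: one reserved vector in front, none at the end */
	static const struct irq_affinity affd = {
		.pre_vectors	= 1,
		.post_vectors	= 0,
	};
	int maxvec = 32;	/* whatever the hardware supports */
	struct cpumask *masks;
	int nvecs;

	/* min(online CPUs, maxvec - reserved) plus the reserved vectors */
	nvecs = irq_calc_affinity_vectors(maxvec, &affd);

	/* Slot 0 keeps a default mask, slots 1..nvecs-1 are spread */
	masks = irq_create_affinity_masks(nvecs, &affd);
	if (!masks)
		pr_warn("affinity mask allocation failed, using defaults\n");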
Signed-off-by: Christoph Hellwig
Reviewed-by: Hannes Reinecke
Acked-by: Bjorn Helgaas
Acked-by: Jens Axboe
Cc: linux-block@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Link: http://lkml.kernel.org/r/1478654107-7384-4-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner
---
 drivers/pci/msi.c         |  6 ++++--
 include/linux/interrupt.h |  4 ++--
 kernel/irq/affinity.c     | 46 +++++++++++++++++++++++++---------------------
 3 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index dad2da7cf80e..f4a108b59336 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -553,12 +553,13 @@ error_attrs:
 static struct msi_desc *
 msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
 {
+	static const struct irq_affinity default_affd;
 	struct cpumask *masks = NULL;
 	struct msi_desc *entry;
 	u16 control;
 
 	if (affinity) {
-		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+		masks = irq_create_affinity_masks(nvec, &default_affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}
@@ -692,12 +693,13 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 			      struct msix_entry *entries, int nvec,
 			      bool affinity)
 {
+	static const struct irq_affinity default_affd;
 	struct cpumask *curmsk, *masks = NULL;
 	struct msi_desc *entry;
 	int ret, i;
 
 	if (affinity) {
-		masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+		masks = irq_create_affinity_masks(nvec, &default_affd);
 		if (!masks)
 			pr_err("Unable to allocate affinity masks, ignoring\n");
 	}

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9081f23bc0ff..53144e78a369 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -290,7 +290,7 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
+struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
 int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
 
 #else /* CONFIG_SMP */
@@ -325,7 +325,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 }
 
 static inline struct cpumask *
-irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
 {
 	return NULL;
 }

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 8d9259727cb4..17360bd9619b 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -51,16 +51,16 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 
 /**
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @affinity:	The affinity mask to spread. If NULL cpu_online_mask
- *		is used
- * @nvecs:	The number of vectors
+ * @nvecs:	The total number of vectors
+ * @affd:	Description of the affinity requirements
  *
 * Returns the masks pointer or NULL if allocation failed.
  */
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
-					  int nvec)
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
+	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
 	cpumask_var_t nmsk;
@@ -68,46 +68,46 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
 
-	masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL);
+	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		goto out;
 
+	/* Fill out vectors at the beginning that don't need affinity */
+	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+		cpumask_copy(masks + curvec, cpu_possible_mask);
+
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	/* If the supplied affinity mask is NULL, use cpu online mask */
-	if (!affinity)
-		affinity = cpu_online_mask;
-
-	nodes = get_nodes_in_cpumask(affinity, &nodemsk);
+	nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
 
 	/*
	 * If the number of nodes in the mask is less than or equal the
	 * number of vectors we just spread the vectors across the nodes.
	 */
-	if (nvec <= nodes) {
+	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec, cpumask_of_node(n));
-			if (++curvec == nvec)
+			if (++curvec == affv)
 				break;
 		}
-		goto outonl;
+		goto done;
 	}
 
 	/* Spread the vectors per node */
-	vecs_per_node = nvec / nodes;
+	vecs_per_node = affv / nodes;
 
 	/* Account for rounding errors */
-	extra_vecs = nvec - (nodes * vecs_per_node);
+	extra_vecs = affv - (nodes * vecs_per_node);
 
 	for_each_node_mask(n, nodemsk) {
 		int ncpus, v, vecs_to_assign = vecs_per_node;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, affinity, cpumask_of_node(n));
+		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
 
-		for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
+		for (v = 0; curvec < affv && v < vecs_to_assign; curvec++, v++) {
 			cpus_per_vec = ncpus / vecs_to_assign;
 
 			/* Account for extra vectors to compensate rounding errors */
@@ -119,12 +119,16 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
-		if (curvec >= nvec)
+		if (curvec >= affv)
 			break;
 	}
 
-outonl:
+done:
 	put_online_cpus();
+
+	/* Fill out vectors at the end that don't need affinity */
+	for (; curvec < nvecs; curvec++)
+		cpumask_copy(masks + curvec, cpu_possible_mask);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
--
cgit v1.2.3


From bfe130773862bb3a02cdc4d4c2169f7f0210a46b Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 15 Nov 2016 10:12:58 +0100
Subject: genirq/affinity: Take reserved vectors into account when spreading irqs

The recent addition of reserved vectors at the beginning or the end of the
vector space did not take the reserved vectors at the beginning into
account for the various loop exit conditions. As a consequence the last
vectors of the spread area are not included into the spread algorithm and
are treated like the reserved vectors at the end of the vector space and
get the default affinity mask assigned.

Sum up the affinity vectors and the reserved vectors at the beginning and
use the sum as exit condition.
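To make the effect concrete (numbers chosen purely for illustration): with
nvecs = 16, pre_vectors = 2 and post_vectors = 1 the spread area is
affv = 13 vectors wide and occupies slots 2..14. The loops therefore have to
run until curvec reaches last_affv = affd->pre_vectors + affv = 15; exiting
at affv = 13, as the old conditions did, leaves slots 13 and 14 with the
default mask even though they belong to the spread area.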
[ tglx: Fixed all conditions instead of only one and massaged changelog ]

Signed-off-by: Christoph Hellwig
Link: http://lkml.kernel.org/r/1479201178-29604-2-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner
---
 kernel/irq/affinity.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 17360bd9619b..49eb38d48816 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -61,6 +61,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
 	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
 	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
+	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
 	cpumask_var_t nmsk;
@@ -87,7 +88,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec, cpumask_of_node(n));
-			if (++curvec == affv)
+			if (++curvec == last_affv)
 				break;
 		}
 		goto done;
@@ -107,7 +108,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
 
-		for (v = 0; curvec < affv && v < vecs_to_assign; curvec++, v++) {
+		for (v = 0; curvec < last_affv && v < vecs_to_assign;
+				curvec++, v++) {
 			cpus_per_vec = ncpus / vecs_to_assign;
 
 			/* Account for extra vectors to compensate rounding errors */
@@ -119,7 +121,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
-		if (curvec >= affv)
+		if (curvec >= last_affv)
 			break;
 	}
--
cgit v1.2.3


From b6e5d5b947527558afac4aa0cdfa2ac586332e03 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 16 Nov 2016 18:36:44 +0100
Subject: genirq/affinity: Use default affinity mask for reserved vectors

The reserved vectors at the beginning and the end of the vector space get
cpu_possible_mask assigned as their affinity mask. All other non-auto
affine interrupts get the default irq affinity mask assigned. Using
cpu_possible_mask breaks that rule.

Treat them like any other interrupt and use irq_default_affinity as target
mask.

Signed-off-by: Thomas Gleixner
Cc: Christoph Hellwig
---
 kernel/irq/affinity.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 49eb38d48816..9be9bda7c1f9 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -75,7 +75,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
 	/* Fill out vectors at the beginning that don't need affinity */
 	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
-		cpumask_copy(masks + curvec, cpu_possible_mask);
+		cpumask_copy(masks + curvec, irq_default_affinity);
 
 	/* Stabilize the cpumasks */
 	get_online_cpus();
@@ -130,7 +130,7 @@ done:
 	put_online_cpus();
 
 	/* Fill out vectors at the end that don't need affinity */
 	for (; curvec < nvecs; curvec++)
-		cpumask_copy(masks + curvec, cpu_possible_mask);
+		cpumask_copy(masks + curvec, irq_default_affinity);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
--
cgit v1.2.3


From 4e201566402c878a225d4425df8a4a664c6f251e Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 22 Nov 2016 09:21:16 +0000
Subject: genirq/msi: Drop artificial PCI dependency

The generic MSI layer doesn't have any PCI ties anymore, and the build
hack should have been removed some time ago.
Fixes: d9109698be6e ("genirq: Introduce msi_domain_alloc/free_irqs()")
Signed-off-by: Marc Zyngier
Link: http://lkml.kernel.org/r/1479806476-20801-1-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner
---
 kernel/irq/msi.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 8a3e872798f3..ee230063f033 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -14,9 +14,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/msi.h>
-
-/* Temparory solution for building, will be removed later */
-#include <linux/pci.h>
+#include <linux/slab.h>
 
 /**
  * alloc_msi_entry - Allocate an initialize msi_entry
--
cgit v1.2.3