author		Ingo Molnar <mingo@elte.hu>		2006-06-29 11:24:38 +0200
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-29 19:26:22 +0200
commit		a53da52fd743fd637637572838c0a7af23a2d038 (patch)
tree		b62c205b609356db5bca378c6d04d6d899bfd499
parent		[PATCH] genirq: sem2mutex probe_sem -> probing_active (diff)
[PATCH] genirq: cleanup: merge irq_affinity[] into irq_desc[]
Consolidation: remove the irq_affinity[NR_IRQS] array and move it into the
irq_desc[NR_IRQS].affinity field.

[akpm@osdl.org: sparc64 build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	arch/alpha/kernel/irq.c			2
-rw-r--r--	arch/i386/kernel/irq.c			2
-rw-r--r--	arch/ia64/kernel/irq.c			4
-rw-r--r--	arch/parisc/kernel/irq.c		8
-rw-r--r--	arch/powerpc/kernel/irq.c		2
-rw-r--r--	arch/powerpc/platforms/pseries/xics.c	4
-rw-r--r--	arch/powerpc/sysdev/mpic.c		2
-rw-r--r--	arch/ppc/syslib/open_pic.c		4
-rw-r--r--	arch/sparc64/kernel/irq.c		2
-rw-r--r--	arch/x86_64/kernel/irq.c		2
-rw-r--r--	include/linux/irq.h			7
-rw-r--r--	kernel/irq/handle.c			5
-rw-r--r--	kernel/irq/manage.c			2
-rw-r--r--	kernel/irq/proc.c			4
14 files changed, 26 insertions(+), 24 deletions(-)
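The shape of the consolidation, independent of any one architecture: the standalone irq_affinity[NR_IRQS] array goes away and each descriptor carries its own affinity mask, initialised to an all-CPUs default in kernel/irq/handle.c. Below is a minimal userspace sketch of that before/after layout, not kernel code; NR_IRQS, the reduced cpumask_t, and the demo main() are stand-ins for illustration only (the range initializer is a GNU C extension, as in the kernel source).

```c
/* Simplified, userspace-compilable sketch of the consolidation in this
 * patch. cpumask_t is reduced to an unsigned long bitmap; the locking,
 * chip ops and status handling of the real irq_desc are omitted.
 */
#include <stdio.h>

#define NR_IRQS       16               /* stand-in value */
#define CPU_MASK_ALL  (~0UL)           /* stand-in for the kernel cpumask */

typedef unsigned long cpumask_t;

struct irq_desc {
	cpumask_t affinity;            /* was: cpumask_t irq_affinity[NR_IRQS] */
};

/* Old scheme (removed by this patch): a separate parallel array,
 * indexed by irq number:
 *   cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 */

/* New scheme: the mask lives inside the per-irq descriptor and defaults
 * to "all CPUs", matching the kernel/irq/handle.c hunk below. */
struct irq_desc irq_desc[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = { .affinity = CPU_MASK_ALL },
};

/* Mirrors the include/linux/irq.h helper: writers now touch the field. */
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
	irq_desc[irq].affinity = mask;
}

int main(void)
{
	set_native_irq_info(3, 0x1UL);  /* pin IRQ 3 to CPU 0 */
	printf("irq 3 affinity: %#lx\n", irq_desc[3].affinity);
	printf("irq 5 affinity: %#lx\n", irq_desc[5].affinity);
	return 0;
}
```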
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index ec9d243d42c9..63af36cf7f6e 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -56,7 +56,7 @@ select_smp_affinity(unsigned int irq)
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
- irq_affinity[irq] = cpumask_of_cpu(cpu);
+ irq_desc[irq].affinity = cpumask_of_cpu(cpu);
irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
return 0;
}
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index b942a5918dab..3336cef9605a 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -291,7 +291,7 @@ void fixup_irqs(cpumask_t map)
if (irq == 2)
continue;
- cpus_and(mask, irq_affinity[irq], map);
+ cpus_and(mask, irq_desc[irq].affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 2645153dba8a..c4e1b3b60b48 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -100,7 +100,7 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
cpu_set(cpu_logical_id(hwid), mask);
if (irq < NR_IRQS) {
- irq_affinity[irq] = mask;
+ irq_desc[irq].affinity = mask;
irq_redir[irq] = (char) (redir & 0xff);
}
}
@@ -131,7 +131,7 @@ static void migrate_irqs(void)
if (desc->status == IRQ_PER_CPU)
continue;
- cpus_and(mask, irq_affinity[irq], cpu_online_map);
+ cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
if (any_online_cpu(mask) == NR_CPUS) {
/*
* Save it for phase 2 processing
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 26e69f73fdb0..ea6a55e1a0c5 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -94,7 +94,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
if (irq == TIMER_IRQ || irq == IPI_IRQ) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
- irq_affinity[irq] = CPU_MASK_ALL;
+ irq_desc[irq].affinity = CPU_MASK_ALL;
return -EINVAL;
}
@@ -110,7 +110,7 @@ static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
if (cpu_check_affinity(irq, &dest))
return;
- irq_affinity[irq] = dest;
+ irq_desc[irq].affinity = dest;
}
#endif
@@ -265,7 +265,7 @@ int txn_alloc_irq(unsigned int bits_wide)
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
- irq_affinity[irq] = cpumask_of_cpu(cpu);
+ irq_desc[irq].affinity = cpumask_of_cpu(cpu);
#endif
return cpu_data[cpu].txn_addr;
@@ -326,7 +326,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
/* Work our way from MSb to LSb...same order we alloc EIRs */
for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
#ifdef CONFIG_SMP
- cpumask_t dest = irq_affinity[irq];
+ cpumask_t dest = irq_desc[irq].affinity;
#endif
if (!(bit & eirr_val))
continue;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 8cfc779d882d..24f6050aa4ab 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -164,7 +164,7 @@ void fixup_irqs(cpumask_t map)
if (irq_desc[irq].status & IRQ_PER_CPU)
continue;
- cpus_and(mask, irq_affinity[irq], map);
+ cpus_and(mask, irq_desc[irq].affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index bdc9e26a93cf..19c03dd43000 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -238,7 +238,7 @@ static int get_irq_server(unsigned int irq)
{
unsigned int server;
/* For the moment only implement delivery to all cpus or one cpu */
- cpumask_t cpumask = irq_affinity[irq];
+ cpumask_t cpumask = irq_desc[irq].affinity;
cpumask_t tmp = CPU_MASK_NONE;
if (!distribute_irqs)
@@ -729,7 +729,7 @@ void xics_migrate_irqs_away(void)
/* Reset affinity to all cpus */
desc->chip->set_affinity(virq, CPU_MASK_ALL);
- irq_affinity[virq] = CPU_MASK_ALL;
+ irq_desc[irq].affinity = CPU_MASK_ALL;
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index f4613ee6b7a2..28df9c827ca6 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -906,7 +906,7 @@ void mpic_setup_this_cpu(void)
/* let the mpic know we want intrs. default affinity is 0xffffffff
* until changed via /proc. That's how it's done on x86. If we want
* it differently, then we should make sure we also change the default
- * values of irq_affinity in irq.c.
+ * values of irq_desc[].affinity in irq.c.
*/
if (distribute_irqs) {
for (i = 0; i < mpic->num_sources ; i++)
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index 822058c2983e..767a0bc95817 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -615,8 +615,8 @@ void __devinit do_openpic_setup_cpu(void)
/* let the openpic know we want intrs. default affinity
* is 0xffffffff until changed via /proc
* That's how it's done on x86. If we want it differently, then
- * we should make sure we also change the default values of irq_affinity
- * in irq.c.
+ * we should make sure we also change the default values of
+ * irq_desc[].affinity in irq.c.
*/
for (i = 0; i < NumSources; i++)
openpic_mapirq(i, msk, CPU_MASK_ALL);
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 5d7c821ab7f6..ab9e640df228 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -224,7 +224,7 @@ static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
- cpumask_t mask = irq_affinity[virt_irq];
+ cpumask_t mask = irq_desc[virt_irq].affinity;
int cpuid;
if (cpus_equal(mask, CPU_MASK_ALL)) {
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 0c2fb2c77bd0..a1f1df5f7bfc 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -146,7 +146,7 @@ void fixup_irqs(cpumask_t map)
if (irq == 2)
continue;
- cpus_and(mask, irq_affinity[irq], map);
+ cpus_and(mask, irq_desc[irq].affinity, map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 9597a6904239..6e3ad6245bd3 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -77,6 +77,9 @@ typedef struct irq_desc {
unsigned int irq_count; /* For detecting broken interrupts */
unsigned int irqs_unhandled;
spinlock_t lock;
+#ifdef CONFIG_SMP
+ cpumask_t affinity;
+#endif
#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
unsigned int move_irq; /* Flag need to re-target intr dest*/
#endif
@@ -96,12 +99,10 @@ irq_descp (int irq)
extern int setup_irq(unsigned int irq, struct irqaction * new);
#ifdef CONFIG_GENERIC_HARDIRQS
-extern cpumask_t irq_affinity[NR_IRQS];
-
#ifdef CONFIG_SMP
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
- irq_affinity[irq] = mask;
+ irq_desc[irq].affinity = mask;
}
#else
static inline void set_native_irq_info(int irq, cpumask_t mask)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f9d95705a4ac..cc786aaf30d6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -32,7 +32,10 @@ irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
[0 ... NR_IRQS-1] = {
.status = IRQ_DISABLED,
.chip = &no_irq_type,
- .lock = SPIN_LOCK_UNLOCKED
+ .lock = SPIN_LOCK_UNLOCKED,
+#ifdef CONFIG_SMP
+ .affinity = CPU_MASK_ALL
+#endif
}
};
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 31ee1f3bfcf9..c53662edc73d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,6 @@
#ifdef CONFIG_SMP
-cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-
#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
#endif
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 90fe05f23e69..847b98a611e0 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -36,7 +36,7 @@ void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
set_balance_irq_affinity(irq, mask_val);
- irq_affinity[irq] = mask_val;
+ irq_desc[irq].affinity = mask_val;
irq_desc[irq].chip->set_affinity(irq, mask_val);
}
#endif
@@ -44,7 +44,7 @@ void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
static int irq_affinity_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
+ int len = cpumask_scnprintf(page, count, irq_desc[(long)data].affinity);
if (count - len < 2)
return -EINVAL;