author | Marc Zyngier <marc.zyngier@arm.com> | 2016-07-08 16:56:04 +0200 |
---|---|---|
committer | Catalin Marinas <catalin.marinas@arm.com> | 2016-07-08 18:39:55 +0200 |
commit | 19a469a58720ea96b649b06fb09ddfd3e831aa69 (patch) | |
tree | aa50f6bce42d8e4ed1aaf29b6c0025192b71f695 /drivers/perf | |
parent | arm64: Fix vdso-offsets.h dependency (diff) | |
drivers/perf: arm-pmu: Handle per-interrupt affinity mask
On a big.LITTLE system, PMUs can be wired to CPUs using per-CPU
interrupts (PPIs). In this case, it is important to make sure that
the enable/disable operations happen on the right set of CPUs.
So instead of relying on the interrupt-affinity property, we can
use the actual percpu affinity that DT exposes as part of the
interrupt specifier. The DT binding is also updated to reflect
the fact that the interrupt-affinity property shouldn't be used
in that case.
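To make the intent concrete, here is a minimal sketch of the pattern the change moves to: the PPI is enabled only on the CPUs recorded in the PMU's supported_cpus mask, not on every online CPU. This is not the patch itself; the helper name pmu_enable_ppi() is hypothetical, and it assumes the kernel's on_each_cpu_mask() and enable_percpu_irq() APIs.

```c
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/perf/arm_pmu.h>
#include <linux/smp.h>

/* Cross-call body: runs on each targeted CPU; 'info' points to the IRQ. */
static void pmu_enable_ppi_on_cpu(void *info)
{
	int irq = *(int *)info;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

/*
 * Hypothetical helper: enable a per-CPU PMU interrupt only on the CPUs
 * in the PMU's supported_cpus mask. Plain on_each_cpu() would also hit
 * the other cluster's CPUs, which this PMU does not cover.
 */
static void pmu_enable_ppi(struct arm_pmu *pmu, int irq)
{
	on_each_cpu_mask(&pmu->supported_cpus, pmu_enable_ppi_on_cpu, &irq, 1);
}
```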
Acked-by: Rob Herring <robh@kernel.org>
Tested-by: Caesar Wang <wxt@rock-chips.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'drivers/perf')
-rw-r--r-- | drivers/perf/arm_pmu.c | 27 |
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 140436a046c0..8e4d7f590b06 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -603,7 +603,8 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+		on_each_cpu_mask(&cpu_pmu->supported_cpus,
+				 cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
@@ -645,7 +646,9 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 				irq);
 			return err;
 		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+
+		on_each_cpu_mask(&cpu_pmu->supported_cpus,
+				 cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			int cpu = i;
@@ -961,9 +964,23 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 		i++;
 	} while (1);
 
-	/* If we didn't manage to parse anything, claim to support all CPUs */
-	if (cpumask_weight(&pmu->supported_cpus) == 0)
-		cpumask_setall(&pmu->supported_cpus);
+	/* If we didn't manage to parse anything, try the interrupt affinity */
+	if (cpumask_weight(&pmu->supported_cpus) == 0) {
+		if (!using_spi) {
+			/* If using PPIs, check the affinity of the partition */
+			int ret, irq;
+
+			irq = platform_get_irq(pdev, 0);
+			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
+			if (ret) {
+				kfree(irqs);
+				return ret;
+			}
+		} else {
+			/* Otherwise default to all CPUs */
+			cpumask_setall(&pmu->supported_cpus);
+		}
+	}
 
 	/* If we matched up the IRQ affinities, use them to route the SPIs */
 	if (using_spi && i == pdev->num_resources)
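For reference, here is a rough sketch of the fallback the last hunk introduces, assuming the genirq helper irq_get_percpu_devid_partition(), which copies the affinity recorded for a percpu-devid interrupt into a caller-supplied cpumask. The wrapper name pmu_cpus_from_ppi() is made up for illustration.

```c
#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

/*
 * Hypothetical helper: derive the set of CPUs a PPI-wired PMU covers
 * from the interrupt itself instead of the "interrupt-affinity" DT
 * property.
 */
static int pmu_cpus_from_ppi(struct platform_device *pdev,
			     struct cpumask *supported)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/*
	 * Returns 0 and fills 'supported' when the IRQ is a percpu-devid
	 * interrupt with a recorded affinity partition; returns non-zero
	 * otherwise (e.g. the IRQ is not percpu-devid).
	 */
	return irq_get_percpu_devid_partition(irq, supported);
}
```

Note that the actual hunk treats a failure here as fatal for the PPI case; only the SPI path defaults to cpumask_setall().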