author		Athira Rajeev <atrajeev@linux.vnet.ibm.com>	2021-02-03 07:55:36 +0100
committer	Michael Ellerman <mpe@ellerman.id.au>		2021-02-08 15:09:44 +0100
commit		e79b76e03b712e42c58d9649c92571e346abc38b
tree		4e324cfb63a0ca6a880fa0360f575d93946ca2b7 /arch/powerpc/perf
parent		powerpc/perf: Include PMCs as part of per-cpu cpuhw_events struct
powerpc/perf: Expose Performance Monitor Counter SPRs as part of extended regs
Currently, the Monitor Mode Control Registers and the sampling registers are
exposed as part of the extended regs. This patch adds support to also include
the Performance Monitor Counter registers (PMC1 to PMC6).
The PMCs are saved by the perf interrupt handler into the per-cpu array
'pmcs' in struct cpu_hw_events. While capturing the register values for
extended regs, fetch these saved PMC values.
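
For illustration, a minimal sketch of that save path, assuming the 'pmcs'
array added to struct cpu_hw_events by the parent patch (the helper name
save_pmcs is hypothetical, not the handler's actual structure):

/*
 * Hypothetical sketch: snapshot PMC1..PMC6 into the per-cpu
 * cpu_hw_events at PMI time, so the sampled values reflect the
 * counters as seen by the interrupt handler.
 */
static void save_pmcs(struct cpu_hw_events *cpuhw)
{
	cpuhw->pmcs[0] = mfspr(SPRN_PMC1);
	cpuhw->pmcs[1] = mfspr(SPRN_PMC2);
	cpuhw->pmcs[2] = mfspr(SPRN_PMC3);
	cpuhw->pmcs[3] = mfspr(SPRN_PMC4);
	cpuhw->pmcs[4] = mfspr(SPRN_PMC5);
	cpuhw->pmcs[5] = mfspr(SPRN_PMC6);
}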
Simplify the PERF_REG_PMU_MASK_300/31 definitions to cover the PMU SPRs
from MMCR0 to PMC6, and exclude the unsupported SPRs (MMCR3, SIER2, SIER3)
from the extended mask value for CPU_FTR_ARCH_300 in the new definition.
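
The uapi header change is outside arch/powerpc/perf and so does not appear
in the diffstat below. As a rough sketch only, assuming the twelve PMU SPRs
(MMCR0..MMCR3, SIER2/SIER3, PMC1..PMC6) occupy consecutive sample indices
and that PERF_EXCLUDE_REG_EXT_300 collects the MMCR3/SIER2/SIER3 bits, the
simplified masks could look like:

/* Illustrative only: 12 consecutive index bits starting at MMCR0. */
#define PERF_REG_PMU_MASK_31	(0xfffULL << PERF_REG_POWERPC_MMCR0)
/* ISA v3.0: same range minus the SPRs it does not implement. */
#define PERF_REG_PMU_MASK_300	(PERF_REG_PMU_MASK_31 & ~PERF_EXCLUDE_REG_EXT_300)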
PERF_REG_EXTENDED_MAX is used to check whether any index beyond the extended
registers is requested in the sample. A single PERF_REG_EXTENDED_MAX suffices
for both CPU_FTR_ARCH_300 and CPU_FTR_ARCH_31, since perf_reg_validate()
already checks the extended mask for the presence of any unsupported
register.
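
A minimal sketch of that validation argument (validate_sample_regs and the
extended_mask parameter are illustrative names, not the kernel's exact code):

/*
 * Illustrative check: any mask bit outside the fixed pt_regs
 * range or the platform's extended mask is rejected up front,
 * so perf_reg_value() never sees an unsupported extended index
 * below PERF_REG_EXTENDED_MAX.
 */
static inline int validate_sample_regs(u64 mask, u64 extended_mask)
{
	u64 allowed = ((1ULL << PERF_REG_POWERPC_MAX) - 1) | extended_mask;

	return (mask & ~allowed) ? -EINVAL : 0;
}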
Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1612335337-1888-3-git-send-email-atrajeev@linux.vnet.ibm.com
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--	arch/powerpc/perf/core-book3s.c	11
-rw-r--r--	arch/powerpc/perf/perf_regs.c	13
2 files changed, 15 insertions, 9 deletions
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 72bedd812b60..278974f3264d 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -146,6 +146,17 @@ bool is_sier_available(void)
 	return false;
 }
 
+/*
+ * Return PMC value corresponding to the
+ * index passed.
+ */
+unsigned long get_pmcs_ext_regs(int idx)
+{
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+
+	return cpuhw->pmcs[idx];
+}
+
 static bool regs_use_siar(struct pt_regs *regs)
 {
 	/*
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 6f681b105eec..b931eed482c9 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -75,6 +75,8 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
 static u64 get_ext_regs_value(int idx)
 {
 	switch (idx) {
+	case PERF_REG_POWERPC_PMC1 ... PERF_REG_POWERPC_PMC6:
+		return get_pmcs_ext_regs(idx - PERF_REG_POWERPC_PMC1);
 	case PERF_REG_POWERPC_MMCR0:
 		return mfspr(SPRN_MMCR0);
 	case PERF_REG_POWERPC_MMCR1:
@@ -95,13 +97,6 @@ static u64 get_ext_regs_value(int idx)
 
 u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
-	u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX;
-
-	if (cpu_has_feature(CPU_FTR_ARCH_31))
-		perf_reg_extended_max = PERF_REG_MAX_ISA_31;
-	else if (cpu_has_feature(CPU_FTR_ARCH_300))
-		perf_reg_extended_max = PERF_REG_MAX_ISA_300;
-
 	if (idx == PERF_REG_POWERPC_SIER &&
 	   (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
 	    IS_ENABLED(CONFIG_PPC32) ||
@@ -113,14 +108,14 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 	    IS_ENABLED(CONFIG_PPC32)))
 		return 0;
 
-	if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max)
+	if (idx >= PERF_REG_POWERPC_MAX && idx < PERF_REG_EXTENDED_MAX)
 		return get_ext_regs_value(idx);
 
 	/*
 	 * If the idx is referring to value beyond the
 	 * supported registers, return 0 with a warning
 	 */
-	if (WARN_ON_ONCE(idx >= perf_reg_extended_max))
+	if (WARN_ON_ONCE(idx >= PERF_REG_EXTENDED_MAX))
 		return 0;
 
 	return regs_get_register(regs, pt_regs_offset[idx]);