author	Paul Mackerras <paulus@samba.org>	2009-05-14 05:29:14 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-05-15 16:38:55 +0200
commit	ef923214a4816c289e4af2d67a9ebb1a31e4ac61 (patch)
tree	bf850f4f53a4f8391b6b9c0335e58364668586d9 /arch/powerpc/kernel/perf_counter.c
parent	perf_counter: frequency based adaptive irq_period, 32-bit fix (diff)
perf_counter: powerpc: use u64 for event codes internally
Although the perf_counter API allows 63-bit raw event codes, internally in the powerpc back-end we had been using 32-bit event codes. This expands them to 64 bits so that we can add bits for specifying threshold start/stop events and instruction sampling modes later.

This also corrects the return value of can_go_on_limited_pmc; we were returning an event code rather than just a 0/1 value in some circumstances. That didn't particularly matter while event codes were 32-bit, but now that event codes are 64-bit it might, so this fixes it.

[ Impact: extend PowerPC perfcounter interfaces from u32 to u64 ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18955.36874.472452.353104@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
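The second fix matters because can_go_on_limited_pmc() returns int: passing a 64-bit event code back through that return value truncates it, so a code whose low 32 bits happen to be zero would read as "not feasible" even when an alternative exists. A minimal userspace sketch of that hazard follows; it is not kernel code, and the function names and the 64-bit event code value are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Old style: leaks the event code through the int return value. */
static int can_go_old(uint64_t ev)
{
	return (int)ev;		/* truncates: only the low 32 bits survive */
}

/* New style (what the patch does): a genuine 0/1 answer. */
static int can_go_new(uint64_t ev)
{
	return ev != 0;		/* no truncation, just a boolean */
}

int main(void)
{
	/* hypothetical 64-bit event code whose low 32 bits are zero */
	uint64_t ev = 0x100000000ULL;

	printf("old: %d  new: %d\n", can_go_old(ev), can_go_new(ev));
	/* prints "old: 0  new: 1" -- the old form misreports failure */
	return 0;
}

With 32-bit event codes this could not happen, since code 0 means "don't count" on all machines anyway; returning n > 0 in the patch below removes the dependence on the code's bit pattern entirely.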
Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	26
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index db8d5cafc159..8d4cafc84b82 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -26,7 +26,7 @@ struct cpu_hw_counters {
int n_limited;
u8 pmcs_enabled;
struct perf_counter *counter[MAX_HWCOUNTERS];
- unsigned int events[MAX_HWCOUNTERS];
+ u64 events[MAX_HWCOUNTERS];
unsigned int flags[MAX_HWCOUNTERS];
u64 mmcr[3];
struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
@@ -131,11 +131,11 @@ static void write_pmc(int idx, unsigned long val)
* and see if any combination of alternative codes is feasible.
* The feasible set is returned in event[].
*/
-static int power_check_constraints(unsigned int event[], unsigned int cflags[],
+static int power_check_constraints(u64 event[], unsigned int cflags[],
int n_ev)
{
u64 mask, value, nv;
- unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
@@ -564,7 +564,7 @@ void hw_perf_enable(void)
}
static int collect_events(struct perf_counter *group, int max_count,
- struct perf_counter *ctrs[], unsigned int *events,
+ struct perf_counter *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
@@ -752,11 +752,11 @@ struct pmu power_pmu = {
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
-static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev,
+static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
unsigned int flags)
{
int n;
- unsigned int alt[MAX_EVENT_ALTERNATIVES];
+ u64 alt[MAX_EVENT_ALTERNATIVES];
if (counter->hw_event.exclude_user
|| counter->hw_event.exclude_kernel
@@ -776,10 +776,8 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev,
flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
n = ppmu->get_alternatives(ev, flags, alt);
- if (n)
- return alt[0];
- return 0;
+ return n > 0;
}
/*
@@ -787,10 +785,9 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev,
* and return the event code, or 0 if there is no such alternative.
* (Note: event code 0 is "don't count" on all machines.)
*/
-static unsigned long normal_pmc_alternative(unsigned long ev,
- unsigned long flags)
+static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
- unsigned int alt[MAX_EVENT_ALTERNATIVES];
+ u64 alt[MAX_EVENT_ALTERNATIVES];
int n;
flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
@@ -820,9 +817,10 @@ static void hw_perf_counter_destroy(struct perf_counter *counter)
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
- unsigned long ev, flags;
+ u64 ev;
+ unsigned long flags;
struct perf_counter *ctrs[MAX_HWCOUNTERS];
- unsigned int events[MAX_HWCOUNTERS];
+ u64 events[MAX_HWCOUNTERS];
unsigned int cflags[MAX_HWCOUNTERS];
int n;
int err;