author    Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-04-06 11:45:07 +0200
committer Ingo Molnar <mingo@elte.hu>	2009-04-07 10:48:58 +0200
commit    79f146415623fe74f39af67c0f6adc208939a410 (patch)
tree      2b8b3ac045b21cce1169b55bd298a93dba4a19b3	/kernel/perf_counter.c
parent    perf_counter: PERF_RECORD_TIME (diff)
perf_counter: counter overflow limit
Provide a means to auto-disable the counter after 'n' overflow events.

Create the counter with hw_event.disabled = 1, and then issue:

	ioctl(fd, PERF_COUNTER_IOC_REFRESH, n);

to set the limit and enable the counter.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.083139737@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
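For reference, a minimal userspace sketch of the flow described above (not part of the patch). It assumes the 2009-era perf_counter ABI: <linux/perf_counter.h>, a raw __NR_perf_counter_open syscall, and SIGIO delivery on overflow via O_ASYNC. Only hw_event.disabled = 1 and PERF_COUNTER_IOC_REFRESH are taken from the commit message; the syscall wrapper, the signal plumbing and the omitted event-selection fields are illustrative assumptions:

	/*
	 * Userspace sketch, assuming the 2009-era perf_counter ABI.
	 * Only hw_event.disabled and PERF_COUNTER_IOC_REFRESH come from
	 * the commit message; everything else here is illustrative.
	 */
	#include <fcntl.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	static volatile sig_atomic_t overflows;

	static void on_sigio(int sig)
	{
		(void)sig;
		overflows++;			/* one SIGIO per counter overflow */
	}

	/* thin wrapper; __NR_perf_counter_open is assumed to be defined */
	static int perf_counter_open(struct perf_counter_hw_event *hw_event,
				     pid_t pid, int cpu, int group_fd,
				     unsigned long flags)
	{
		return syscall(__NR_perf_counter_open, hw_event, pid, cpu,
			       group_fd, flags);
	}

	int main(void)
	{
		struct perf_counter_hw_event hw_event = {
			.disabled = 1,	/* create disabled, as described above */
			/* event selection fields omitted; ABI specific */
		};
		int fd = perf_counter_open(&hw_event, 0, -1, -1, 0);

		if (fd < 0) {
			perror("perf_counter_open");
			return 1;
		}

		/* route overflow wakeups to this task as SIGIO */
		signal(SIGIO, on_sigio);
		fcntl(fd, F_SETOWN, getpid());
		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

		/* enable the counter; auto-disable after 4 overflow events */
		ioctl(fd, PERF_COUNTER_IOC_REFRESH, 4);

		/* ... run the workload; after 4 overflows the counter is off ... */

		close(fd);
		return 0;
	}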
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	51
1 file changed, 41 insertions(+), 10 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 19990d1f0215..c05e10354bc9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -744,6 +744,12 @@ static void perf_counter_enable(struct perf_counter *counter)
spin_unlock_irq(&ctx->lock);
}
+static void perf_counter_refresh(struct perf_counter *counter, int refresh)
+{
+ atomic_add(refresh, &counter->event_limit);
+ perf_counter_enable(counter);
+}
+
/*
* Enable a counter and all its children.
*/
@@ -1311,6 +1317,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PERF_COUNTER_IOC_DISABLE:
perf_counter_disable_family(counter);
break;
+ case PERF_COUNTER_IOC_REFRESH:
+ perf_counter_refresh(counter, arg);
+ break;
default:
err = -ENOTTY;
}
@@ -1590,14 +1599,6 @@ void perf_counter_wakeup(struct perf_counter *counter)
kill_fasync(&counter->fasync, SIGIO, POLL_IN);
}
-static void perf_pending_wakeup(struct perf_pending_entry *entry)
-{
- struct perf_counter *counter = container_of(entry,
- struct perf_counter, pending);
-
- perf_counter_wakeup(counter);
-}
-
/*
* Pending wakeups
*
@@ -1607,6 +1608,22 @@ static void perf_pending_wakeup(struct perf_pending_entry *entry)
* single linked list and use cmpxchg() to add entries lockless.
*/
+static void perf_pending_counter(struct perf_pending_entry *entry)
+{
+ struct perf_counter *counter = container_of(entry,
+ struct perf_counter, pending);
+
+ if (counter->pending_disable) {
+ counter->pending_disable = 0;
+ perf_counter_disable(counter);
+ }
+
+ if (counter->pending_wakeup) {
+ counter->pending_wakeup = 0;
+ perf_counter_wakeup(counter);
+ }
+}
+
#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
@@ -1715,8 +1732,9 @@ struct perf_output_handle {
static inline void __perf_output_wakeup(struct perf_output_handle *handle)
{
if (handle->nmi) {
+ handle->counter->pending_wakeup = 1;
perf_pending_queue(&handle->counter->pending,
- perf_pending_wakeup);
+ perf_pending_counter);
} else
perf_counter_wakeup(handle->counter);
}
@@ -2063,8 +2081,21 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
int perf_counter_overflow(struct perf_counter *counter,
int nmi, struct pt_regs *regs)
{
+ int events = atomic_read(&counter->event_limit);
+ int ret = 0;
+
+ if (events && atomic_dec_and_test(&counter->event_limit)) {
+ ret = 1;
+ if (nmi) {
+ counter->pending_disable = 1;
+ perf_pending_queue(&counter->pending,
+ perf_pending_counter);
+ } else
+ perf_counter_disable(counter);
+ }
+
perf_counter_output(counter, nmi, regs);
- return 0;
+ return ret;
}
/*