-rw-r--r--  include/linux/perf_counter.h |  3
-rw-r--r--  init/Kconfig                 |  5
-rw-r--r--  kernel/perf_counter.c        | 43
3 files changed, 51 insertions(+), 0 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 08c11a6afebc..065984c1ff57 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -53,6 +53,8 @@ enum hw_event_types {
PERF_COUNT_PAGE_FAULTS_MAJ = -7,
PERF_SW_EVENTS_MIN = -8,
+
+ PERF_TP_EVENTS_MIN = -65536
};
/*
@@ -222,6 +224,7 @@ struct perf_counter {
struct perf_data *usrdata;
struct perf_data data[2];
+ void (*destroy)(struct perf_counter *);
struct rcu_head rcu_head;
#endif
};
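
The header hunk above defines the tracepoint encoding: userspace asks for a tracepoint counter by passing a raw hw_event.type of PERF_TP_EVENTS_MIN + event_id, and the kernel recovers the ftrace event id by subtracting PERF_TP_EVENTS_MIN again (see tp_perf_counter_init() further down). The second hunk adds a destroy() callback so free_counter() can undo the enable. A minimal standalone sketch of the encode/decode arithmetic follows; the helper names are illustrative and not part of the patch.

/*
 * Illustrative only: the type <-> event id mapping implied by
 * PERF_TP_EVENTS_MIN.  Compiles and runs as plain userspace C.
 */
#include <stdio.h>

#define PERF_TP_EVENTS_MIN	(-65536)

static int tp_type(int event_id)
{
	/* user side: encode an ftrace event id as a raw counter type */
	return PERF_TP_EVENTS_MIN + event_id;
}

static int tp_event_id(int type)
{
	/* kernel side: the decode done by tp_perf_counter_init() */
	return type - PERF_TP_EVENTS_MIN;
}

int main(void)
{
	int id = 42;	/* hypothetical ftrace event id */

	printf("type %d -> event id %d\n", tp_type(id), tp_event_id(tp_type(id)));
	return 0;
}

For realistic event ids these types are far more negative than any of the named software events (PERF_SW_EVENTS_MIN is -8), so they fall through to the default: branch of sw_perf_counter_init(), which this patch routes to tp_perf_counter_init().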
diff --git a/init/Kconfig b/init/Kconfig
index 38a2ecd47c37..4f647142f2e6 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -947,6 +947,11 @@ config PERF_COUNTERS
Say Y if unsure.
+config EVENT_PROFILE
+ bool "Tracepoint profile sources"
+ depends on PERF_COUNTERS && EVENT_TRACER
+ default y
+
endmenu
config VM_EVENT_COUNTERS
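
The new EVENT_PROFILE option only appears when both PERF_COUNTERS and EVENT_TRACER are enabled, and it defaults to y. When it is off, the kernel/perf_counter.c change below builds a stub tp_perf_counter_init() that just returns NULL, so tracepoint counter types get no ops. A tiny standalone sketch of that gating pattern, with illustrative names (build with or without -DCONFIG_EVENT_PROFILE to see both paths):

#include <stdio.h>

#ifdef CONFIG_EVENT_PROFILE
static const char *tp_counter_ops(void)
{
	return "perf_ops_generic";	/* real path returns &perf_ops_generic */
}
#else
static const char *tp_counter_ops(void)
{
	return NULL;			/* stub path: no tracepoint counters */
}
#endif

int main(void)
{
	const char *ops = tp_counter_ops();

	printf("%s\n", ops ? ops : "EVENT_PROFILE off: no ops for tracepoint types");
	return 0;
}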
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 97f891ffeb40..0bbe3e45ba0d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1152,6 +1152,9 @@ static void free_counter_rcu(struct rcu_head *head)
static void free_counter(struct perf_counter *counter)
{
+ if (counter->destroy)
+ counter->destroy(counter);
+
call_rcu(&counter->rcu_head, free_counter_rcu);
}
@@ -1727,6 +1730,45 @@ static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
.read = cpu_migrations_perf_counter_read,
};
+#ifdef CONFIG_EVENT_PROFILE
+void perf_tpcounter_event(int event_id)
+{
+ perf_swcounter_event(PERF_TP_EVENTS_MIN + event_id, 1, 1,
+ task_pt_regs(current));
+}
+
+extern int ftrace_profile_enable(int);
+extern void ftrace_profile_disable(int);
+
+static void tp_perf_counter_destroy(struct perf_counter *counter)
+{
+ int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN;
+
+ ftrace_profile_disable(event_id);
+}
+
+static const struct hw_perf_counter_ops *
+tp_perf_counter_init(struct perf_counter *counter)
+{
+ int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN;
+ int ret;
+
+ ret = ftrace_profile_enable(event_id);
+ if (ret)
+ return NULL;
+
+ counter->destroy = tp_perf_counter_destroy;
+
+ return &perf_ops_generic;
+}
+#else
+static const struct hw_perf_counter_ops *
+tp_perf_counter_init(struct perf_counter *counter)
+{
+ return NULL;
+}
+#endif
+
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
@@ -1772,6 +1814,7 @@ sw_perf_counter_init(struct perf_counter *counter)
hw_ops = &perf_ops_cpu_migrations;
break;
default:
+ hw_ops = tp_perf_counter_init(counter);
break;
}