path: root/kernel/trace/trace_benchmark.c
author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>  2014-06-06 05:34:02 +0200
committer  Steven Rostedt <rostedt@goodmis.org>  2014-06-06 06:41:38 +0200
commit     34839f5a69989c0ee48386a788fba37eb75910f7 (patch)
tree       5f93240558f6028500b0373e740e907990680091 /kernel/trace/trace_benchmark.c
parent     tracing: Convert stddev into u64 in tracepoint benchmark (diff)
tracing: Only calculate stats of tracepoint benchmarks for 2^32 times
When calculating the average and standard deviation, it is required that the count be less than UINT_MAX, otherwise the do_div() will get undefined results. After 2^32 counts of data, the average and standard deviation should pretty much be set anyway.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
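A note on the constraint above: the kernel's do_div() divides a 64-bit value by a 32-bit divisor, so once the sample count grows past UINT_MAX only its low 32 bits reach the divider. Below is a minimal userspace sketch of that truncation; demo_do_div() is a hypothetical stand-in written only to show the effect, not the kernel macro itself.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical userspace stand-in for the kernel's do_div(): the divisor
 * is 32 bits wide, so a count that has grown past UINT_MAX is silently
 * truncated before the division.
 */
static uint32_t demo_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t total = 1000000000000ULL;	/* pretend sum of deltas */
	uint64_t cnt   = (1ULL << 32) + 1;	/* count just past UINT_MAX */

	/*
	 * The cast mirrors what happens implicitly in the kernel: only the
	 * low 32 bits of cnt reach the divider, so the "average" here is
	 * total divided by 1, not by 2^32 + 1.
	 */
	demo_do_div(&total, (uint32_t)cnt);
	printf("bogus avg = %llu\n", (unsigned long long)total);
	return 0;
}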
Diffstat (limited to 'kernel/trace/trace_benchmark.c')
-rw-r--r--  kernel/trace/trace_benchmark.c  37
1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 8bd3365a65b2..40a14cbcf8e0 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -16,7 +16,10 @@ static u64 bm_last;
static u64 bm_max;
static u64 bm_min;
static u64 bm_first;
-static s64 bm_cnt;
+static u64 bm_cnt;
+static u64 bm_stddev;
+static unsigned int bm_avg;
+static unsigned int bm_std;
/*
* This gets called in a loop recording the time it took to write
@@ -66,22 +69,35 @@ static void trace_do_benchmark(void)
bm_last = delta;
- bm_total += delta;
- bm_totalsq += delta * delta;
-
if (delta > bm_max)
bm_max = delta;
if (!bm_min || delta < bm_min)
bm_min = delta;
+ /*
+ * When bm_cnt is greater than UINT_MAX, it breaks the statistics
+ * accounting. Freeze the statistics when that happens.
+ * We should have enough data for the avg and stddev anyway.
+ */
+ if (bm_cnt > UINT_MAX) {
+ scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
+ "last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
+ bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev);
+ return;
+ }
+
+ bm_total += delta;
+ bm_totalsq += delta * delta;
+
+
if (bm_cnt > 1) {
/*
* Apply Welford's method to calculate standard deviation:
* s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
*/
stddev = (u64)bm_cnt * bm_totalsq - bm_total * bm_total;
- do_div(stddev, bm_cnt);
- do_div(stddev, bm_cnt - 1);
+ do_div(stddev, (u32)bm_cnt);
+ do_div(stddev, (u32)bm_cnt - 1);
} else
stddev = 0;
@@ -119,6 +135,10 @@ static void trace_do_benchmark(void)
scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
"last=%llu first=%llu max=%llu min=%llu avg=%u std=%d std^2=%lld",
bm_last, bm_first, bm_max, bm_min, avg, std, stddev);
+
+ bm_std = std;
+ bm_avg = avg;
+ bm_stddev = stddev;
}
static int benchmark_event_kthread(void *arg)
@@ -170,6 +190,9 @@ void trace_benchmark_unreg(void)
bm_max = 0;
bm_min = 0;
bm_cnt = 0;
- /* bm_first doesn't need to be reset but reset it anyway */
+ /* These don't need to be reset but reset them anyway */
bm_first = 0;
+ bm_std = 0;
+ bm_avg = 0;
+ bm_stddev = 0;
}
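For readers following the math in the hunks above, here is a rough userspace sketch of the same bookkeeping: running sums of the samples and of their squares, the freeze once the count passes UINT_MAX, and the variance computed as s^2 = (n * \Sum (x_i)^2 - (\Sum x_i)^2) / (n * (n - 1)). The struct and helper names are made up for illustration; only the arithmetic mirrors the patch.

#include <stdint.h>
#include <stdio.h>

/* Running statistics kept across samples, as in trace_do_benchmark(). */
struct bench_stats {
	uint64_t total;		/* sum of samples */
	uint64_t totalsq;	/* sum of squared samples */
	uint64_t cnt;		/* number of samples */
};

static void stats_add(struct bench_stats *s, uint64_t delta)
{
	s->cnt++;
	/*
	 * Mirror the patch: once the count goes past UINT_MAX, stop
	 * accumulating so the divisions below stay well defined.
	 */
	if (s->cnt > UINT32_MAX)
		return;
	s->total += delta;
	s->totalsq += delta * delta;
}

static uint64_t stats_variance(const struct bench_stats *s)
{
	uint64_t var;

	if (s->cnt < 2)
		return 0;
	/* s^2 = (n * sum(x^2) - (sum x)^2) / (n * (n - 1)) */
	var = s->cnt * s->totalsq - s->total * s->total;
	var /= s->cnt;		/* the kernel needs do_div() with a u32 divisor here */
	var /= s->cnt - 1;
	return var;
}

int main(void)
{
	struct bench_stats s = { 0 };
	uint64_t samples[] = { 100, 110, 90, 105, 95 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		stats_add(&s, samples[i]);

	/* prints avg=100 var=62 (integer-truncated sample variance) */
	printf("avg=%llu var=%llu\n",
	       (unsigned long long)(s.total / s.cnt),
	       (unsigned long long)stats_variance(&s));
	return 0;
}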