summary refs log tree commit diff stats
path: root/kernel/trace
diff options
context:
space:
mode:
authorSteven Rostedt (Red Hat) <rostedt@goodmis.org>2014-11-20 16:05:36 +0100
committerSteven Rostedt <rostedt@goodmis.org>2014-11-20 16:05:36 +0100
commit0af26492d5f5c00a08d52e9f3f3831faead90246 (patch)
treee97d37b4f96b467eec741ba1d3b64356b0edb4b9 /kernel/trace
parenttracing: Deletion of an unnecessary check before iput() (diff)
downloadlinux-0af26492d5f5c00a08d52e9f3f3831faead90246.tar.xz
linux-0af26492d5f5c00a08d52e9f3f3831faead90246.zip
tracing/trivial: Fix typos and make an int into a bool
Fix up a few typos in comments and convert an int into a bool in update_traceon_count(). Link: http://lkml.kernel.org/r/546DD445.5080108@hitachi.com Suggested-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/ftrace.c2
-rw-r--r--kernel/trace/trace_functions.c6
2 files changed, 4 insertions, 4 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fa0f36bb32e9..588af40d33db 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1119,7 +1119,7 @@ static struct ftrace_ops global_ops = {
/*
* This is used by __kernel_text_address() to return true if the
- * the address is on a dynamically allocated trampoline that would
+ * address is on a dynamically allocated trampoline that would
* not return true for either core_kernel_text() or
* is_module_text_address().
*/
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 973db52eb070..fcd41a166405 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -261,14 +261,14 @@ static struct tracer function_trace __tracer_data =
};
#ifdef CONFIG_DYNAMIC_FTRACE
-static void update_traceon_count(void **data, int on)
+static void update_traceon_count(void **data, bool on)
{
long *count = (long *)data;
long old_count = *count;
/*
* Tracing gets disabled (or enabled) once per count.
- * This function can be called at the same time on mulitple CPUs.
+ * This function can be called at the same time on multiple CPUs.
* It is fine if both disable (or enable) tracing, as disabling
* (or enabling) the second time doesn't do anything as the
* state of the tracer is already disabled (or enabled).
@@ -288,7 +288,7 @@ static void update_traceon_count(void **data, int on)
* the new state is visible before changing the counter by
* one minus the old counter. This guarantees that another CPU
* executing this code will see the new state before seeing
- * the new counter value, and would not do anthing if the new
+ * the new counter value, and would not do anything if the new
* counter is seen.
*
* Note, there is no synchronization between this and a user