-rw-r--r--  include/linux/ring_buffer.h       2
-rw-r--r--  kernel/trace/ring_buffer.c        8
-rw-r--r--  kernel/trace/trace.c              2
-rw-r--r--  kernel/trace/trace_hw_branches.c  2
4 files changed, 7 insertions, 7 deletions
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 3c103d636da3..8e6646a54acf 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
struct ring_buffer_iter;
/*
- * Don't reference this struct directly, use functions below.
+ * Don't refer to this struct directly, use functions below.
*/
struct ring_buffer_event {
u32 type:2, len:3, time_delta:27;
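The comment reworded above sits on the event header whose three bitfields (type:2, len:3 and time_delta:27) pack into a single 32-bit word, and the comment tells callers to use the accessor functions below rather than the raw fields. A minimal userspace sketch, using a hypothetical rb_event_header mirror rather than the kernel definition, illustrates the packing:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Userspace mirror of the bitfield layout shown in the hunk above;
     * rb_event_header is an illustrative name, not the kernel type.
     */
    struct rb_event_header {
            uint32_t type:2, len:3, time_delta:27;
    };

    int main(void)
    {
            /* 2 + 3 + 27 bits fit in one 32-bit word. */
            printf("header size: %zu bytes\n", sizeof(struct rb_event_header));
            return 0;
    }

On common ABIs this prints a size of 4 bytes, matching the single u32 the fields are declared over.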
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 10d202ea06f3..fa64e1f003eb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(tracing_off);
* tracing_off_permanent - permanently disable ring buffers
*
* This function, once called, will disable all ring buffers
- * permanenty.
+ * permanently.
*/
void tracing_off_permanent(void)
{
@@ -210,7 +210,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
struct buffer_data_page {
u64 time_stamp; /* page time stamp */
- local_t commit; /* write commited index */
+ local_t commit; /* write committed index */
unsigned char data[]; /* data of buffer page */
};
@@ -260,7 +260,7 @@ struct ring_buffer_per_cpu {
struct list_head pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
- struct buffer_page *commit_page; /* commited pages */
+ struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
unsigned long overrun;
unsigned long entries;
@@ -303,7 +303,7 @@ struct ring_buffer_iter {
* check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
*
- * As a safty measure we check to make sure the data pages have not
+ * As a safety measure we check to make sure the data pages have not
* been corrupted.
*/
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d89821283b47..d7c175a442df 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1963,7 +1963,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
struct tracer_opt *trace_opts = current_trace->flags->opts;
- /* calulate max size */
+ /* calculate max size */
for (i = 0; trace_options[i]; i++) {
len += strlen(trace_options[i]);
len += 3; /* "no" and space */
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index e3e7db61c067..0794dd33f27b 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -75,7 +75,7 @@ static void bts_trace_start(struct trace_array *tr)
}
/*
- * Start tracing on the current cpu.
+ * Stop tracing on the current cpu.
* The argument is ignored.
*
* pre: bts_tracer_mutex must be locked.