author: Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-05-16 17:17:06 +0200
committer: Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-06-04 23:28:19 +0200
commit: 6167c205ca9c21c82f19978295d99fb60efcf98b (patch)
tree: 89b66440dd76a0fb1303c340d8e2f3d73392b0ec  /kernel/trace/ring_buffer.c
parent: tracing/selftest: Add test to test simple snapshot trigger for trace_marker (diff)
ring-buffer: Fix a bunch of typos in comments
An anonymous source sent me a bunch of typo fixes in the comments of the ring_buffer.c file. That source did not want to be associated with this patch because they don't want to be known as "one of those" committers (you know who you are!). They gave me permission to sign this off in my own name.

Suggested-by: One-of-those-commiters@YouKnowWhoYouAre.org
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c9cb9767d49b..6a46af21765c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -809,7 +809,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
*
* You can see, it is legitimate for the previous pointer of
* the head (or any page) not to point back to itself. But only
- * temporarially.
+ * temporarily.
*/
#define RB_PAGE_NORMAL 0UL
@@ -906,7 +906,7 @@ static void rb_list_head_clear(struct list_head *list)
}
/*
- * rb_head_page_dactivate - clears head page ptr (for free list)
+ * rb_head_page_deactivate - clears head page ptr (for free list)
*/
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
@@ -1780,7 +1780,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
put_online_cpus();
} else {
- /* Make sure this CPU has been intitialized */
+ /* Make sure this CPU has been initialized */
if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
goto out;
@@ -2325,7 +2325,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
/*
* If we need to add a timestamp, then we
- * add it to the start of the resevered space.
+ * add it to the start of the reserved space.
*/
if (unlikely(info->add_timestamp)) {
bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
@@ -2681,7 +2681,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
* ring_buffer_nest_start - Allow to trace while nested
* @buffer: The ring buffer to modify
*
- * The ring buffer has a safty mechanism to prevent recursion.
+ * The ring buffer has a safety mechanism to prevent recursion.
* But there may be a case where a trace needs to be done while
* tracing something else. In this case, calling this function
* will allow this function to nest within a currently active
@@ -2699,7 +2699,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer)
preempt_disable_notrace();
cpu = raw_smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
- /* This is the shift value for the above recusive locking */
+ /* This is the shift value for the above recursive locking */
cpu_buffer->nest += NESTED_BITS;
}
@@ -2718,7 +2718,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer)
/* disabled by ring_buffer_nest_start() */
cpu = raw_smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
- /* This is the shift value for the above recusive locking */
+ /* This is the shift value for the above recursive locking */
cpu_buffer->nest -= NESTED_BITS;
preempt_enable_notrace();
}
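To illustrate the ring_buffer_nest_start()/ring_buffer_nest_end() comments above (a sketch, not part of the patch): the pair brackets a write to one ring buffer that is made while another write is already in progress, so the recursion protection does not reject it. The buffer pointer, the value written, and the function name below are hypothetical.

#include <linux/ring_buffer.h>

/* Hypothetical: write to a second buffer while a write to another is in flight. */
static void example_nested_write(struct ring_buffer *inner)
{
	struct ring_buffer_event *event;

	/* Tell the recursion protection that this nested write is intentional. */
	ring_buffer_nest_start(inner);

	event = ring_buffer_lock_reserve(inner, sizeof(int));
	if (event) {
		*(int *)ring_buffer_event_data(event) = 1;
		ring_buffer_unlock_commit(inner, event);
	}

	/* Must pair with ring_buffer_nest_start(); re-enables preemption. */
	ring_buffer_nest_end(inner);
}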
@@ -2907,7 +2907,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
* @buffer: the ring buffer to reserve from
* @length: the length of the data to reserve (excluding event header)
*
- * Returns a reseverd event on the ring buffer to copy directly to.
+ * Returns a reserved event on the ring buffer to copy directly to.
* The user of this interface will need to get the body to write into
* and can use the ring_buffer_event_data() interface.
*
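As a usage illustration of the reserve/commit interface documented above (not part of the patch), here is a minimal sketch assuming a hypothetical my_buffer created elsewhere with ring_buffer_alloc() and a caller-defined payload struct:

#include <linux/ring_buffer.h>
#include <linux/types.h>

struct my_payload {
	u32 pid;
	u64 value;
};

static void example_write(struct ring_buffer *my_buffer, u32 pid, u64 value)
{
	struct ring_buffer_event *event;
	struct my_payload *p;

	/* Length excludes the event header, as the comment above notes. */
	event = ring_buffer_lock_reserve(my_buffer, sizeof(*p));
	if (!event)
		return;		/* buffer full or recording disabled */

	p = ring_buffer_event_data(event);
	p->pid = pid;
	p->value = value;

	ring_buffer_unlock_commit(my_buffer, event);
}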
@@ -3009,7 +3009,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
* This function lets the user discard an event in the ring buffer
* and then that event will not be read later.
*
- * This function only works if it is called before the the item has been
+ * This function only works if it is called before the item has been
* committed. It will try to free the event from the ring buffer
* if another event has not been added behind it.
*
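A sketch of the discard path described above (again, not part of the patch): the event is reserved and filled, then either discarded or committed, before anything has been read. The decision condition and names are hypothetical.

#include <linux/ring_buffer.h>

static void example_maybe_discard(struct ring_buffer *my_buffer, int value)
{
	struct ring_buffer_event *event;
	int *p;

	event = ring_buffer_lock_reserve(my_buffer, sizeof(*p));
	if (!event)
		return;

	p = ring_buffer_event_data(event);
	*p = value;

	/* Decide before committing; the event can be freed if nothing was added behind it. */
	if (value < 0)
		ring_buffer_discard_commit(my_buffer, event);
	else
		ring_buffer_unlock_commit(my_buffer, event);
}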
@@ -4127,7 +4127,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* through the buffer. Memory is allocated, buffer recording
* is disabled, and the iterator pointer is returned to the caller.
*
- * Disabling buffer recordng prevents the reading from being
+ * Disabling buffer recording prevents the reading from being
* corrupted. This is not a consuming read, so a producer is not
* expected.
*
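Finally, a sketch of the non-consuming iterator flow the comment above refers to, assuming the read_prepare/read_start/read/read_finish interfaces as they exist in this kernel version; the function name and the pr_info() output are illustrative only.

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

static void example_dump_cpu(struct ring_buffer *my_buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	/* Allocates the iterator and disables recording on the buffer. */
	iter = ring_buffer_read_prepare(my_buffer, cpu);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	/* Non-consuming read: walk every event currently on this CPU. */
	while ((event = ring_buffer_read(iter, &ts)) != NULL)
		pr_info("event at %llu, length %u\n",
			ts, ring_buffer_event_length(event));

	/* Frees the iterator and re-enables recording. */
	ring_buffer_read_finish(iter);
}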