author		Linus Torvalds <torvalds@linux-foundation.org>	2023-02-23 19:08:01 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-23 19:08:01 +0100
commit		9191423872f764dccc024d6bc4b68dfd138ccc38 (patch)
tree		06efe7b39593f8ce2e5347fd9c6c01eea100e2f5 /kernel/trace
parent		Merge tag 'trace-tools-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent		ring-buffer: Handle race between rb_move_tail and rb_check_pages (diff)
Merge tag 'trace-v6.2-rc7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fix from Steven Rostedt:
 "Fix a race that causes a warning of a corrupt ring buffer.

  With the change that allows reading the "trace" file without disabling
  writing to the ring buffer, there was an integrity check of the ring
  buffer in the iterator read code that expected the ring buffer to be
  write disabled. This caused the integrity check to trigger when
  stress-reading the "trace" file while writing was happening.

  The integrity check is a bit aggressive (and has never triggered in
  practice). Change it so that it checks just the integrity of the linked
  pages without clearing the flags inside the pointers. This removes the
  warning that was being triggered."

[ Heh. This was supposed to have gone in last week before the 6.2
  release, but Steven forgot to actually add me to the participants of
  the pull request, so here it is, a week later - Linus ]

* tag 'trace-v6.2-rc7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  ring-buffer: Handle race between rb_move_tail and rb_check_pages
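To make the "flags inside the pointers" part concrete, here is a minimal user-space sketch of the scheme the ring buffer relies on. It is not kernel code: struct list_head is redeclared locally, and FLAG_MASK, list_clean() and list_tag() are invented names for this sketch; list_clean() only plays the role of the kernel's rb_list_head() helper that appears in the diff below. The point it illustrates is that a reader which masks the tag bits still sees a consistent list even while a writer is setting them.

#include <stdio.h>

/*
 * Sketch only: state bits such as "this is the head page" live in the
 * low bits of a ->next pointer, which are free because the structures
 * are pointer aligned.
 */
#define FLAG_MASK 3UL

struct list_head {
	struct list_head *next, *prev;
};

/* Strip the flag bits so the pointer can be compared and dereferenced. */
static struct list_head *list_clean(struct list_head *p)
{
	return (struct list_head *)((unsigned long)p & ~FLAG_MASK);
}

/* What a concurrent writer conceptually does: set a flag bit in a link. */
static void list_tag(struct list_head **link, unsigned long flag)
{
	*link = (struct list_head *)((unsigned long)*link | flag);
}

int main(void)
{
	struct list_head a, b, c;

	/* Circular list: a -> b -> c -> a. */
	a.next = &b; a.prev = &c;
	b.next = &c; b.prev = &a;
	c.next = &a; c.prev = &b;

	list_tag(&a.next, 1UL);	/* writer marks the link */

	/* A naive reader sees a "corrupt" pointer... */
	printf("naive check:  a.next == &b ? %s\n",
	       a.next == &b ? "yes" : "no");

	/* ...while a reader that masks the bits still sees a sane list. */
	printf("masked check: list_clean(list_clean(a.next)->prev) == &a ? %s\n",
	       list_clean(list_clean(a.next)->prev) == &a ? "yes" : "no");
	return 0;
}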
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ring_buffer.c	42
1 file changed, 10 insertions, 32 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c366a0a9ddba..b641cab2745e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1581,19 +1581,6 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 /**
- * rb_check_list - make sure a pointer to a list has the last bits zero
- */
-static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
-			 struct list_head *list)
-{
-	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
-		return 1;
-	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
-		return 1;
-	return 0;
-}
-
-/**
  * rb_check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
  *
@@ -1602,36 +1589,27 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	struct list_head *head = cpu_buffer->pages;
-	struct buffer_page *bpage, *tmp;
+	struct list_head *head = rb_list_head(cpu_buffer->pages);
+	struct list_head *tmp;
 
-	/* Reset the head page if it exists */
-	if (cpu_buffer->head_page)
-		rb_set_head_page(cpu_buffer);
-
-	rb_head_page_deactivate(cpu_buffer);
-
-	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
-		return -1;
-	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
+	if (RB_WARN_ON(cpu_buffer,
+			rb_list_head(rb_list_head(head->next)->prev) != head))
 		return -1;
 
-	if (rb_check_list(cpu_buffer, head))
+	if (RB_WARN_ON(cpu_buffer,
+			rb_list_head(rb_list_head(head->prev)->next) != head))
 		return -1;
 
-	list_for_each_entry_safe(bpage, tmp, head, list) {
+	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
 		if (RB_WARN_ON(cpu_buffer,
-			       bpage->list.next->prev != &bpage->list))
+				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
 			return -1;
+
 		if (RB_WARN_ON(cpu_buffer,
-			       bpage->list.prev->next != &bpage->list))
-			return -1;
-		if (rb_check_list(cpu_buffer, &bpage->list))
+				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
 			return -1;
 	}
 
-	rb_head_page_activate(cpu_buffer);
-
 	return 0;
 }
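For reference, the same idea as the reworked rb_check_pages() can be exercised in user space: walk the list only through masked pointers and verify that every node's next/prev links agree, without ever writing the flag bits back, so a writer flipping those bits concurrently no longer makes the list look corrupt. This is a sketch under that assumption, with invented names (FLAG_MASK, list_clean(), check_pages()), not the kernel function itself.

#include <stdio.h>

#define FLAG_MASK 3UL

struct list_head {
	struct list_head *next, *prev;
};

/* Mask off the flag bits that may be set in a link by a writer. */
static struct list_head *list_clean(struct list_head *p)
{
	return (struct list_head *)((unsigned long)p & ~FLAG_MASK);
}

/* Return 0 if the (possibly tagged) circular list is consistent. */
static int check_pages(struct list_head *pages)
{
	struct list_head *head = list_clean(pages);
	struct list_head *tmp;

	if (list_clean(list_clean(head->next)->prev) != head)
		return -1;
	if (list_clean(list_clean(head->prev)->next) != head)
		return -1;

	for (tmp = list_clean(head->next); tmp != head; tmp = list_clean(tmp->next)) {
		if (list_clean(list_clean(tmp->next)->prev) != tmp)
			return -1;
		if (list_clean(list_clean(tmp->prev)->next) != tmp)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct list_head a, b, c;

	/* Circular list: a -> b -> c -> a. */
	a.next = &b; a.prev = &c;
	b.next = &c; b.prev = &a;
	c.next = &a; c.prev = &b;

	/* Tag a link the way a concurrent writer would. */
	a.next = (struct list_head *)((unsigned long)a.next | 1UL);

	printf("check_pages: %s\n", check_pages(&a) == 0 ? "ok" : "corrupt");
	return 0;
}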