author | Steven Rostedt <rostedt@goodmis.org> | 2008-10-01 17:14:54 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-14 10:39:08 +0200
commit | e4c2ce82ca2710e17cb4df8eb2b249fa2eb5af30
tree | 186a96aa4cace7fe51ede64c8b4f426a5007f007 /kernel
parent | ftrace: type cast filter+verifier
ring_buffer: allocate buffer page pointer
The current method of overlaying the buffer page pointer on top of the
page frame can be very dangerous, and it limits our ability to do other
things with a page from the buffer, such as sending it off to disk.

This patch allocates the buffer_page separately instead of overlaying it
on the page's page frame. The way the buffer_page is used hardly changes
as a result.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/ring_buffer.c | 54
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9631abf2ae29..98145718988d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -115,16 +115,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
  * Thanks to Peter Zijlstra for suggesting this idea.
  */
 struct buffer_page {
-        union {
-                struct {
-                        unsigned long    flags;         /* mandatory */
-                        atomic_t         _count;        /* mandatory */
-                        u64              time_stamp;    /* page time stamp */
-                        unsigned         size;          /* size of page data */
-                        struct list_head list;          /* list of free pages */
-                };
-                struct page page;
-        };
+        u64              time_stamp;    /* page time stamp */
+        unsigned         size;          /* size of page data */
+        struct list_head list;          /* list of free pages */
+        void            *page;          /* Actual data page */
 };
 
 /*
@@ -133,9 +127,9 @@ struct buffer_page {
  */
 static inline void free_buffer_page(struct buffer_page *bpage)
 {
-        reset_page_mapcount(&bpage->page);
-        bpage->page.mapping = NULL;
-        __free_page(&bpage->page);
+        if (bpage->page)
+                __free_page(bpage->page);
+        kfree(bpage);
 }
 
 /*
@@ -237,11 +231,16 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
         unsigned i;
 
         for (i = 0; i < nr_pages; i++) {
+                page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+                                    GFP_KERNEL, cpu_to_node(cpu));
+                if (!page)
+                        goto free_pages;
+                list_add(&page->list, &pages);
+
                 addr = __get_free_page(GFP_KERNEL);
                 if (!addr)
                         goto free_pages;
-                page = (struct buffer_page *)virt_to_page(addr);
-                list_add(&page->list, &pages);
+                page->page = (void *)addr;
         }
 
         list_splice(&pages, head);
@@ -262,6 +261,7 @@ static struct ring_buffer_per_cpu *
 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
         struct ring_buffer_per_cpu *cpu_buffer;
+        struct buffer_page *page;
         unsigned long addr;
         int ret;
 
@@ -275,10 +275,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
         spin_lock_init(&cpu_buffer->lock);
         INIT_LIST_HEAD(&cpu_buffer->pages);
 
+        page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+                            GFP_KERNEL, cpu_to_node(cpu));
+        if (!page)
+                goto fail_free_buffer;
+
+        cpu_buffer->reader_page = page;
         addr = __get_free_page(GFP_KERNEL);
         if (!addr)
-                goto fail_free_buffer;
-        cpu_buffer->reader_page = (struct buffer_page *)virt_to_page(addr);
+                goto fail_free_reader;
+        page->page = (void *)addr;
+
         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
         cpu_buffer->reader_page->size = 0;
 
@@ -523,11 +530,16 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
         for_each_buffer_cpu(buffer, cpu) {
                 for (i = 0; i < new_pages; i++) {
+                        page = kzalloc_node(ALIGN(sizeof(*page),
+                                                  cache_line_size()),
+                                            GFP_KERNEL, cpu_to_node(cpu));
+                        if (!page)
+                                goto free_pages;
+                        list_add(&page->list, &pages);
                         addr = __get_free_page(GFP_KERNEL);
                         if (!addr)
                                 goto free_pages;
-                        page = (struct buffer_page *)virt_to_page(addr);
-                        list_add(&page->list, &pages);
+                        page->page = (void *)addr;
                 }
         }
 
@@ -567,9 +579,7 @@ static inline int rb_null_event(struct ring_buffer_event *event)
 static inline void *
 rb_page_index(struct buffer_page *page, unsigned index)
 {
-        void *addr = page_address(&page->page);
-
-        return addr + index;
+        return page->page + index;
 }
 
 static inline struct ring_buffer_event *
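
To make the new ownership model concrete, the sketch below condenses the allocation pattern the patch switches to: the buffer_page descriptor becomes a small, cache-line-aligned, NUMA-local kzalloc'd object, and the actual data page is a separate __get_free_page() allocation that the descriptor merely points to. This is illustrative only; the helper name rb_alloc_one_page() is hypothetical (not part of the patch), and it assumes the struct buffer_page definition from ring_buffer.c above.

```c
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/cache.h>
#include <linux/topology.h>

/* Hypothetical helper mirroring the pattern in rb_allocate_pages() above. */
static struct buffer_page *rb_alloc_one_page(int cpu)
{
	struct buffer_page *bpage;
	unsigned long addr;

	/* Descriptor: cache-line aligned, allocated on the CPU's node. */
	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		return NULL;

	/* Data page: allocated separately, only referenced via bpage->page. */
	addr = __get_free_page(GFP_KERNEL);
	if (!addr) {
		kfree(bpage);
		return NULL;
	}
	bpage->page = (void *)addr;

	return bpage;
}
```

The point of the split is that the data page no longer doubles as ring-buffer bookkeeping storage in its struct page, so it stays an ordinary page that can later be handed off elsewhere (for example, written out to disk) while the descriptor is simply kfree'd, as free_buffer_page() in the diff now does.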