author		Oleg Nesterov <oleg@redhat.com>	2012-07-29 20:22:29 +0200
committer	Ingo Molnar <mingo@kernel.org>	2012-07-30 11:27:20 +0200
commit		665605a2a207dbe1fa429b474f932d6ea138ba92 (patch)
tree		a9f99fcb3972a73066af4591dd3e818b8b14ab27 /kernel
parent		uprobes: Clean up and document write_opcode()->lock_page(old_page) (diff)
uprobes: Uprobe_mmap/munmap needs list_for_each_entry_safe()
The bug was introduced by me in 449d0d7c ("uprobes: Simplify the usage of
uprobe->pending_list").

Yes, we do not care about uprobe->pending_list after return and nobody can
remove the current list entry, but put_uprobe(uprobe) can actually free it
and thus we need list_for_each_safe().

Reported-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Anton Arapov <anton@redhat.com>
Link: http://lkml.kernel.org/r/20120729182229.GA20329@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
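For context on why the extra cursor matters: list_for_each_entry_safe() pre-loads the
next entry into the second cursor (the new "u" in the patch below) before the loop body
runs, so the body is free to drop the current entry. The following userspace sketch
illustrates the same idea with hand-rolled macros; node, push() and for_each_node_safe()
are made-up stand-ins for this illustration only, not kernel APIs.

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
    	int val;
    	struct node *next;
    };

    /* Unsafe walk: if the body frees 'n', the increment reads freed memory. */
    #define for_each_node(n, head) \
    	for ((n) = (head); (n); (n) = (n)->next)

    /* Safe walk: cache the successor in 'tmp' before the body can free 'n'. */
    #define for_each_node_safe(n, tmp, head) \
    	for ((n) = (head), (tmp) = (n) ? (n)->next : NULL; \
    	     (n); \
    	     (n) = (tmp), (tmp) = (n) ? (n)->next : NULL)

    static struct node *push(struct node *head, int val)
    {
    	struct node *n = malloc(sizeof(*n));

    	n->val = val;
    	n->next = head;
    	return n;
    }

    int main(void)
    {
    	struct node *head = NULL, *n, *tmp;
    	int i;

    	for (i = 0; i < 4; i++)
    		head = push(head, i);

    	/*
    	 * Mirrors the fixed loops in uprobe_mmap()/uprobe_munmap():
    	 * the body drops the current entry, so the iterator must have
    	 * already saved a pointer to its successor.
    	 */
    	for_each_node_safe(n, tmp, head) {
    		printf("dropping %d\n", n->val);
    		free(n);	/* like put_uprobe() freeing the uprobe */
    	}
    	return 0;
    }

With the plain for_each_node() walk the increment would dereference n->next after
free(n), which is exactly the use-after-free this patch avoids.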
Diffstat (limited to 'kernel')
 kernel/events/uprobes.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 5db150b306d2..bed2161620d7 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1010,7 +1010,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
int uprobe_mmap(struct vm_area_struct *vma)
{
struct list_head tmp_list;
- struct uprobe *uprobe;
+ struct uprobe *uprobe, *u;
struct inode *inode;
int ret, count;
@@ -1028,7 +1028,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
ret = 0;
count = 0;
- list_for_each_entry(uprobe, &tmp_list, pending_list) {
+ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
if (!ret) {
loff_t vaddr = vma_address(vma, uprobe->offset);
@@ -1076,7 +1076,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct list_head tmp_list;
- struct uprobe *uprobe;
+ struct uprobe *uprobe, *u;
struct inode *inode;
if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
@@ -1093,7 +1093,7 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
mutex_lock(uprobes_mmap_hash(inode));
build_probe_list(inode, &tmp_list);
- list_for_each_entry(uprobe, &tmp_list, pending_list) {
+ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
loff_t vaddr = vma_address(vma, uprobe->offset);
if (vaddr >= start && vaddr < end) {