path: root/mm/vmalloc.c
author     Ingo Molnar <mingo@elte.hu>  2009-03-01 12:47:58 +0100
committer  Ingo Molnar <mingo@elte.hu>  2009-03-01 12:47:58 +0100
commit     55f2b78995826d549401bdf20abeac1832636bb6
tree       931b31f3b6e0879df0f9a1d58ffd040d9a652f2e  /mm/vmalloc.c
parent     Revert "gpu/drm, x86, PAT: PAT support for io_mapping_*"
parent     x86: i915 needs pgprot_writecombine() and is_io_mapping_possible()
Merge branch 'x86/urgent' into x86/pat
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7774c6328970..11a929872ebd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -323,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -334,6 +335,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -365,6 +369,8 @@ retry:
 
 		while (addr + size > first->va_start && addr + size <= vend) {
 			addr = ALIGN(first->va_end + PAGE_SIZE, align);
+			if (addr + size - 1 < addr)
+				goto overflow;
 
 			n = rb_next(&first->rb_node);
 			if (n)
@@ -375,6 +381,7 @@ retry:
 	}
 found:
 	if (addr + size > vend) {
+overflow:
 		spin_unlock(&vmap_area_lock);
 		if (!purged) {
 			purge_vmap_area_lazy();
@@ -498,6 +505,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -537,7 +545,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
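
The overflow guards added in alloc_vmap_area() rely on unsigned wraparound: if a candidate range runs past the top of the address space, addr + size - 1 wraps modulo 2^N and compares below addr. The new BUG_ON(!size) matters for the same expression, since with size == 0 it would evaluate addr - 1 < addr and could misfire. A minimal user-space sketch of the same test (hypothetical code, not from the kernel; range_overflows is an illustrative name):

#include <stdio.h>

/* Hypothetical user-space model of the patch's wraparound test.
 * Unsigned arithmetic wraps, so when the last byte of the range
 * lies past the top of the address space, addr + size - 1 wraps
 * around and compares below addr. */
static int range_overflows(unsigned long addr, unsigned long size)
{
	/* Assumes size != 0, as enforced by the new BUG_ON(!size);
	 * otherwise this would test addr - 1 < addr and report a
	 * bogus overflow for almost every addr. */
	return addr + size - 1 < addr;
}

int main(void)
{
	unsigned long top = ~0UL;	/* highest representable address */

	printf("%d\n", range_overflows(0x1000UL, 0x1000UL));	  /* 0: fits */
	printf("%d\n", range_overflows(top - 0xfffUL, 0x2000UL)); /* 1: wraps */
	return 0;
}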
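
The second fix switches the purge loop to list_for_each_entry_safe() because its body, __free_vmap_area(), frees the very node the iterator stands on; the plain list_for_each_entry() would then advance by reading the next pointer out of freed memory. The _safe variant caches the successor in the extra cursor (n_va) before running the body. A stand-alone sketch of the same pattern (hypothetical user-space code modelling the kernel macro with an explicit saved-next pointer):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical model of why the purge loop needs the _safe
 * iterator: the loop body frees the current node, so the next
 * pointer must be fetched before the body runs, which is exactly
 * what list_for_each_entry_safe() does with its second cursor. */
struct node {
	struct node *next;
	int id;
};

int main(void)
{
	struct node *head = NULL, *va, *n_va;
	int i;

	/* Build a short list standing in for valist. */
	for (i = 0; i < 4; i++) {
		va = malloc(sizeof(*va));
		va->id = i;
		va->next = head;
		head = va;
	}

	/* Safe traversal: n_va caches va->next before va is freed.
	 * The unsafe form, advancing via va->next after free(va),
	 * would read freed memory. */
	for (va = head; va; va = n_va) {
		n_va = va->next;
		printf("freeing node %d\n", va->id);
		free(va);
	}
	return 0;
}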