author	Venki Pallipadi <venkatesh.pallipadi@intel.com>	2008-08-20 01:28:01 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-08-20 12:08:37 +0200
commit	80c5e73d6028e0f03ab0c70e7c4cbf98ac2e0c43 (patch)
tree	b1ae13cc96dd94e7090942e5602df6277a4126e4 /arch/x86/mm
parent	x86: fix "kernel won't boot on a Cyrix MediaGXm (Geode)" (diff)
x86: fix Xorg startup/shutdown slowdown with PAT
Rene Herman reported a significant Xorg startup/shutdown slowdown due to PAT. It turns out that the memtype list has thousands of entries.

Add a cached_entry to the list add routine, in order to speed up the lookup for sequential reserve_memtype calls.

Reported-by: Rene Herman <rene.herman@keyaccess.nl>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
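To make the change easier to follow outside the kernel tree, here is a minimal userspace sketch of the same caching idea: a doubly-linked list of non-overlapping [start, end) ranges kept sorted by start, plus a cached predecessor so that ascending reservations resume the scan near the last insertion instead of walking from the head every time. All names here (struct range, range_reserve, the sentinel head) are illustrative, not the kernel's, and conflict handling is simplified to a flat reject instead of chk_conflict():

/* Sketch only: sorted, non-overlapping [start, end) ranges. */
#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long long start, end;
	struct range *prev, *next;
};

static struct range head = { 0, 0, &head, &head };	/* sentinel node */
static struct range *cached_entry;	/* predecessor of the last insert */
static unsigned long long cached_start;	/* start of the last insert */

static int range_reserve(unsigned long long start, unsigned long long end)
{
	struct range *entry, *new;

	/*
	 * Resume the scan after the cached predecessor when the new
	 * range cannot begin before it; otherwise rescan from the head.
	 * This is what makes sequential, ascending reservations cheap.
	 */
	if (cached_entry && start >= cached_start)
		entry = cached_entry;
	else
		entry = &head;

	for (entry = entry->next; entry != &head; entry = entry->next) {
		if (end <= entry->start)
			break;		/* insert before 'entry' */
		if (start < entry->end)
			return -1;	/* overlap: simplified flat reject */
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -1;
	new->start = start;
	new->end = end;

	/* Link 'new' immediately before 'entry' (before head == at tail). */
	new->next = entry;
	new->prev = entry->prev;
	entry->prev->next = new;
	entry->prev = new;

	/* Remember the insert point; new->prev->end <= start holds here. */
	cached_entry = new->prev;
	cached_start = start;
	return 0;
}

int main(void)
{
	unsigned long long s;

	/* Ascending reservations: each lookup starts near the cache. */
	for (s = 0; s < 5 * 4096; s += 4096)
		printf("reserve [%llu, %llu) -> %d\n",
		       s, s + 4096, range_reserve(s, s + 4096));
	printf("overlapping reserve -> %d\n", range_reserve(0, 4096));
	return 0;
}

For N ascending reservations this drops the total scan work from quadratic to roughly linear; a reservation below cached_start simply falls back to a full scan from the head.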
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pat.c	33
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2fe30916d4b6..bb6e8a267bfe 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
 	return -EBUSY;
 }
 
+static struct memtype *cached_entry;
+static u64 cached_start;
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
+	if (cached_entry && start >= cached_start)
+		entry = cached_entry;
+	else
+		entry = list_entry(&memtype_list, struct memtype, nd);
+
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
+			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
+				cached_entry = list_entry(where,
+							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				where = &entry->nd;
+				cached_entry = list_entry(entry->nd.prev,
+							struct memtype, nd);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+							&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
 			}
 			break;
 		}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
+	cached_start = start;
+
 	if (where)
 		list_add(&new->nd, where);
 	else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
+			if (cached_entry == entry || cached_start == start)
+				cached_entry = NULL;
+
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
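The free_memtype() hunk above is the other half of the scheme: once an entry is unlinked and freed, a cache that points at it (or that was seeded by its start address) would be left dangling. Continuing the userspace sketch, with the same caveat that range_free() and the surrounding names are illustrative:

/* Companion to range_reserve() above: unlink an exact-match range and
 * drop the cache if it points at (or was seeded by) the dying entry. */
static int range_free(unsigned long long start, unsigned long long end)
{
	struct range *entry;

	for (entry = head.next; entry != &head; entry = entry->next) {
		if (entry->start == start && entry->end == end) {
			if (cached_entry == entry || cached_start == start)
				cached_entry = NULL;	/* invalidate cache */
			entry->prev->next = entry->next;
			entry->next->prev = entry->prev;
			free(entry);
			return 0;
		}
	}
	return -1;	/* no such range */
}

Note that the cache only ever serves as a scan-start hint: removing some unrelated entry after the cached predecessor leaves the hint valid, since any entry past cached_entry is still scanned on the next reservation.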