summaryrefslogtreecommitdiffstats
path: root/arch/x86/xen/mmu.c
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@goop.org>2008-05-27 00:31:20 +0200
committerThomas Gleixner <tglx@linutronix.de>2008-05-27 10:11:37 +0200
commitcf0923ea295ba08ae656ef04164a43cb6553ba99 (patch)
treec5c8ea1a226edcf7a29bec1953f65469a21756e5 /arch/x86/xen/mmu.c
parentxen: add configurable max domain size (diff)
downloadlinux-cf0923ea295ba08ae656ef04164a43cb6553ba99.tar.xz
linux-cf0923ea295ba08ae656ef04164a43cb6553ba99.zip
xen: efficiently support a holey p2m table
When using sparsemem and memory hotplug, the kernel's pseudo-physical address space can be discontiguous. Previously this was dealt with by having the upper parts of the radix tree stubbed off. Unfortunately, this is incompatible with save/restore, which requires a complete p2m table. The solution is to have a special distinguished all-invalid p2m leaf page, which we can point all the hole areas at. This allows the tools to see a complete p2m table, but it only costs a page for all memory holes. It also simplifies the code since it removes a few special cases. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to '')
-rw-r--r--arch/x86/xen/mmu.c18
1 file changed, 12 insertions, 6 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 644232aa7bfb..da7b45b05066 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -57,8 +57,17 @@
#include "mmu.h"
#define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+#define TOP_ENTRIES (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
-static unsigned long *p2m_top[MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE];
+/* Placeholder for holes in the address space */
+static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
+ __attribute__((section(".data.page_aligned"))) =
+ { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+
+ /* Array of pointers to pages containing p2m entries */
+static unsigned long *p2m_top[TOP_ENTRIES]
+ __attribute__((section(".data.page_aligned"))) =
+ { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
static inline unsigned p2m_top_index(unsigned long pfn)
{
@@ -92,9 +101,6 @@ unsigned long get_phys_to_machine(unsigned long pfn)
return INVALID_P2M_ENTRY;
topidx = p2m_top_index(pfn);
- if (p2m_top[topidx] == NULL)
- return INVALID_P2M_ENTRY;
-
idx = p2m_index(pfn);
return p2m_top[topidx][idx];
}
@@ -110,7 +116,7 @@ static void alloc_p2m(unsigned long **pp)
for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
p[i] = INVALID_P2M_ENTRY;
- if (cmpxchg(pp, NULL, p) != NULL)
+ if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
free_page((unsigned long)p);
}
@@ -129,7 +135,7 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
}
topidx = p2m_top_index(pfn);
- if (p2m_top[topidx] == NULL) {
+ if (p2m_top[topidx] == p2m_missing) {
/* no need to allocate a page to store an invalid entry */
if (mfn == INVALID_P2M_ENTRY)
return;