Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/fault.c            |  6
-rw-r--r--   arch/x86/mm/kmemcheck/error.c  |  2
-rw-r--r--   arch/x86/mm/mmio-mod.c         |  2
-rw-r--r--   arch/x86/mm/numa.c             | 15
-rw-r--r--   arch/x86/mm/numa_32.c          |  6
-rw-r--r--   arch/x86/mm/pageattr-test.c    |  3
6 files changed, 24 insertions, 10 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2dbf6bf4c7e5..4d09df054e39 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	/*
 	 * If we're in an interrupt, have no user context or are running
@@ -1161,11 +1161,11 @@ good_area:
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
 				      regs, address);
 		} else {
 			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
 				      regs, address);
 		}
 		if (fault & VM_FAULT_RETRY) {
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
index 704a37cedddb..dab41876cdd5 100644
--- a/arch/x86/mm/kmemcheck/error.c
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
 	e->trace.entries = e->trace_entries;
 	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
 	e->trace.skip = 0;
-	save_stack_trace_regs(&e->trace, regs);
+	save_stack_trace_regs(regs, &e->trace);
 
 	/* Round address down to nearest 16 bytes */
 	shadow_copy = kmemcheck_shadow_lookup(address
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 3adff7dcc148..67421f38a215 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -34,7 +34,7 @@
 #include <asm/pgtable.h>
 #include <linux/mmiotrace.h>
 #include <asm/e820.h> /* for ISA_START_ADDRESS */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
 
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index f5510d889a22..fbeaaf416610 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -496,6 +496,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 
 static int __init numa_register_memblks(struct numa_meminfo *mi)
 {
+	unsigned long uninitialized_var(pfn_align);
 	int i, nid;
 
 	/* Account for nodes with cpus and no memory */
@@ -511,6 +512,20 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 
 	/* for out of order entries */
 	sort_node_map();
+
+	/*
+	 * If sections array is gonna be used for pfn -> nid mapping, check
+	 * whether its granularity is fine enough.
+	 */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+	pfn_align = node_map_pfn_alignment();
+	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
+		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
+		       PFN_PHYS(pfn_align) >> 20,
+		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
+		return -EINVAL;
+	}
+#endif
 	if (!numa_meminfo_cover_memory(mi))
 		return -EINVAL;
 
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 849a975d3fa0..3adebe7e536a 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -41,7 +41,7 @@
  *     physnode_map[16-31] = 1;
  *     physnode_map[32- ] = -1;
  */
-s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+s8 physnode_map[MAX_SECTIONS] __read_mostly = { [0 ... (MAX_SECTIONS - 1)] = -1};
 EXPORT_SYMBOL(physnode_map);
 
 void memory_present(int nid, unsigned long start, unsigned long end)
@@ -52,8 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
 			nid, start, end);
 	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
 	printk(KERN_DEBUG "  ");
-	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
-		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
+	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
+		physnode_map[pfn / PAGES_PER_SECTION] = nid;
 		printk(KERN_CONT "%lx ", pfn);
 	}
 	printk(KERN_CONT "\n");
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index e1d106909218..b0086567271c 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -123,12 +123,11 @@ static int pageattr_test(void)
 	if (print)
 		printk(KERN_INFO "CPA self-test:\n");
 
-	bm = vmalloc((max_pfn_mapped + 7) / 8);
+	bm = vzalloc((max_pfn_mapped + 7) / 8);
 	if (!bm) {
 		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
 		return -ENOMEM;
 	}
-	memset(bm, 0, (max_pfn_mapped + 7) / 8);
 
 	failed += print_split(&sa);
 	srandom32(100);
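
Side note, not part of the patch above: the pageattr-test.c hunk relies on vzalloc() returning virtually contiguous memory that is already zero-filled, which is what makes the separate memset() redundant. A minimal sketch of the before/after shape, using hypothetical helper names (alloc_bitmap_old/alloc_bitmap_new are illustration only, not functions from this patch):

/*
 * Illustration only: assumes a kernel build context where
 * <linux/vmalloc.h> and <linux/string.h> are available.
 */
#include <linux/vmalloc.h>
#include <linux/string.h>

static void *alloc_bitmap_old(unsigned long nbits)
{
	void *bm = vmalloc((nbits + 7) / 8);	/* memory is uninitialized */

	if (bm)
		memset(bm, 0, (nbits + 7) / 8);	/* explicit zeroing required */
	return bm;
}

static void *alloc_bitmap_new(unsigned long nbits)
{
	return vzalloc((nbits + 7) / 8);	/* returned memory is already zeroed */
}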