-rw-r--r--   arch/x86/kernel/aperture_64.c    |  3
-rw-r--r--   arch/x86/kernel/cpu/mtrr/main.c  |  3
-rw-r--r--   arch/x86/kernel/e820_32.c        | 26
-rw-r--r--   arch/x86/kernel/e820_64.c        | 27
-rw-r--r--   arch/x86/kernel/head_32.S        |  2
-rw-r--r--   arch/x86/kernel/pci-dma_64.c     | 54
-rw-r--r--   arch/x86/kernel/quirks.c         |  2
-rw-r--r--   arch/x86/kernel/reboot.c         | 18
-rw-r--r--   arch/x86/kernel/setup64.c        | 14
-rw-r--r--   arch/x86/kernel/setup_64.c       |  2
-rw-r--r--   arch/x86/mach-visws/traps.c      |  5
-rw-r--r--   arch/x86/mm/numa_64.c            |  3
-rw-r--r--   include/asm-x86/cmpxchg_32.h     | 32
-rw-r--r--   include/asm-x86/e820_32.h        |  2
-rw-r--r--   include/asm-x86/e820_64.h        |  2
-rw-r--r--   include/asm-x86/page.h           |  4
-rw-r--r--   include/asm-x86/pci_64.h         |  1
-rw-r--r--   include/asm-x86/sync_bitops.h    |  9
18 files changed, 170 insertions, 39 deletions
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 608152a2a05e..00df126169b4 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/bitops.h>
 #include <linux/ioport.h>
+#include <linux/suspend.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/gart.h>
@@ -76,6 +77,8 @@ static u32 __init allocate_aperture(void)
 	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
 			aper_size >> 10, __pa(p));
 	insert_aperture_resource((u32)__pa(p), aper_size);
+	register_nosave_region((u32)__pa(p) >> PAGE_SHIFT,
+				(u32)__pa(p+aper_size) >> PAGE_SHIFT);
 
 	return (u32)__pa(p);
 }
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index be83336fddba..a6450b3ae759 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -711,7 +711,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 		trim_size = end_pfn;
 		trim_size <<= PAGE_SHIFT;
 		trim_size -= trim_start;
-		add_memory_region(trim_start, trim_size, E820_RESERVED);
+		update_memory_range(trim_start, trim_size, E820_RAM,
+					E820_RESERVED);
 		update_e820();
 		return 1;
 	}
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 4e16ef4a2659..80444c5c9b14 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -749,6 +749,32 @@ static int __init parse_memmap(char *arg)
 	return 0;
 }
 early_param("memmap", parse_memmap);
+void __init update_memory_range(u64 start, u64 size, unsigned old_type,
+				unsigned new_type)
+{
+	int i;
+
+	BUG_ON(old_type == new_type);
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		u64 final_start, final_end;
+		if (ei->type != old_type)
+			continue;
+		/* totally covered? */
+		if (ei->addr >= start && ei->size <= size) {
+			ei->type = new_type;
+			continue;
+		}
+		/* partially covered */
+		final_start = max(start, ei->addr);
+		final_end = min(start + size, ei->addr + ei->size);
+		if (final_start >= final_end)
+			continue;
+		add_memory_region(final_start, final_end - final_start,
+					 new_type);
+	}
+}
 void __init update_e820(void)
 {
 	u8 nr_map;
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 9f65b4cc323c..9be697126013 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -744,6 +744,33 @@ void __init finish_e820_parsing(void)
 	}
 }
 
+void __init update_memory_range(u64 start, u64 size, unsigned old_type,
+				unsigned new_type)
+{
+	int i;
+
+	BUG_ON(old_type == new_type);
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		u64 final_start, final_end;
+		if (ei->type != old_type)
+			continue;
+		/* totally covered? */
+		if (ei->addr >= start && ei->size <= size) {
+			ei->type = new_type;
+			continue;
+		}
+		/* partially covered */
+		final_start = max(start, ei->addr);
+		final_end = min(start + size, ei->addr + ei->size);
+		if (final_start >= final_end)
+			continue;
+		add_memory_region(final_start, final_end - final_start,
+					 new_type);
+	}
+}
+
 void __init update_e820(void)
 {
 	u8 nr_map;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fd8ca53943a8..74d87ea85b5c 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -657,7 +657,7 @@ int_msg:
 	.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
 
 fault_msg:
-	.ascii \
+	.asciz \
 /* fault info: */	"BUG: Int %d: CR2 %p\n" \
 /* pusha regs: */	" EDI %p ESI %p EBP %p ESP %p\n" \
 			" EBX %p EDX %p ECX %p EAX %p\n" \
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index a82473d192a3..8bc1e185e557 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -8,6 +8,8 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/dmar.h>
+#include <linux/bootmem.h>
+#include <asm/proto.h>
 #include <asm/io.h>
 #include <asm/gart.h>
 #include <asm/calgary.h>
@@ -53,11 +55,6 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
 	int node;
 
 	node = dev_to_node(dev);
-	if (node == -1)
-		node = numa_node_id();
-
-	if (node < first_node(node_online_map))
-		node = first_node(node_online_map);
 
 	page = alloc_pages_node(node, gfp, order);
 	return page ? page_address(page) : NULL;
@@ -291,8 +288,55 @@ static __init int iommu_setup(char *p)
 }
 early_param("iommu", iommu_setup);
 
+static __initdata void *dma32_bootmem_ptr;
+static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
+
+static int __init parse_dma32_size_opt(char *p)
+{
+	if (!p)
+		return -EINVAL;
+	dma32_bootmem_size = memparse(p, &p);
+	return 0;
+}
+early_param("dma32_size", parse_dma32_size_opt);
+
+void __init dma32_reserve_bootmem(void)
+{
+	unsigned long size, align;
+	if (end_pfn <= MAX_DMA32_PFN)
+		return;
+
+	align = 64ULL<<20;
+	size = round_up(dma32_bootmem_size, align);
+	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
+				 __pa(MAX_DMA_ADDRESS));
+	if (dma32_bootmem_ptr)
+		dma32_bootmem_size = size;
+	else
+		dma32_bootmem_size = 0;
+}
+static void __init dma32_free_bootmem(void)
+{
+	int node;
+
+	if (end_pfn <= MAX_DMA32_PFN)
+		return;
+
+	if (!dma32_bootmem_ptr)
+		return;
+
+	for_each_online_node(node)
+		free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
+				  dma32_bootmem_size);
+
+	dma32_bootmem_ptr = NULL;
+	dma32_bootmem_size = 0;
+}
+
 void __init pci_iommu_alloc(void)
 {
+	/* free the range so iommu could get some range less than 4G */
+	dma32_free_bootmem();
 	/*
 	 * The order of these functions is important for
 	 * fall-back/fail-over reasons
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index c47208fc5932..d89a648fe710 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -363,6 +363,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
 			nvidia_force_enable_hpet);
 
 /* LPC bridges */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
+			nvidia_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
 			nvidia_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 55ceb8cdef75..484c4a80d38a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -152,6 +152,24 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "0WF810"),
 		},
 	},
+	{	/* Handle problems with rebooting on Dell Optiplex 745's DFF*/
+		.callback = set_bios_reboot,
+		.ident = "Dell OptiPlex 745",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+			DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
+		},
+	},
+	{	/* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
+		.callback = set_bios_reboot,
+		.ident = "Dell OptiPlex 745",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
+			DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
+		},
+	},
 	{	/* Handle problems with rebooting on Dell 2400's */
 		.callback = set_bios_reboot,
 		.ident = "Dell PowerEdge 2400",
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index 309366f8f603..e24c45677094 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -142,14 +142,16 @@ void __init setup_per_cpu_areas(void)
 	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
 	for_each_cpu_mask (i, cpu_possible_map) {
 		char *ptr;
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+		ptr = alloc_bootmem_pages(size);
+#else
+		int node = early_cpu_to_node(i);
 
-		if (!NODE_DATA(early_cpu_to_node(i))) {
-			printk("cpu with no node %d, num_online_nodes %d\n",
-			       i, num_online_nodes());
+		if (!node_online(node) || !NODE_DATA(node))
 			ptr = alloc_bootmem_pages(size);
-		} else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(early_cpu_to_node(i)), size);
-		}
+		else
+			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+#endif
 		if (!ptr)
 			panic("Cannot allocate cpu data for CPU %d\n", i);
 		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 7637dc91c79b..a775fe3de955 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -389,6 +389,8 @@ void __init setup_arch(char **cmdline_p)
 
 	early_res_to_bootmem();
 
+	dma32_reserve_bootmem();
+
 #ifdef CONFIG_ACPI_SLEEP
 	/*
 	 * Reserve low memory region for sleep support.
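[Editor's note] The pci-dma_64.c and setup_64.c hunks above work as a pair: setup_arch() parks a placeholder allocation below 4 GB early via dma32_reserve_bootmem(), and pci_iommu_alloc() hands it back through dma32_free_bootmem() immediately before the IOMMU/swiotlb code allocates, so that allocation can still find room under 4 GB. A minimal user-space sketch of the same reserve-then-release trick follows; the bitmap allocator and every name in it (low_alloc, low_free, LOW_UNITS) are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the placeholder-reservation trick: grab a block from the
 * scarce "low" pool early so other early users cannot exhaust it, then
 * return it right before the consumer that really needs low addresses
 * (here: a pretend IOMMU) allocates.  Illustrative names only.
 */
#include <stdio.h>
#include <string.h>

#define LOW_UNITS 16                     /* pretend "below 4G" pool      */
static unsigned char low_map[LOW_UNITS]; /* 1 = unit in use              */

static int low_alloc(int units)          /* first-fit over the bitmap    */
{
	for (int base = 0; base + units <= LOW_UNITS; base++) {
		int free = 1;
		for (int i = 0; i < units; i++)
			free &= !low_map[base + i];
		if (free) {
			memset(low_map + base, 1, units);
			return base;
		}
	}
	return -1;                       /* no room left in the low pool */
}

static void low_free(int base, int units)
{
	memset(low_map + base, 0, units);
}

int main(void)
{
	/* "setup_arch()": reserve a placeholder early in boot */
	int placeholder = low_alloc(8);

	/* other boot-time users consume part of the low pool */
	low_alloc(3);
	low_alloc(3);

	/* "pci_iommu_alloc()": release the placeholder first ... */
	low_free(placeholder, 8);

	/* ... so the "IOMMU" can still grab a large low chunk */
	printf("iommu got offset %d\n", low_alloc(8));
	return 0;
}

In the real kernel the pool is the bootmem allocator, the reservation defaults to 128 MB rounded to 64 MB alignment, and it can be tuned with the new dma32_size= boot parameter introduced in the hunk above.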
diff --git a/arch/x86/mach-visws/traps.c b/arch/x86/mach-visws/traps.c
index 843b67acf43b..bfac6ba10f8a 100644
--- a/arch/x86/mach-visws/traps.c
+++ b/arch/x86/mach-visws/traps.c
@@ -46,8 +46,9 @@ static __init void cobalt_init(void)
 	 */
 	set_fixmap(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE);
 	setup_local_APIC();
-	printk(KERN_INFO "Local APIC Version %#lx, ID %#lx\n",
-		apic_read(APIC_LVR), apic_read(APIC_ID));
+	printk(KERN_INFO "Local APIC Version %#x, ID %#x\n",
+		(unsigned int)apic_read(APIC_LVR),
+		(unsigned int)apic_read(APIC_ID));
 
 	set_fixmap(FIX_CO_CPU, CO_CPU_PHYS);
 	set_fixmap(FIX_CO_APIC, CO_APIC_PHYS);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 8ccfee10f5b5..16b82ad34b96 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -221,8 +221,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
 				 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
 	if (bootmap == NULL)  {
 		if (nodedata_phys < start || nodedata_phys >= end)
-			free_bootmem((unsigned long)node_data[nodeid],
-				pgdat_size);
+			free_bootmem(nodedata_phys, pgdat_size);
 		node_data[nodeid] = NULL;
 		return;
 	}
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index cea1dae288a7..959fad00dff5 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -269,22 +269,26 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
-					(unsigned long)(n), sizeof(*(ptr))); \
+		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
+				(unsigned long)(o), (unsigned long)(n),	\
+				sizeof(*(ptr)));			\
 	else								\
-		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
-					(unsigned long)(n), sizeof(*(ptr))); \
+		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
+				(unsigned long)(o), (unsigned long)(n),	\
+				sizeof(*(ptr)));			\
 	__ret;								\
 })
 #define cmpxchg_local(ptr, o, n)					\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	if (likely(boot_cpu_data.x86 > 3))				\
-		__ret = __cmpxchg_local((ptr), (unsigned long)(o),	\
-					(unsigned long)(n), sizeof(*(ptr))); \
+		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
+				(unsigned long)(o), (unsigned long)(n),	\
+				sizeof(*(ptr)));			\
 	else								\
-		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
-					(unsigned long)(n), sizeof(*(ptr))); \
+		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
+				(unsigned long)(o), (unsigned long)(n),	\
+				sizeof(*(ptr)));			\
 	__ret;								\
 })
 #endif
@@ -301,10 +305,12 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	if (likely(boot_cpu_data.x86 > 4))				\
-		__ret = __cmpxchg64((ptr), (unsigned long long)(o),	\
+		__ret = (__typeof__(*(ptr)))__cmpxchg64((ptr),		\
+				(unsigned long long)(o),		\
 				(unsigned long long)(n));		\
 	else								\
-		__ret = cmpxchg_486_u64((ptr), (unsigned long long)(o),	\
+		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
+				(unsigned long long)(o),		\
 				(unsigned long long)(n));		\
 	__ret;								\
 })
@@ -312,10 +318,12 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	if (likely(boot_cpu_data.x86 > 4))				\
-		__ret = __cmpxchg64_local((ptr), (unsigned long long)(o), \
+		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),	\
+				(unsigned long long)(o),		\
 				(unsigned long long)(n));		\
 	else								\
-		__ret = cmpxchg_486_u64((ptr), (unsigned long long)(o),	\
+		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
+				(unsigned long long)(o),		\
 				(unsigned long long)(n));		\
 	__ret;								\
 })
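[Editor's note] The cmpxchg_32.h hunks add a (__typeof__(*(ptr))) cast in front of every helper call because __cmpxchg()/cmpxchg_386() and the 64-bit variants return a bare unsigned long (long long); when *ptr is a pointer or other non-matching type, assigning that raw integer to __ret, declared with __typeof__(*(ptr)), draws an implicit-conversion warning. A small user-space sketch of the pattern, assuming GCC statement expressions and using __sync_val_compare_and_swap as a stand-in for the kernel primitive (my_cmpxchg is an invented name):

/*
 * Funnel the result through unsigned long to mirror the kernel helpers'
 * return type, then cast back to the caller's type exactly as the patch
 * does.  Dropping the (__typeof__(*(ptr))) cast reproduces the
 * "makes pointer from integer" warning the patch silences.
 */
#include <stdio.h>

#define my_cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))(unsigned long)			\
		__sync_val_compare_and_swap((ptr), (o), (n));		\
	__ret;								\
})

int main(void)
{
	int x = 1;
	int *p = &x, *old;

	/* swap the pointer only if it still points at x */
	old = my_cmpxchg(&p, &x, (int *)0);

	printf("old=%p  p=%p\n", (void *)old, (void *)p);
	return 0;
}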
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
index f1da7ebd1905..e7207a6de3e0 100644
--- a/include/asm-x86/e820_32.h
+++ b/include/asm-x86/e820_32.h
@@ -28,6 +28,8 @@ extern void find_max_pfn(void);
 extern void register_bootmem_low_pages(unsigned long max_low_pfn);
 extern void add_memory_region(unsigned long long start,
 			      unsigned long long size, int type);
+extern void update_memory_range(u64 start, u64 size, unsigned old_type,
+				unsigned new_type);
 extern void e820_register_memory(void);
 extern void limit_regions(unsigned long long size);
 extern void print_memory_map(char *who);
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
index a560c4f5d500..22ede73ae724 100644
--- a/include/asm-x86/e820_64.h
+++ b/include/asm-x86/e820_64.h
@@ -18,6 +18,8 @@ extern unsigned long find_e820_area(unsigned long start, unsigned long end,
 				    unsigned size, unsigned long align);
 extern void add_memory_region(unsigned long start, unsigned long size,
 			      int type);
+extern void update_memory_range(u64 start, u64 size, unsigned old_type,
+				unsigned new_type);
 extern void setup_memory_region(void);
 extern void contig_e820_setup(void);
 extern unsigned long e820_end_of_ram(void);
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 1cb7c51bc296..a05b2896492f 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -52,13 +52,13 @@ extern int page_is_ram(unsigned long pagenr);
 
 struct page;
 
-static void inline clear_user_page(void *page, unsigned long vaddr,
+static inline void clear_user_page(void *page, unsigned long vaddr,
 				struct page *pg)
 {
 	clear_page(page);
 }
 
-static void inline copy_user_page(void *to, void *from, unsigned long vaddr,
+static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 				struct page *topage)
 {
 	copy_page(to, from);
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index 374690314539..da8266a08005 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -25,6 +25,7 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int l
 
+extern void dma32_reserve_bootmem(void);
 extern void pci_iommu_alloc(void);
 
 /* The PCI address space does equal the physical memory
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index cbce08a2d135..6b775c905666 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -23,10 +23,6 @@
  * This function is atomic and may not be reordered.  See __set_bit()
  * if you do not require the atomic guarantees.
  *
- * Note: there are no guarantees that this function will not be reordered
- * on non-x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
@@ -61,8 +57,7 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
+ * sync_change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
@@ -80,7 +75,6 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
@@ -99,7 +93,6 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
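[Editor's note] For reference, the new update_memory_range() (added identically to e820_32.c and e820_64.c above) only retypes ranges: a fully covered entry is flipped in place, while a partially covered one gets an overlapping entry of the new type appended, and the caller is expected to run update_e820() afterwards to re-sanitize the map, as mtrr_trim_uncached_memory() now does. A toy user-space walk-through under those assumptions; the miniature table, types, and helper names below are illustrative, not the kernel's:

/*
 * Mirror the shape of update_memory_range() on a tiny fake e820 table,
 * driven the way the MTRR trim path drives it: flip the tail of an
 * E820_RAM entry to E820_RESERVED.
 */
#include <stdio.h>

#define E820_RAM	1
#define E820_RESERVED	2
#define MAX_E820	8

struct toy_entry { unsigned long long addr, size; unsigned type; };

static struct toy_entry map[MAX_E820] = {
	{ 0x0000000, 0x8000000, E820_RAM },	 /* 0..128M usable  */
	{ 0x8000000, 0x1000000, E820_RESERVED }, /* 128M..144M hole */
};
static int nr_map = 2;

static void add_region(unsigned long long a, unsigned long long s, unsigned t)
{
	map[nr_map++] = (struct toy_entry){ a, s, t };
}

/* same shape as the kernel function: retype covered ranges of old_type */
static void update_range(unsigned long long start, unsigned long long size,
			 unsigned old_type, unsigned new_type)
{
	for (int i = 0; i < nr_map; i++) {
		struct toy_entry *ei = &map[i];
		unsigned long long fs, fe;
		if (ei->type != old_type)
			continue;
		if (ei->addr >= start && ei->size <= size) {
			ei->type = new_type;	/* fully covered */
			continue;
		}
		fs = start > ei->addr ? start : ei->addr;
		fe = (start + size) < (ei->addr + ei->size) ?
			(start + size) : (ei->addr + ei->size);
		if (fs >= fe)
			continue;
		add_region(fs, fe - fs, new_type); /* partially covered */
	}
}

int main(void)
{
	/* pretend MTRRs only cover the first 96M: trim 96M..128M of RAM */
	update_range(0x6000000, 0x2000000, E820_RAM, E820_RESERVED);

	for (int i = 0; i < nr_map; i++)
		printf("%#llx-%#llx type %u\n", map[i].addr,
		       map[i].addr + map[i].size, map[i].type);
	return 0;
}

Running it prints the original RAM entry unchanged, the pre-existing reserved hole, and a new 96-128 MB reserved entry; the overlap is intentional and is resolved by the subsequent update_e820()/sanitize step in the kernel.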