Diffstat (limited to 'arch')

 arch/arm/mach-rpc/riscpc.c        |  6
 arch/x86/configs/i386_defconfig   |  6
 arch/x86/configs/x86_64_defconfig |  6
 arch/x86/include/asm/iomap.h      |  6
 arch/x86/include/asm/pat.h        |  3
 arch/x86/include/asm/uaccess_32.h |  4
 arch/x86/include/asm/uaccess_64.h |  9
 arch/x86/kernel/alternative.c     |  6
 arch/x86/kernel/apic/summit_32.c  | 57
 arch/x86/kernel/e820.c            |  3
 arch/x86/mm/iomap_32.c            | 58
 arch/x86/mm/pat.c                 | 46
 arch/x86/xen/enlighten.c          |  3

 13 files changed, 149 insertions(+), 64 deletions(-)
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index e88d417736af..c7fc01e9d1f6 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -19,6 +19,7 @@
 #include <linux/serial_8250.h>
 #include <linux/ata_platform.h>
 #include <linux/io.h>
+#include <linux/i2c.h>
 
 #include <asm/elf.h>
 #include <asm/mach-types.h>
@@ -201,8 +202,13 @@ static struct platform_device *devs[] __initdata = {
 	&pata_device,
 };
 
+static struct i2c_board_info i2c_rtc = {
+	I2C_BOARD_INFO("pcf8583", 0x50)
+};
+
 static int __init rpc_init(void)
 {
+	i2c_register_board_info(0, &i2c_rtc, 1);
 	return platform_add_devices(devs, ARRAY_SIZE(devs));
 }
 
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5c023f6f652c..235b81d0f6f2 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
 # Linux kernel version: 2.6.29-rc4
-# Thu Feb 12 12:57:57 2009
+# Tue Feb 24 15:50:58 2009
 #
 # CONFIG_64BIT is not set
 CONFIG_X86_32=y
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_X86_LOCAL_APIC=y
 CONFIG_X86_IO_APIC=y
 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-# CONFIG_X86_MCE is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_NONFATAL=y
+CONFIG_X86_MCE_P4THERMAL=y
 CONFIG_VM86=y
 # CONFIG_TOSHIBA is not set
 # CONFIG_I8K is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 4157cc4a2bde..9fe5d212ab4c 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
 # Linux kernel version: 2.6.29-rc4
-# Thu Feb 12 12:57:29 2009
+# Tue Feb 24 15:44:16 2009
 #
 CONFIG_64BIT=y
 # CONFIG_X86_32 is not set
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_X86_LOCAL_APIC=y
 CONFIG_X86_IO_APIC=y
 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-# CONFIG_X86_MCE is not set
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
 # CONFIG_I8K is not set
 CONFIG_MICROCODE=y
 CONFIG_MICROCODE_INTEL=y
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c1f06289b14b..bd46495ff7de 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -23,6 +23,12 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+int
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot);
+
+void
+free_io_memtype(u64 base, unsigned long size);
+
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 9709fdff6615..b0e70056838e 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -15,4 +15,7 @@ extern int reserve_memtype(u64 start, u64 end,
 		unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);
 
+extern int kernel_map_sync_memtype(u64 base, unsigned long size,
+		unsigned long flag);
+
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 5e06259e90e5..a0ba61386972 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 }
 
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
-		const void __user *from, unsigned long n)
+		const void __user *from, unsigned long n, unsigned long total)
 {
 	might_fault();
 	if (__builtin_constant_p(n)) {
@@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
 static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
-		unsigned long n)
+		unsigned long n, unsigned long total)
 {
 	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 987a2c10fe20..dcaa0404cf7b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -189,7 +189,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
 
 static inline int __copy_from_user_nocache(void *dst, const void __user *src,
-				unsigned size)
+				unsigned size, unsigned long total)
 {
 	might_sleep();
 	/*
@@ -198,17 +198,16 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
 	 * non-temporal stores here. Smaller writes get handled
 	 * via regular __copy_from_user():
 	 */
-	if (likely(size >= PAGE_SIZE))
+	if (likely(total >= PAGE_SIZE))
 		return __copy_user_nocache(dst, src, size, 1);
 	else
 		return __copy_from_user(dst, src, size);
 }
 
 static inline int __copy_from_user_inatomic_nocache(void *dst,
-		const void __user *src,
-		unsigned size)
+		const void __user *src, unsigned size, unsigned total)
 {
-	if (likely(size >= PAGE_SIZE))
+	if (likely(total >= PAGE_SIZE))
 		return __copy_user_nocache(dst, src, size, 0);
 	else
 		return __copy_from_user_inatomic(dst, src, size);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a84ac7b570e6..6907b8e85d52 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -498,12 +498,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
  */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
-	unsigned long flags;
 	char *vaddr;
 	int nr_pages = 2;
 	struct page *pages[2];
 	int i;
 
+	might_sleep();
 	if (!core_kernel_text((unsigned long)addr)) {
 		pages[0] = vmalloc_to_page(addr);
 		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +517,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 		nr_pages = 1;
 	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 	BUG_ON(!vaddr);
-	local_irq_save(flags);
+	local_irq_disable();
 	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-	local_irq_restore(flags);
+	local_irq_enable();
 	vunmap(vaddr);
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index cfe7b09015d8..32838b57a945 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -48,7 +48,7 @@
 #include <linux/gfp.h>
 #include <linux/smp.h>
 
-static inline unsigned summit_get_apic_id(unsigned long x)
+static unsigned summit_get_apic_id(unsigned long x)
 {
 	return (x >> 24) & 0xFF;
 }
@@ -58,7 +58,7 @@ static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
 	default_send_IPI_mask_sequence_logical(mask, vector);
 }
 
-static inline void summit_send_IPI_allbutself(int vector)
+static void summit_send_IPI_allbutself(int vector)
 {
 	cpumask_t mask = cpu_online_map;
 	cpu_clear(smp_processor_id(), mask);
@@ -67,7 +67,7 @@ static inline void summit_send_IPI_allbutself(int vector)
 		summit_send_IPI_mask(&mask, vector);
 }
 
-static inline void summit_send_IPI_all(int vector)
+static void summit_send_IPI_all(int vector)
 {
 	summit_send_IPI_mask(&cpu_online_map, vector);
 }
@@ -82,8 +82,8 @@ extern void setup_summit(void);
 #define setup_summit() {}
 #endif
 
-static inline int
-summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+static int summit_mps_oem_check(struct mpc_table *mpc, char *oem,
+		char *productid)
 {
 	if (!strncmp(oem, "IBM ENSW", 8) &&
 			(!strncmp(productid, "VIGIL SMP", 9)
@@ -98,7 +98,7 @@ summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
 }
 
 /* Hook from generic ACPI tables.c */
-static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strncmp(oem_id, "IBM", 3) &&
 	    (!strncmp(oem_table_id, "SERVIGIL", 8)
@@ -186,7 +186,7 @@ static inline int is_WPEG(struct rio_detail *rio){
 
 #define SUMMIT_APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 
-static inline const cpumask_t *summit_target_cpus(void)
+static const cpumask_t *summit_target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
@@ -195,19 +195,18 @@ static inline const cpumask_t *summit_target_cpus(void)
 	return &cpumask_of_cpu(0);
 }
 
-static inline unsigned long
-summit_check_apicid_used(physid_mask_t bitmap, int apicid)
+static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
 {
 	return 0;
 }
 
 /* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long summit_check_apicid_present(int bit)
+static unsigned long summit_check_apicid_present(int bit)
 {
 	return 1;
 }
 
-static inline void summit_init_apic_ldr(void)
+static void summit_init_apic_ldr(void)
 {
 	unsigned long val, id;
 	int count = 0;
@@ -234,18 +233,18 @@ static inline void summit_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-static inline int summit_apic_id_registered(void)
+static int summit_apic_id_registered(void)
 {
 	return 1;
 }
 
-static inline void summit_setup_apic_routing(void)
+static void summit_setup_apic_routing(void)
 {
 	printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
 						nr_ioapics);
 }
 
-static inline int summit_apicid_to_node(int logical_apicid)
+static int summit_apicid_to_node(int logical_apicid)
 {
 #ifdef CONFIG_SMP
 	return apicid_2_node[hard_smp_processor_id()];
@@ -266,7 +265,7 @@ static inline int summit_cpu_to_logical_apicid(int cpu)
 #endif
 }
 
-static inline int summit_cpu_present_to_apicid(int mps_cpu)
+static int summit_cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < nr_cpu_ids)
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
@@ -274,28 +273,23 @@ static inline int summit_cpu_present_to_apicid(int mps_cpu)
 		return BAD_APICID;
 }
 
-static inline physid_mask_t
-summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
+static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
 	return physids_promote(0x0F);
 }
 
-static inline physid_mask_t summit_apicid_to_cpu_present(int apicid)
+static physid_mask_t summit_apicid_to_cpu_present(int apicid)
 {
 	return physid_mask_of_physid(0);
 }
 
-static inline void summit_setup_portio_remap(void)
-{
-}
-
-static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
+static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
 {
 	return 1;
 }
 
-static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
+static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpus_found = 0;
 	int num_bits_set;
@@ -303,12 +297,10 @@ static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	int cpu;
 
 	num_bits_set = cpus_weight(*cpumask);
-	/* Return id to all */
 	if (num_bits_set >= nr_cpu_ids)
-		return 0xFF;
+		return BAD_APICID;
 	/*
-	 * The cpus in the mask must all be on the apic cluster. If are not
-	 * on the same apicid cluster return default value of target_cpus():
+	 * The cpus in the mask must all be on the apic cluster.
 	 */
 	cpu = first_cpu(*cpumask);
 	apicid = summit_cpu_to_logical_apicid(cpu);
@@ -318,9 +310,9 @@ static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
 		int new_apicid = summit_cpu_to_logical_apicid(cpu);
 
 		if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
-			printk ("%s: Not a valid mask!\n", __func__);
+			printk("%s: Not a valid mask!\n", __func__);
 
-			return 0xFF;
+			return BAD_APICID;
 		}
 		apicid = apicid | new_apicid;
 		cpus_found++;
@@ -330,8 +322,7 @@ static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
 	return apicid;
 }
 
-static inline unsigned int
-summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
 			      const struct cpumask *andmask)
 {
 	int apicid = summit_cpu_to_logical_apicid(0);
@@ -356,7 +347,7 @@ summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
  *
  * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
  */
-static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb)
+static int summit_phys_pkg_id(int cpuid_apic, int index_msb)
 {
 	return hard_smp_processor_id() >> index_msb;
 }
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e85826829cf2..508bec1cee27 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -858,6 +858,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
  */
 void __init reserve_early(u64 start, u64 end, char *name)
 {
+	if (start >= end)
+		return;
+
 	drop_overlaps_that_are_ok(start, end);
 	__reserve_early(start, end, name, 0);
 }
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ca53224fc56c..d5e28424622c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -20,6 +20,64 @@
 #include <asm/pat.h>
 #include <linux/module.h>
 
+#ifdef CONFIG_X86_PAE
+static int
+is_io_mapping_possible(resource_size_t base, unsigned long size)
+{
+	return 1;
+}
+#else
+static int
+is_io_mapping_possible(resource_size_t base, unsigned long size)
+{
+	/* There is no way to map greater than 1 << 32 address without PAE */
+	if (base + size > 0x100000000ULL)
+		return 0;
+
+	return 1;
+}
+#endif
+
+int
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot)
+{
+	unsigned long ret_flag;
+
+	if (!is_io_mapping_possible(base, size))
+		goto out_err;
+
+	if (!pat_enabled) {
+		*prot = pgprot_noncached(PAGE_KERNEL);
+		return 0;
+	}
+
+	if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag))
+		goto out_err;
+
+	if (ret_flag == _PAGE_CACHE_WB)
+		goto out_free;
+
+	if (kernel_map_sync_memtype(base, size, ret_flag))
+		goto out_free;
+
+	*prot = __pgprot(__PAGE_KERNEL | ret_flag);
+	return 0;
+
+out_free:
+	free_memtype(base, base + size);
+out_err:
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(reserve_io_memtype_wc);
+
+void
+free_io_memtype(u64 base, unsigned long size)
+{
+	if (pat_enabled)
+		free_memtype(base, base + size);
+}
+EXPORT_SYMBOL_GPL(free_io_memtype);
+
 /* Map 'pfn' using fixed map 'type' and protections 'prot'
  */
 void *
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 05f9aef6818a..fdfedb65d45a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -634,6 +634,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
 }
 
 /*
+ * Change the memory type for the physial address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+{
+	unsigned long id_sz;
+
+	if (!pat_enabled || base >= __pa(high_memory))
+		return 0;
+
+	id_sz = (__pa(high_memory) < base + size) ?
+				__pa(high_memory) - base :
+				size;
+
+	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+		printk(KERN_INFO
+			"%s:%d ioremap_change_attr failed %s "
+			"for %Lx-%Lx\n",
+			current->comm, current->pid,
+			cattr_name(flags),
+			base, (unsigned long long)(base + size));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
  * Internal interface to reserve a range of physical memory with prot.
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
@@ -642,7 +669,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 				int strict_prot)
 {
 	int is_ram = 0;
-	int id_sz, ret;
+	int ret;
 	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
@@ -679,23 +706,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 				     flags);
 	}
 
-	/* Need to keep identity mapping in sync */
-	if (paddr >= __pa(high_memory))
-		return 0;
-
-	id_sz = (__pa(high_memory) < paddr + size) ?
-				__pa(high_memory) - paddr :
-				size;
-
-	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
 		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
-			"for %Lx-%Lx\n",
-			current->comm, current->pid,
-			cattr_name(flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size));
 		return -EINVAL;
 	}
 	return 0;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 86497d5f44cd..c52f4034c7fd 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -940,6 +940,9 @@ asmlinkage void __init xen_start_kernel(void)
 	   possible map and a non-dummy shared_info. */
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
+	local_irq_disable();
+	early_boot_irqs_off();
+
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
 