author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 21:34:57 +0200
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 21:34:57 +0200
commit		a989705c4cf6e6c1a339c95f9daf658b4ba88ca8
tree		d1925b831ec9fbae65db1b193dbad1869c43a9bc /arch/ia64/mm
parent		Merge branch 'server-cluster-locking-api' of git://linux-nfs.org/~bfields/linux
parent		Pull mem-attribute into release branch
Merge git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] update memory attribute aliasing documentation & test cases
  [IA64] fail mmaps that span areas with incompatible attributes
  [IA64] allow WB /sys/.../legacy_mem mmaps
  [IA64] make ioremap avoid unsupported attributes
  [IA64] rename ioremap variables to match i386
  [IA64] relax per-cpu TLB requirement to DTC
  [IA64] remove per-cpu ia64_phys_stacked_size_p8
  [IA64] Fix example error injection program
  [IA64] Itanium MC Error Injection Tool: pal_mc_error_inject() interface
  [IA64] Itanium MC Error Injection Tool: Makefile changes
  [IA64] Itanium MC Error Injection Tool: Driver sysfs interface
  [IA64] Itanium MC Error Injection Tool: Doc and sample application
  [IA64] Itanium MC Error Injection Tool: Kernel configuration
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--	arch/ia64/mm/init.c	11
-rw-r--r--	arch/ia64/mm/ioremap.c	78
2 files changed, 65 insertions, 24 deletions
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2da841110727..cffb1e8325e8 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -355,7 +355,7 @@ setup_gate (void)
void __devinit
ia64_mmu_init (void *my_cpu_data)
{
- unsigned long psr, pta, impl_va_bits;
+ unsigned long pta, impl_va_bits;
extern void __devinit tlb_init (void);
#ifdef CONFIG_DISABLE_VHPT
@@ -364,15 +364,6 @@ ia64_mmu_init (void *my_cpu_data)
# define VHPT_ENABLE_BIT 1
#endif
- /* Pin mapping for percpu area into TLB */
- psr = ia64_clear_ic();
- ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
- pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
- PERCPU_PAGE_SHIFT);
-
- ia64_set_psr(psr);
- ia64_srlz_i();
-
/*
* Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
* address space. The IA-64 architecture guarantees that at least 50 bits of
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 4280c074d64e..2a140627dfd6 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -1,5 +1,5 @@
/*
- * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -10,51 +10,101 @@
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>
static inline void __iomem *
-__ioremap (unsigned long offset, unsigned long size)
+__ioremap (unsigned long phys_addr)
{
- return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+ return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}
void __iomem *
-ioremap (unsigned long offset, unsigned long size)
+ioremap (unsigned long phys_addr, unsigned long size)
{
+ void __iomem *addr;
+ struct vm_struct *area;
+ unsigned long offset;
+ pgprot_t prot;
u64 attr;
unsigned long gran_base, gran_size;
+ unsigned long page_base;
/*
* For things in kern_memmap, we must use the same attribute
* as the rest of the kernel. For more details, see
* Documentation/ia64/aliasing.txt.
*/
- attr = kern_mem_attribute(offset, size);
+ attr = kern_mem_attribute(phys_addr, size);
if (attr & EFI_MEMORY_WB)
- return (void __iomem *) phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(phys_addr);
else if (attr & EFI_MEMORY_UC)
- return __ioremap(offset, size);
+ return __ioremap(phys_addr);
/*
* Some chipsets don't support UC access to memory. If
* WB is supported for the whole granule, we prefer that.
*/
- gran_base = GRANULEROUNDDOWN(offset);
- gran_size = GRANULEROUNDUP(offset + size) - gran_base;
+ gran_base = GRANULEROUNDDOWN(phys_addr);
+ gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
- return (void __iomem *) phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(phys_addr);
- return __ioremap(offset, size);
+ /*
+ * WB is not supported for the whole granule, so we can't use
+ * the region 7 identity mapping. If we can safely cover the
+ * area with kernel page table mappings, we can use those
+ * instead.
+ */
+ page_base = phys_addr & PAGE_MASK;
+ size = PAGE_ALIGN(phys_addr + size) - page_base;
+ if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
+ prot = PAGE_KERNEL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ area->phys_addr = phys_addr;
+ addr = (void __iomem *) area->addr;
+ if (ioremap_page_range((unsigned long) addr,
+ (unsigned long) addr + size, phys_addr, prot)) {
+ vunmap((void __force *) addr);
+ return NULL;
+ }
+
+ return (void __iomem *) (offset + (char __iomem *)addr);
+ }
+
+ return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap_nocache (unsigned long offset, unsigned long size)
+ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
- if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
+ if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
return NULL;
- return __ioremap(offset, size);
+ return __ioremap(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);
+
+void
+iounmap (volatile void __iomem *addr)
+{
+ if (REGION_NUMBER(addr) == RGN_GATE)
+ vunmap((void *) ((unsigned long) addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
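
For context, a minimal sketch of how a driver would typically consume the interfaces touched by this diff. The base address, window size, and register offset below are hypothetical and chosen only for illustration; with this patch, the pointer returned by ioremap() may be a cached region 7 identity mapping, an uncached region 6 address, or a new kernel page table mapping, depending on the EFI memory attributes of the range.

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/kernel.h>

	/* Hypothetical MMIO window, for illustration only. */
	#define EXAMPLE_MMIO_BASE	0x80000000UL
	#define EXAMPLE_MMIO_SIZE	0x1000UL
	#define EXAMPLE_STATUS_REG	0x10

	static int example_probe(void)
	{
		void __iomem *regs;
		u32 status;

		/* Map the device registers; attribute selection is handled
		 * inside ioremap() as described above. */
		regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
		if (!regs)
			return -ENOMEM;

		status = readl(regs + EXAMPLE_STATUS_REG);
		printk(KERN_INFO "example: status %#x\n", status);

		/* iounmap() only tears down mappings built via the kernel
		 * page tables; identity-mapped addresses are left alone. */
		iounmap(regs);
		return 0;
	}

Note that the new iounmap() keys off REGION_NUMBER(addr): only addresses in the vmap region are vunmapped, so callers can pass back any pointer previously returned by ioremap() regardless of which mapping strategy was used.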