| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2007-04-21 11:16:48 +0200 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2007-04-21 21:35:52 +0200 |
| commit | 4a56c1e41f19393577bdd5c774c289c199b7269d (patch) | |
| tree | a2c36198e3b4d78ccc8e373c3748112bc0645b0e /arch/arm/mm | |
| parent | [ARM] mm 2: clean up create_mapping() (diff) | |
[ARM] mm 3: separate out supersection mappings, avoid for <4GB
Catalin Marinas at ARM Ltd says:
> The CPU architects in ARM intended supersections only as a way to map
> addresses >= 4GB. Supersections are not mandated by the architecture
> and there is no easy way to detect their hardware support at run-time
> (other than checking for a specific core). From the analysis done in
> ARM, there wasn't a clear performance gain by using supersections
> rather than sections (no significant improvement in the TLB misses).
Therefore, we should avoid using supersections unless there's a real
need (in other words, unless we're mapping addresses >= 4GB).
This means we can simplify create_mapping() a bit: since supersection
mappings will only be used for addresses >= 4GB, the physical address,
virtual address and length must all be multiples of the supersection
mapping size.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
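To make the alignment requirement concrete, here is a minimal standalone C sketch (not kernel code) of the kind of check the mapping code performs. The 16MB supersection size and the helper name are assumptions used only for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed value: ARM supersections cover 16MB. */
#define SUPERSECTION_SIZE  (1UL << 24)
#define SUPERSECTION_MASK  (~(SUPERSECTION_SIZE - 1))

/* Returns 1 when virt, phys and length are all supersection-aligned. */
static int supersection_aligned(uint64_t virt, uint64_t phys, uint64_t length)
{
        return ((virt | phys | length) & ~SUPERSECTION_MASK) == 0;
}

int main(void)
{
        /* A 32MB region above 4GB, mapped at a 16MB-aligned virtual address. */
        printf("%d\n", supersection_aligned(0xff000000, 0x100000000ULL, 0x02000000));
        /* A 1MB length is not supersection-aligned, so the check fails. */
        printf("%d\n", supersection_aligned(0xff000000, 0x100000000ULL, 0x00100000));
        return 0;
}
```

With all three quantities OR'd together, a single mask test catches a misalignment in any of them.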
Diffstat (limited to 'arch/arm/mm')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm/mm/ioremap.c | 2 |
| -rw-r--r-- | arch/arm/mm/mmu.c | 130 |

2 files changed, 62 insertions, 70 deletions
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ac615c0f798..800855b2dc83 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -302,7 +302,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 #ifndef CONFIG_SMP
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
-              cpu_is_xsc3()) &&
+              cpu_is_xsc3()) && pfn >= 0x100000 &&
               !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, flags);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e359f3685433..32139800d939 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -398,21 +398,6 @@ alloc_init_section(unsigned long virt, unsigned long phys, int prot)
 }
 
 /*
- * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
- */
-static inline void
-alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
-{
-       int i;
-
-       for (i = 0; i < 16; i += 1) {
-               alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
-
-               virt += (PGDIR_SIZE / 2);
-       }
-}
-
-/*
  * Add a PAGE mapping between VIRT and PHYS in domain
  * DOMAIN with protection PROT.  Note that due to the
  * way we map the PTEs, we must allocate two PTE_SIZE'd
@@ -436,6 +421,64 @@ alloc_init_page(unsigned long virt, unsigned long phys, const struct mem_type *t
        set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(type->prot_pte)), 0);
 }
 
+static void __init create_36bit_mapping(struct map_desc *md,
+                                       const struct mem_type *type)
+{
+       unsigned long phys, addr, length, end;
+       pgd_t *pgd;
+
+       addr = md->virtual;
+       phys = (unsigned long)__pfn_to_phys(md->pfn);
+       length = PAGE_ALIGN(md->length);
+
+       if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
+               printk(KERN_ERR "MM: CPU does not support supersection "
+                      "mapping for 0x%08llx at 0x%08lx\n",
+                      __pfn_to_phys((u64)md->pfn), addr);
+               return;
+       }
+
+       /* N.B. ARMv6 supersections are only defined to work with domain 0.
+        *      Since domain assignments can in fact be arbitrary, the
+        *      'domain == 0' check below is required to insure that ARMv6
+        *      supersections are only allocated for domain 0 regardless
+        *      of the actual domain assignments in use.
+        */
+       if (type->domain) {
+               printk(KERN_ERR "MM: invalid domain in supersection "
+                      "mapping for 0x%08llx at 0x%08lx\n",
+                      __pfn_to_phys((u64)md->pfn), addr);
+               return;
+       }
+
+       if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
+               printk(KERN_ERR "MM: cannot create mapping for "
+                      "0x%08llx at 0x%08lx invalid alignment\n",
+                      __pfn_to_phys((u64)md->pfn), addr);
+               return;
+       }
+
+       /*
+        * Shift bits [35:32] of address into bits [23:20] of PMD
+        * (See ARMv6 spec).
+        */
+       phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+
+       pgd = pgd_offset_k(addr);
+       end = addr + length;
+       do {
+               pmd_t *pmd = pmd_offset(pgd, addr);
+               int i;
+
+               for (i = 0; i < 16; i++)
+                       *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+
+               addr += SUPERSECTION_SIZE;
+               phys += SUPERSECTION_SIZE;
+               pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
+       } while (addr != end);
+}
+
 /*
  * Create the page directory entries and any necessary
  * page tables for the mapping specified by `md'.  We
@@ -468,26 +511,9 @@ void __init create_mapping(struct map_desc *md)
        /*
         * Catch 36-bit addresses
         */
-       if(md->pfn >= 0x100000) {
-               if (type->domain) {
-                       printk(KERN_ERR "MM: invalid domain in supersection "
-                               "mapping for 0x%08llx at 0x%08lx\n",
-                               __pfn_to_phys((u64)md->pfn), md->virtual);
-                       return;
-               }
-               if((md->virtual | md->length | __pfn_to_phys(md->pfn))
-                       & ~SUPERSECTION_MASK) {
-                       printk(KERN_ERR "MM: cannot create mapping for "
-                               "0x%08llx at 0x%08lx invalid alignment\n",
-                               __pfn_to_phys((u64)md->pfn), md->virtual);
-                       return;
-               }
-
-               /*
-                * Shift bits [35:32] of address into bits [23:20] of PMD
-                * (See ARMv6 spec).
-                */
-               off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+       if (md->pfn >= 0x100000) {
+               create_36bit_mapping(md, type);
+               return;
        }
 
        virt = md->virtual;
@@ -509,40 +535,6 @@ void __init create_mapping(struct map_desc *md)
                length -= PAGE_SIZE;
        }
 
-       /* N.B. ARMv6 supersections are only defined to work with domain 0.
-        *      Since domain assignments can in fact be arbitrary, the
-        *      'domain == 0' check below is required to insure that ARMv6
-        *      supersections are only allocated for domain 0 regardless
-        *      of the actual domain assignments in use.
-        */
-       if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
-               && type->domain == 0) {
-               /*
-                * Align to supersection boundary if !high pages.
-                * High pages have already been checked for proper
-                * alignment above and they will fail the SUPSERSECTION_MASK
-                * check because of the way the address is encoded into
-                * offset.
-                */
-               if (md->pfn <= 0x100000) {
-                       while ((virt & ~SUPERSECTION_MASK ||
-                               (virt + off) & ~SUPERSECTION_MASK) &&
-                               length >= (PGDIR_SIZE / 2)) {
-                               alloc_init_section(virt, virt + off, type->prot_sect);
-
-                               virt += (PGDIR_SIZE / 2);
-                               length -= (PGDIR_SIZE / 2);
-                       }
-               }
-
-               while (length >= SUPERSECTION_SIZE) {
-                       alloc_init_supersection(virt, virt + off, type->prot_sect);
-
-                       virt += SUPERSECTION_SIZE;
-                       length -= SUPERSECTION_SIZE;
-               }
-       }
-
        /*
         * A section mapping covers half a "pgdir" entry.
         */
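For reference, the new create_36bit_mapping() folds physical address bits [35:32] into bits [23:20] of the supersection descriptor, as the ARMv6 architecture defines. Below is a minimal standalone sketch of that arithmetic (not kernel code); the 4K PAGE_SHIFT, the 16MB supersection size and the helper name are assumptions, and protection bits are left out:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT         12           /* assumed 4K pages */
#define SUPERSECTION_SIZE  (1UL << 24)  /* assumed 16MB supersections */

/*
 * Build the base-address field of a supersection descriptor from a pfn:
 * bits [31:24] come from the low 32 bits of the physical address, while
 * physical address bits [35:32] are folded into descriptor bits [23:20].
 */
static uint32_t supersection_base(uint64_t pfn)
{
        uint64_t phys = pfn << PAGE_SHIFT;
        uint32_t base = (uint32_t)phys & ~(SUPERSECTION_SIZE - 1);

        base |= ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;
        return base;
}

int main(void)
{
        /* Physical address 0x1_0800_0000 corresponds to pfn 0x108000. */
        printf("0x%08x\n", supersection_base(0x108000));  /* prints 0x08100000 */
        return 0;
}
```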