From 5d9b4b19f118abfb75e352841f7bf74580d7e427 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 13 Dec 2009 14:38:50 +0000 Subject: sh: Definitions for 3-level page table layout If using 64-bit PTEs and 4K pages then each page table has 512 entries (as opposed to 1024 entries with 32-bit PTEs). Unlike MIPS, SH follows the convention that all structures in the page table (pgd_t, pmd_t, pgprot_t, etc) must be the same size. Therefore, 64-bit PTEs require 64-bit PGD entries, etc. Using 2-levels of page tables and 64-bit PTEs it is only possible to map 1GB of virtual address space. In order to map all 4GB of virtual address space we need to adopt a 3-level page table layout. This actually works out better for CONFIG_SUPERH32 because we only waste 2 PGD entries on the P1 and P2 areas (which are untranslated) instead of 256. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/mm/Kconfig | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) (limited to 'arch/sh/mm/Kconfig') diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 0e7ba8e891cf..b3f6c1a30b22 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -189,6 +189,24 @@ config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG +choice + prompt "Page table layout" + default PGTABLE_LEVELS_3 if X2TLB + default PGTABLE_LEVELS_2 + +config PGTABLE_LEVELS_2 + bool "2 Levels" + help + This is the default page table layout for all SuperH CPUs. + +config PGTABLE_LEVELS_3 + bool "3 Levels" + depends on X2TLB + help + This enables a 3 level page table structure. + +endchoice + choice prompt "Kernel page size" default PAGE_SIZE_8KB if X2TLB @@ -196,13 +214,13 @@ choice config PAGE_SIZE_4KB bool "4kB" - depends on !MMU || !X2TLB + depends on !MMU || !X2TLB || PGTABLE_LEVELS_3 help This is the default page size used by all SuperH CPUs. config PAGE_SIZE_8KB bool "8kB" - depends on !MMU || X2TLB + depends on !MMU || X2TLB && !PGTABLE_LEVELS_3 help This enables 8kB pages as supported by SH-X2 and later MMUs. @@ -214,7 +232,7 @@ config PAGE_SIZE_16KB config PAGE_SIZE_64KB bool "64kB" - depends on !MMU || CPU_SH4 || CPU_SH5 + depends on !MMU || CPU_SH4 && !PGTABLE_LEVELS_3 || CPU_SH5 help This enables support for 64kB pages, possible on all SH-4 CPUs and later. -- cgit v1.2.3 From 3f5ab768164df9a44721660b96e0accb92eb2c24 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 24 Dec 2009 20:38:45 +0000 Subject: sh: Correct the PTRS_PER_PMD and PMD_SHIFT values The previous expressions were wrong which made free_pmd_range() explode when using anything other than 4KB pages (which is why 8KB and 64KB pages were disabled with the 3-level page table layout). The problem was that pmd_offset() was returning an index of non-zero when it should have been returning 0. This non-zero offset was used to calculate the address of the pmd table to free in free_pmd_range(), which ended up trying to free an object that was not aligned on a page boundary. Now 3-level page tables should work with 4KB, 8KB and 64KB pages. 
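For reference, a minimal standalone sketch of how the corrected constants work out with 4KB pages (PAGE_SHIFT = 12) and 64-bit PTEs (PTE_MAGNITUDE = 3); the values mirror the definitions touched by this patch and the check program itself is illustrative only, not part of the change:

#include <assert.h>

#define PAGE_SHIFT	12	/* 4KB pages */
#define PTE_MAGNITUDE	3	/* 64-bit PTEs */
#define PGDIR_SHIFT	30
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PTRS_PER_PMD	((1UL << PGDIR_SHIFT) / PMD_SIZE)

int main(void)
{
	/* Each page table holds 4096 / 8 = 512 64-bit PTEs. */
	assert((1UL << (PAGE_SHIFT - PTE_MAGNITUDE)) == 512);

	/* A PMD entry therefore spans 512 * 4KB = 2MB... */
	assert(PMD_SHIFT == 21 && PMD_SIZE == (1UL << 21));

	/* ...and each of the 4 PGD entries covers 1GB via 512 PMDs. */
	assert(PTRS_PER_PMD == 512);

	/*
	 * The old PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t)) also happens
	 * to give 512 for 4KB pages, but for 8KB and 64KB pages it
	 * disagrees with PGDIR_SHIFT, which is what broke
	 * free_pmd_range().
	 */
	return 0;
}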
Signed-off-by: Matt Fleming --- arch/sh/include/asm/pgtable_pmd.h | 4 ++-- arch/sh/mm/Kconfig | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/sh/mm/Kconfig') diff --git a/arch/sh/include/asm/pgtable_pmd.h b/arch/sh/include/asm/pgtable_pmd.h index 78dc36e1c2dd..42a180e534a8 100644 --- a/arch/sh/include/asm/pgtable_pmd.h +++ b/arch/sh/include/asm/pgtable_pmd.h @@ -17,11 +17,11 @@ #define USER_PTRS_PER_PGD 2 /* PMD bits */ -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t)) +#define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index b3f6c1a30b22..1b4364871899 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -220,7 +220,7 @@ config PAGE_SIZE_4KB config PAGE_SIZE_8KB bool "8kB" - depends on !MMU || X2TLB && !PGTABLE_LEVELS_3 + depends on !MMU || X2TLB help This enables 8kB pages as supported by SH-X2 and later MMUs. @@ -232,7 +232,7 @@ config PAGE_SIZE_16KB config PAGE_SIZE_64KB bool "64kB" - depends on !MMU || CPU_SH4 && !PGTABLE_LEVELS_3 || CPU_SH5 + depends on !MMU || CPU_SH4 || CPU_SH5 help This enables support for 64kB pages, possible on all SH-4 CPUs and later. -- cgit v1.2.3 From a0ab36689a36e583b6e736f1c99ac8c9aebdad59 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Jan 2010 18:31:48 +0900 Subject: sh: fixed PMB mode refactoring. This introduces some much overdue chainsawing of the fixed PMB support. fixed PMB was introduced initially to work around the fact that dynamic PMB mode was relatively broken, though they were never intended to converge. The main areas where there are differences are whether the system is booted in 29-bit mode or 32-bit mode, and whether legacy mappings are to be preserved. Any system booting in true 32-bit mode will not care about legacy mappings, so these are roughly decoupled. Regardless of the entry point, PMB and 32BIT are directly related as far as the kernel is concerned, so we also switch back to having one select the other. With legacy mappings iterated through and applied in the initialization path it's now possible to finally merge the two implementations and permit dynamic remapping overtop of remaining entries regardless of whether boot mappings are crafted by hand or inherited from the boot loader. 
Signed-off-by: Paul Mundt --- arch/sh/boot/Makefile | 9 +--- arch/sh/include/asm/addrspace.h | 4 +- arch/sh/include/asm/io.h | 27 ++++++---- arch/sh/include/asm/mmu.h | 18 +++++++ arch/sh/include/asm/page.h | 2 +- arch/sh/kernel/head_32.S | 4 +- arch/sh/kernel/setup.c | 3 -- arch/sh/kernel/vmlinux.lds.S | 15 +++--- arch/sh/mm/Kconfig | 24 ++------- arch/sh/mm/Makefile | 2 +- arch/sh/mm/pmb.c | 106 +++++++++++++++++++++++----------------- 11 files changed, 117 insertions(+), 97 deletions(-) (limited to 'arch/sh/mm/Kconfig') diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile index 743ce0c8d98d..1ce63624c9b9 100644 --- a/arch/sh/boot/Makefile +++ b/arch/sh/boot/Makefile @@ -46,15 +46,8 @@ $(obj)/romImage: $(obj)/romimage/vmlinux FORCE $(obj)/romimage/vmlinux: $(obj)/zImage FORCE $(Q)$(MAKE) $(build)=$(obj)/romimage $@ -KERNEL_MEMORY := 0x00000000 -ifeq ($(CONFIG_PMB_FIXED),y) -KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \ +KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \ $$[$(CONFIG_MEMORY_START) & 0x1fffffff]') -endif -ifeq ($(CONFIG_29BIT),y) -KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \ - $$[$(CONFIG_MEMORY_START)]') -endif KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \ $$[$(CONFIG_PAGE_OFFSET) + \ diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 99d6b3ecbe22..bcd7d4d78f6b 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -28,7 +28,7 @@ /* Returns the privileged segment base of a given address */ #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000) -#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED) +#ifdef CONFIG_29BIT /* * Map an address to a certain privileged segment */ @@ -40,7 +40,7 @@ ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG)) #define P4SEGADDR(a) \ ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG)) -#endif /* 29BIT || PMB_FIXED */ +#endif /* 29BIT */ #endif /* P1SEG */ /* Check if an address can be reached in 29 bits */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 026dd659a640..f4314d8b05b8 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -244,18 +244,11 @@ __ioremap(unsigned long offset, unsigned long size, unsigned long flags) } static inline void __iomem * -__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) +__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags) { -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) +#ifdef CONFIG_29BIT unsigned long last_addr = offset + size - 1; -#endif - void __iomem *ret; - ret = __ioremap_trapped(offset, size); - if (ret) - return ret; - -#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) /* * For P1 and P2 space this is trivial, as everything is already * mapped. Uncached access for P1 addresses are done through P2. 
@@ -274,6 +267,22 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) return (void __iomem *)P4SEGADDR(offset); #endif + return NULL; +} + +static inline void __iomem * +__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) +{ + void __iomem *ret; + + ret = __ioremap_trapped(offset, size); + if (ret) + return ret; + + ret = __ioremap_29bit(offset, size, flags); + if (ret) + return ret; + return __ioremap(offset, size, flags); } #else diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index c7426ad9926e..4b0882bf5183 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -65,11 +65,29 @@ struct pmb_entry { struct pmb_entry *link; }; +#ifdef CONFIG_PMB /* arch/sh/mm/pmb.c */ long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags); void pmb_unmap(unsigned long addr); int pmb_init(void); +#else +static inline long pmb_remap(unsigned long virt, unsigned long phys, + unsigned long size, unsigned long flags) +{ + return -EINVAL +} + +static inline void pmb_unmap(unsigned long addr) +{ +} + +static inline int pmb_init(void) +{ + return -ENODEV; +} +#endif /* CONFIG_PMB */ + #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 81bffc0d6860..a86c0f1d05d4 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -127,7 +127,7 @@ typedef struct page *pgtable_t; * is not visible (it is part of the PMB mapping) and so needs to be * added or subtracted as required. */ -#if defined(CONFIG_PMB_FIXED) +#if defined(CONFIG_PMB_LEGACY) /* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */ #define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START)) #define __pa(x) ((unsigned long)(x) - PMB_OFFSET) diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index 1151ecdffa71..e5d421db4c83 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S @@ -13,6 +13,8 @@ #include #include #include +#include +#include #ifdef CONFIG_CPU_SH4A #define SYNCO() synco @@ -33,7 +35,7 @@ ENTRY(empty_zero_page) .long 1 /* LOADER_TYPE */ .long 0x00000000 /* INITRD_START */ .long 0x00000000 /* INITRD_SIZE */ -#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED) +#ifdef CONFIG_32BIT .long 0x53453f00 + 32 /* "SE?" = 32 bit */ #else .long 0x53453f00 + 29 /* "SE?" = 29 bit */ diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 8b0e69792cf4..f79ebe32a24a 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -455,10 +455,7 @@ void __init setup_arch(char **cmdline_p) sh_mv.mv_setup(cmdline_p); paging_init(); - -#ifdef CONFIG_PMB_ENABLE pmb_init(); -#endif #ifdef CONFIG_SMP plat_smp_setup(); diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index a1e4ec24f1f5..9e5a5878eeae 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -14,17 +14,16 @@ OUTPUT_ARCH(sh) #include #include +#if defined(CONFIG_32BIT) && !defined(CONFIG_PMB_LEGACY) +#define MEMORY_OFFSET 0 +#else +#define MEMORY_OFFSET (CONFIG_MEMORY_START & 0x1fffffff) +#endif + ENTRY(_start) SECTIONS { -#ifdef CONFIG_PMB_FIXED - . = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) + - CONFIG_ZERO_PAGE_OFFSET; -#elif defined(CONFIG_32BIT) - . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET; -#else - . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET; -#endif + . 
= CONFIG_PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET; _text = .; /* Text and read-only data */ diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 358c860aeb9b..860cd24b4205 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -80,30 +80,18 @@ config 32BIT bool default y if CPU_SH5 -config PMB_ENABLE - bool "Support 32-bit physical addressing through PMB" - depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP - help - If you say Y here, physical addressing will be extended to - 32-bits through the SH-4A PMB. If this is not set, legacy - 29-bit physical addressing will be used. - -choice - prompt "PMB handling type" - depends on PMB_ENABLE - default PMB_FIXED - config PMB - bool "PMB" + bool "Support 32-bit physical addressing through PMB" depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP + select 32BIT help If you say Y here, physical addressing will be extended to 32-bits through the SH-4A PMB. If this is not set, legacy 29-bit physical addressing will be used. -config PMB_FIXED - bool "fixed PMB" - depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP +config PMB_LEGACY + bool "Support legacy boot mappings for PMB" + depends on PMB select 32BIT help If this option is enabled, fixed PMB mappings are inherited @@ -111,8 +99,6 @@ config PMB_FIXED management. This is the closest to legacy 29-bit physical mode, and allows systems to support up to 512MiB of system memory. -endchoice - config X2TLB bool "Enable extended TLB mode" depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index 9fa11d655044..edde8bdd681d 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile @@ -33,7 +33,7 @@ obj-y += $(tlb-y) endif obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PMB_ENABLE) += pmb.o +obj-$(CONFIG_PMB) += pmb.o obj-$(CONFIG_NUMA) += numa.o # Special flags for fault_64.o. This puts restrictions on the number of diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 280f6a166035..8f7dbf183fb0 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -3,7 +3,7 @@ * * Privileged Space Mapping Buffer (PMB) Support. * - * Copyright (C) 2005, 2006, 2007 Paul Mundt + * Copyright (C) 2005 - 2010 Paul Mundt * * P1/P2 Section mapping definitions from map32.h, which was: * @@ -279,51 +279,12 @@ static void __pmb_unmap(struct pmb_entry *pmbe) } while (pmbe); } -#ifdef CONFIG_PMB -int __uses_jump_to_uncached pmb_init(void) -{ - unsigned int i; - long size, ret; - - jump_to_uncached(); - - /* - * Insert PMB entries for the P1 and P2 areas so that, after - * we've switched the MMU to 32-bit mode, the semantics of P1 - * and P2 are the same as in 29-bit mode, e.g. 
- * - * P1 - provides a cached window onto physical memory - * P2 - provides an uncached window onto physical memory - */ - size = __MEMORY_START + __MEMORY_SIZE; - - ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); - BUG_ON(ret != size); - - ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); - BUG_ON(ret != size); - - ctrl_outl(0, PMB_IRMCR); - - /* PMB.SE and UB[7] */ - ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); - - /* Flush out the TLB */ - i = ctrl_inl(MMUCR); - i |= MMUCR_TI; - ctrl_outl(i, MMUCR); - - back_to_cached(); - - return 0; -} -#else -int __uses_jump_to_uncached pmb_init(void) +#ifdef CONFIG_PMB_LEGACY +static int pmb_apply_legacy_mappings(void) { int i; unsigned long addr, data; - - jump_to_uncached(); + unsigned int applied = 0; for (i = 0; i < PMB_ENTRY_MAX; i++) { struct pmb_entry *pmbe; @@ -357,13 +318,69 @@ int __uses_jump_to_uncached pmb_init(void) pmbe = pmb_alloc(vpn, ppn, flags, i); WARN_ON(IS_ERR(pmbe)); + + applied++; + } + + return (applied == 0); +} +#else +static inline int pmb_apply_legacy_mappings(void) +{ + return 1; +} +#endif + +int __uses_jump_to_uncached pmb_init(void) +{ + unsigned int i; + unsigned long size, ret; + + jump_to_uncached(); + + /* + * Attempt to apply the legacy boot mappings if configured. If + * this is successful then we simply carry on with those and + * don't bother establishing additional memory mappings. Dynamic + * device mappings through pmb_remap() can still be bolted on + * after this. + */ + ret = pmb_apply_legacy_mappings(); + if (ret == 0) { + back_to_cached(); + return 0; } + /* + * Insert PMB entries for the P1 and P2 areas so that, after + * we've switched the MMU to 32-bit mode, the semantics of P1 + * and P2 are the same as in 29-bit mode, e.g. + * + * P1 - provides a cached window onto physical memory + * P2 - provides an uncached window onto physical memory + */ + size = (unsigned long)__MEMORY_START + __MEMORY_SIZE; + + ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); + BUG_ON(ret != size); + + ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); + BUG_ON(ret != size); + + ctrl_outl(0, PMB_IRMCR); + + /* PMB.SE and UB[7] */ + ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); + + /* Flush out the TLB */ + i = ctrl_inl(MMUCR); + i |= MMUCR_TI; + ctrl_outl(i, MMUCR); + back_to_cached(); return 0; } -#endif /* CONFIG_PMB */ static int pmb_seq_show(struct seq_file *file, void *iter) { @@ -462,6 +479,5 @@ static int __init pmb_sysdev_init(void) { return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); } - subsys_initcall(pmb_sysdev_init); #endif -- cgit v1.2.3 From 782bb5a532f883540bf403afb19f735a4eefd95b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Jan 2010 19:11:14 +0900 Subject: sh: default to extended TLB support. All SH-X2 and SH-X3 parts support an extended TLB mode, which has been left as experimental since support was originally merged. Now that it's had some time to stabilize and get some exposure to various platforms, we can drop it as an option and default enable it across the board. This is also good future proofing for newer parts that will drop support for the legacy TLB mode completely. This will also force 3-level page tables for all newer parts, which is necessary both for the varying page sizes and larger memories. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgalloc.h | 2 +- arch/sh/include/asm/pgtable.h | 2 +- arch/sh/include/asm/pgtable_nopmd.h | 11 ++++++----- arch/sh/include/asm/pgtable_pmd.h | 5 +++-- arch/sh/mm/Kconfig | 29 ++--------------------------- arch/sh/mm/pgtable.c | 9 ++++----- 6 files changed, 17 insertions(+), 41 deletions(-) (limited to 'arch/sh/mm/Kconfig') diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index f8982f4e0405..8c00785c60d5 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -9,7 +9,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -#ifdef CONFIG_PGTABLE_LEVELS_3 +#if PAGETABLE_LEVELS > 2 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 78598ec33d0a..856ece07d31b 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -12,7 +12,7 @@ #ifndef __ASM_SH_PGTABLE_H #define __ASM_SH_PGTABLE_H -#ifdef CONFIG_PGTABLE_LEVELS_3 +#ifdef CONFIG_X2TLB #include #else #include diff --git a/arch/sh/include/asm/pgtable_nopmd.h b/arch/sh/include/asm/pgtable_nopmd.h index f0b525b3cb4a..b8355e4057cf 100644 --- a/arch/sh/include/asm/pgtable_nopmd.h +++ b/arch/sh/include/asm/pgtable_nopmd.h @@ -6,17 +6,18 @@ /* * traditional two-level paging structure */ +#define PAGETABLE_LEVELS 2 /* PTE bits */ -#define PTE_MAGNITUDE 2 /* 32-bit PTEs */ +#define PTE_MAGNITUDE 2 /* 32-bit PTEs */ -#define PTE_SHIFT PAGE_SHIFT -#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) +#define PTE_SHIFT PAGE_SHIFT +#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) /* PGD bits */ -#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) +#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) -#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE)) +#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE)) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #endif /* __ASM_SH_PGTABLE_NOPMD_H */ diff --git a/arch/sh/include/asm/pgtable_pmd.h b/arch/sh/include/asm/pgtable_pmd.h index 42a180e534a8..587b05e1d04f 100644 --- a/arch/sh/include/asm/pgtable_pmd.h +++ b/arch/sh/include/asm/pgtable_pmd.h @@ -7,11 +7,12 @@ * Some cores need a 3-level page table layout, for example when using * 64-bit PTEs and 4K pages. */ +#define PAGETABLE_LEVELS 3 -#define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ +#define PTE_MAGNITUDE 3 /* 64-bit PTEs on SH-X2 TLB */ /* PGD bits */ -#define PGDIR_SHIFT 30 +#define PGDIR_SHIFT 30 #define PTRS_PER_PGD 4 #define USER_PTRS_PER_PGD 2 diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 860cd24b4205..7a4ebc8cbadd 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -100,13 +100,8 @@ config PMB_LEGACY and allows systems to support up to 512MiB of system memory. config X2TLB - bool "Enable extended TLB mode" - depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL - help - Selecting this option will enable the extended mode of the SH-X2 - TLB. For legacy SH-X behaviour and interoperability, say N. For - all of the fun new features and a willingless to submit bug reports, - say Y. 
+ def_bool y + depends on (CPU_SHX2 || CPU_SHX3) && MMU config VSYSCALL bool "Support vsyscall page" @@ -174,32 +169,12 @@ config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG -choice - prompt "Page table layout" - default PGTABLE_LEVELS_3 if X2TLB - default PGTABLE_LEVELS_2 - -config PGTABLE_LEVELS_2 - bool "2 Levels" - help - This is the default page table layout for all SuperH CPUs. - -config PGTABLE_LEVELS_3 - bool "3 Levels" - depends on X2TLB - help - This enables a 3 level page table structure. - -endchoice - choice prompt "Kernel page size" - default PAGE_SIZE_8KB if X2TLB default PAGE_SIZE_4KB config PAGE_SIZE_4KB bool "4kB" - depends on !MMU || !X2TLB || PGTABLE_LEVELS_3 help This is the default page size used by all SuperH CPUs. diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c index e1bc5483cc07..6f21fb1d8726 100644 --- a/arch/sh/mm/pgtable.c +++ b/arch/sh/mm/pgtable.c @@ -3,8 +3,7 @@ #define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO static struct kmem_cache *pgd_cachep; - -#ifdef CONFIG_PGTABLE_LEVELS_3 +#if PAGETABLE_LEVELS > 2 static struct kmem_cache *pmd_cachep; #endif @@ -22,7 +21,7 @@ void pgtable_cache_init(void) pgd_cachep = kmem_cache_create("pgd_cache", PTRS_PER_PGD * (1< 2 pmd_cachep = kmem_cache_create("pmd_cache", PTRS_PER_PMD * (1< 2 void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { set_pud(pud, __pud((unsigned long)pmd)); @@ -54,4 +53,4 @@ void pmd_free(struct mm_struct *mm, pmd_t *pmd) { kmem_cache_free(pmd_cachep, pmd); } -#endif /* CONFIG_PGTABLE_LEVELS_3 */ +#endif /* PAGETABLE_LEVELS > 2 */ -- cgit v1.2.3 From 4d35b93a66e9b87df20784fcf130d2e8760be53f Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 5 Nov 2009 07:54:17 +0000 Subject: sh: Add fixed ioremap support Some devices need to be ioremap'd and accessed very early in the boot process. It is not possible to use the standard ioremap() function in this case because that requires kmalloc()'ing some virtual address space and kmalloc() may not be available so early in boot. This patch provides fixmap mappings that allow physical address ranges to be remapped into the kernel address space during the early boot stages. Signed-off-by: Matt Fleming --- arch/sh/include/asm/fixmap.h | 8 +++ arch/sh/include/asm/io.h | 6 ++ arch/sh/kernel/setup.c | 7 ++- arch/sh/mm/Kconfig | 4 ++ arch/sh/mm/Makefile | 1 + arch/sh/mm/ioremap_fixed.c | 144 +++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 167 insertions(+), 3 deletions(-) create mode 100644 arch/sh/mm/ioremap_fixed.c (limited to 'arch/sh/mm/Kconfig') diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h index 1566d3361ca4..38a1de866873 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h @@ -60,6 +60,14 @@ enum fixed_addresses { FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif + /* + * FIX_IOREMAP entries are useful for mapping physical address + * space before ioremap() is useable, e.g. really early in boot + * before kmalloc() is working. 
+ */ +#define FIX_N_IOREMAPS 32 + FIX_IOREMAP_BEGIN, + FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS, __end_of_fixed_addresses }; diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index f4314d8b05b8..bee5965e0a82 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -237,6 +237,12 @@ void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, unsigned long flags, void *caller); void __iounmap(void __iomem *addr); +#ifdef CONFIG_IOREMAP_FIXED +extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, pgprot_t); +extern void iounmap_fixed(void __iomem *); +extern void ioremap_fixed_init(void); +#endif + static inline void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags) { diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index f79ebe32a24a..e187750dd319 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -449,14 +449,15 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif + paging_init(); + pmb_init(); + + ioremap_fixed_init(); /* Perform the machine specific initialisation */ if (likely(sh_mv.mv_setup)) sh_mv.mv_setup(cmdline_p); - paging_init(); - pmb_init(); - #ifdef CONFIG_SMP plat_smp_setup(); #endif diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 7a4ebc8cbadd..b89075256b70 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -169,6 +169,10 @@ config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG +config IOREMAP_FIXED + def_bool y + depends on X2TLB || SUPERH64 + choice prompt "Kernel page size" default PAGE_SIZE_4KB diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index edde8bdd681d..89ba56c20ade 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile @@ -35,6 +35,7 @@ endif obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PMB) += pmb.o obj-$(CONFIG_NUMA) += numa.o +obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o # Special flags for fault_64.o. This puts restrictions on the number of # caller-save registers that the compiler can target when building this file. diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c new file mode 100644 index 000000000000..3a9d3d88fe8d --- /dev/null +++ b/arch/sh/mm/ioremap_fixed.c @@ -0,0 +1,144 @@ +/* + * Re-map IO memory to kernel address space so that we can access it. + * + * These functions should only be used when it is necessary to map a + * physical address space into the kernel address space before ioremap() + * can be used, e.g. early in boot before paging_init(). 
+ * + * Copyright (C) 2009 Matt Fleming + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ioremap_map { + void __iomem *addr; + unsigned long size; + unsigned long fixmap_addr; +}; + +static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS]; + +void __init ioremap_fixed_init(void) +{ + struct ioremap_map *map; + int i; + + for (i = 0; i < FIX_N_IOREMAPS; i++) { + map = &ioremap_maps[i]; + map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i); + } +} + +void __init __iomem * +ioremap_fixed(resource_size_t phys_addr, unsigned long size, pgprot_t prot) +{ + enum fixed_addresses idx0, idx; + resource_size_t last_addr; + struct ioremap_map *map; + unsigned long offset; + unsigned int nrpages; + int i, slot; + + slot = -1; + for (i = 0; i < FIX_N_IOREMAPS; i++) { + map = &ioremap_maps[i]; + if (!map->addr) { + map->size = size; + slot = i; + break; + } + } + + if (slot < 0) + return NULL; + + /* Don't allow wraparound or zero size */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) + return NULL; + + /* + * Fixmap mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(last_addr + 1) - phys_addr; + + /* + * Mappings have to fit in the FIX_IOREMAP area. + */ + nrpages = size >> PAGE_SHIFT; + if (nrpages > FIX_N_IOREMAPS) + return NULL; + + /* + * Ok, go for it.. + */ + idx0 = FIX_IOREMAP_BEGIN + slot; + idx = idx0; + while (nrpages > 0) { + pgprot_val(prot) |= _PAGE_WIRED; + __set_fixmap(idx, phys_addr, prot); + phys_addr += PAGE_SIZE; + idx++; + --nrpages; + } + + map->addr = (void __iomem *)(offset + map->fixmap_addr); + return map->addr; +} + +void __init iounmap_fixed(void __iomem *addr) +{ + enum fixed_addresses idx; + unsigned long virt_addr; + struct ioremap_map *map; + unsigned long offset; + unsigned int nrpages; + int i, slot; + pgprot_t prot; + + slot = -1; + for (i = 0; i < FIX_N_IOREMAPS; i++) { + map = &ioremap_maps[i]; + if (map->addr == addr) { + slot = i; + break; + } + } + + if (slot < 0) + return; + + virt_addr = (unsigned long)addr; + + offset = virt_addr & ~PAGE_MASK; + nrpages = PAGE_ALIGN(offset + map->size - 1) >> PAGE_SHIFT; + + pgprot_val(prot) = _PAGE_WIRED; + + idx = FIX_IOREMAP_BEGIN + slot + nrpages; + while (nrpages > 0) { + __clear_fixmap(idx, prot); + --idx; + --nrpages; + } + + map->size = 0; + map->addr = NULL; +} -- cgit v1.2.3 From b0f3ae03aca0f331b851ae94bc066124e7f104df Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Feb 2010 15:40:00 +0900 Subject: sh: Isolate uncached mapping support. This splits out the uncached mapping support under its own config option, presently only used by 29-bit mode and 32-bit + PMB. This will make it possible to optionally add an uncached mapping on sh64 as well as booting without an uncached mapping for 32-bit. 
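The profile_pc() change below keys off the generic cached/uncached offset instead of the fixed P2SEG window. As a rough illustration only (assuming the 29-bit defaults of cached_to_uncached = 0x20000000 and a 512MB uncached window, as set up in arch/sh/mm/init.c), a PC sampled from the uncached alias folds back to its cached address like this:

/* Illustrative sketch -- mirrors the CONFIG_UNCACHED_MAPPING case below. */
static unsigned long fold_to_cached(unsigned long pc,
				    unsigned long memory_start,
				    unsigned long cached_to_uncached,
				    unsigned long uncached_size)
{
	unsigned long ustart = memory_start + cached_to_uncached;

	if (pc >= ustart && pc < ustart + uncached_size)
		pc -= cached_to_uncached;	/* e.g. 0xa0001000 -> 0x80001000 */

	return pc;
}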
Signed-off-by: Paul Mundt --- arch/sh/include/asm/ptrace.h | 19 +++++++++++++------ arch/sh/include/asm/system.h | 3 ++- arch/sh/include/asm/system_64.h | 1 + arch/sh/mm/Kconfig | 5 +++++ arch/sh/mm/init.c | 11 ++++++++--- 5 files changed, 29 insertions(+), 10 deletions(-) (limited to 'arch/sh/mm/Kconfig') diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index 201d11ef211f..e879dffa324b 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -102,13 +102,15 @@ struct pt_dspregs { #define PTRACE_GETDSPREGS 55 /* DSP registers */ #define PTRACE_SETDSPREGS 56 -#define PT_TEXT_END_ADDR 240 -#define PT_TEXT_ADDR 244 /* &(struct user)->start_code */ -#define PT_DATA_ADDR 248 /* &(struct user)->start_data */ +#define PT_TEXT_END_ADDR 240 +#define PT_TEXT_ADDR 244 /* &(struct user)->start_code */ +#define PT_DATA_ADDR 248 /* &(struct user)->start_data */ #define PT_TEXT_LEN 252 #ifdef __KERNEL__ #include +#include +#include #define user_mode(regs) (((regs)->sr & 0x40000000)==0) #define instruction_pointer(regs) ((unsigned long)(regs)->pc) @@ -137,9 +139,14 @@ static inline unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); -#ifdef P2SEG - if (pc >= P2SEG && pc < P3SEG) - pc -= 0x20000000; +#ifdef CONFIG_UNCACHED_MAPPING + /* + * If PC points in to the uncached mapping, fix it up and hand + * back the cached equivalent. + */ + if ((pc >= (memory_start + cached_to_uncached)) && + (pc < (memory_start + cached_to_uncached + uncached_size))) + pc -= cached_to_uncached; #endif return pc; diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index 6442f1783fe0..0bd7a17d5e1a 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -10,7 +10,6 @@ #include #include #include -#include #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ @@ -114,6 +113,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, (unsigned long)_n_, sizeof(*(ptr))); \ }) +struct pt_regs; + extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); void free_initmem(void); void free_initrd_mem(unsigned long start, unsigned long end); diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h index 3391bb6b21d8..36338646dfc8 100644 --- a/arch/sh/include/asm/system_64.h +++ b/arch/sh/include/asm/system_64.h @@ -18,6 +18,7 @@ /* * switch_to() should switch tasks to task nr n, first */ +struct thread_struct; struct task_struct *sh64_switch_to(struct task_struct *prev, struct thread_struct *prev_thread, struct task_struct *next, diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index b89075256b70..65cb5b83e072 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -75,6 +75,7 @@ config MEMORY_SIZE config 29BIT def_bool !32BIT depends on SUPERH32 + select UNCACHED_MAPPING config 32BIT bool @@ -84,6 +85,7 @@ config PMB bool "Support 32-bit physical addressing through PMB" depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP select 32BIT + select UNCACHED_MAPPING help If you say Y here, physical addressing will be extended to 32-bits through the SH-4A PMB. 
If this is not set, legacy @@ -173,6 +175,9 @@ config IOREMAP_FIXED def_bool y depends on X2TLB || SUPERH64 +config UNCACHED_MAPPING + bool + choice prompt "Kernel page size" default PAGE_SIZE_4KB diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index dffa6c749489..58012b6bbe76 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -21,11 +21,12 @@ #include #include #include +#include DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); pgd_t swapper_pg_dir[PTRS_PER_PGD]; -#ifdef CONFIG_SUPERH32 +#ifdef CONFIG_UNCACHED_MAPPING /* * This is the offset of the uncached section from its cached alias. * @@ -36,8 +37,8 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD]; * Default value only valid in 29 bit mode, in 32bit mode this will be * updated by the early PMB initialization code. */ -unsigned long cached_to_uncached = P2SEG - P1SEG; -unsigned long uncached_size = 0x20000000; +unsigned long cached_to_uncached = 0x20000000; +unsigned long uncached_size = SZ_512M; #endif #ifdef CONFIG_MMU @@ -281,7 +282,9 @@ void __init mem_init(void) #endif " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n" +#ifdef CONFIG_UNCACHED_MAPPING " : 0x%08lx - 0x%08lx (%4ld MB) (uncached)\n" +#endif " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", @@ -299,9 +302,11 @@ void __init mem_init(void) (unsigned long)memory_start, (unsigned long)high_memory, ((unsigned long)high_memory - (unsigned long)memory_start) >> 20, +#ifdef CONFIG_UNCACHED_MAPPING (unsigned long)memory_start + cached_to_uncached, (unsigned long)memory_start + cached_to_uncached + uncached_size, uncached_size >> 20, +#endif (unsigned long)&__init_begin, (unsigned long)&__init_end, ((unsigned long)&__init_end - -- cgit v1.2.3 From d01447b3197c2c470a14666be2c640407bbbfec7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 18 Feb 2010 18:13:51 +0900 Subject: sh: Merge legacy and dynamic PMB modes. This implements a bit of rework for the PMB code, which permits us to kill off the legacy PMB mode completely. Rather than trusting the boot loader to do the right thing, we do a quick verification of the PMB contents to determine whether to have the kernel setup the initial mappings or whether it needs to mangle them later on instead. If we're booting from legacy mappings, the kernel will now take control of them and make them match the kernel's initial mapping configuration. This is accomplished by breaking the initialization phase out in to multiple steps: synchronization, merging, and resizing. With the recent rework, the synchronization code establishes page links for compound mappings already, so we build on top of this for promoting mappings and reclaiming unused slots. At the same time, the changes introduced for the uncached helpers also permit us to dynamically resize the uncached mapping without any particular headaches. The smallest page size is more than sufficient for mapping all of kernel text, and as we're careful not to jump to any far off locations in the setup code the mapping can safely be resized regardless of whether we are executing from it or not. 
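As a simplified sketch of the merging step (assuming the usual SH-4A PMB page sizes of 16MB, 64MB, 128MB and 512MB; the real pmb_merge() below additionally rewrites the entry flags and frees the swallowed slots):

/* Illustrative only: pick the largest valid PMB size covered by a chain. */
static const unsigned long pmb_page_sizes[] = {
	16 << 20, 64 << 20, 128 << 20, 512 << 20,
};

static unsigned long merged_size(const unsigned long *entry_sizes, int n)
{
	unsigned long span = 0, best = 0;
	int i, j;

	for (i = 0; i < n; i++) {
		span += entry_sizes[i];		/* contiguous chain of entries */

		for (j = 0; j < 4; j++)
			if (span == pmb_page_sizes[j])
				best = span;	/* span fits a single entry */
	}

	return best;	/* e.g. 4 x 128MB -> one 512MB mapping */
}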
Signed-off-by: Paul Mundt --- arch/sh/boot/compressed/misc.c | 2 +- arch/sh/include/asm/mmu.h | 12 +- arch/sh/include/asm/page.h | 11 +- arch/sh/include/asm/uncached.h | 18 +++ arch/sh/kernel/head_32.S | 42 ++++++- arch/sh/kernel/setup.c | 2 + arch/sh/mm/Kconfig | 10 -- arch/sh/mm/init.c | 1 - arch/sh/mm/pmb.c | 243 +++++++++++++++++++++++++++++++++++------ arch/sh/mm/uncached.c | 6 + 10 files changed, 276 insertions(+), 71 deletions(-) create mode 100644 arch/sh/include/asm/uncached.h (limited to 'arch/sh/mm/Kconfig') diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c index 9ba07927d16a..27140a6b365d 100644 --- a/arch/sh/boot/compressed/misc.c +++ b/arch/sh/boot/compressed/misc.c @@ -117,7 +117,7 @@ void decompress_kernel(void) output_addr = (CONFIG_MEMORY_START + 0x2000); #else output_addr = __pa((unsigned long)&_text+PAGE_SIZE); -#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY) +#if defined(CONFIG_29BIT) output_addr |= P2SEG; #endif #endif diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index e42c4e2a41df..15a05b615ba7 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -58,7 +58,7 @@ typedef struct { long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, pgprot_t prot); void pmb_unmap(unsigned long addr); -int pmb_init(void); +void pmb_init(void); bool __in_29bit_mode(void); #else static inline long pmb_remap(unsigned long virt, unsigned long phys, @@ -67,14 +67,8 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys, return -EINVAL; } -static inline void pmb_unmap(unsigned long addr) -{ -} - -static inline int pmb_init(void) -{ - return -ENODEV; -} +#define pmb_unmap(addr) do { } while (0) +#define pmb_init(addr) do { } while (0) #ifdef CONFIG_29BIT #define __in_29bit_mode() (1) diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 8237d9f53e56..d71feb359304 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -45,21 +45,12 @@ #endif #ifndef __ASSEMBLY__ +#include extern unsigned long shm_align_mask; extern unsigned long max_low_pfn, min_low_pfn; extern unsigned long memory_start, memory_end; -#ifdef CONFIG_UNCACHED_MAPPING -extern unsigned long uncached_start, uncached_end; - -extern int virt_addr_uncached(unsigned long kaddr); -extern void uncached_init(void); -#else -#define virt_addr_uncached(kaddr) (0) -#define uncached_init() do { } while (0) -#endif - static inline unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2) { diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h new file mode 100644 index 000000000000..e3419f96626a --- /dev/null +++ b/arch/sh/include/asm/uncached.h @@ -0,0 +1,18 @@ +#ifndef __ASM_SH_UNCACHED_H +#define __ASM_SH_UNCACHED_H + +#include + +#ifdef CONFIG_UNCACHED_MAPPING +extern unsigned long uncached_start, uncached_end; + +extern int virt_addr_uncached(unsigned long kaddr); +extern void uncached_init(void); +extern void uncached_resize(unsigned long size); +#else +#define virt_addr_uncached(kaddr) (0) +#define uncached_init() do { } while (0) +#define uncached_resize(size) BUG() +#endif + +#endif /* __ASM_SH_UNCACHED_H */ diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index 79ff39517f8e..fe0b743881b0 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S @@ -85,7 +85,7 @@ ENTRY(_stext) ldc r0, r7_bank ! ... 
and initial thread_info #endif -#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY) +#ifdef CONFIG_PMB /* * Reconfigure the initial PMB mappings setup by the hardware. * @@ -139,7 +139,6 @@ ENTRY(_stext) mov.l r0, @r1 mov.l .LMEMORY_SIZE, r5 - mov r5, r7 mov #PMB_E_SHIFT, r0 mov #0x1, r4 @@ -150,6 +149,40 @@ ENTRY(_stext) mov.l .LFIRST_ADDR_ENTRY, r2 mov.l .LPMB_ADDR, r3 + /* + * First we need to walk the PMB and figure out if there are any + * existing mappings that match the initial mappings VPN/PPN. + * If these have already been established by the bootloader, we + * don't bother setting up new entries here, and let the late PMB + * initialization take care of things instead. + * + * Note that we may need to coalesce and merge entries in order + * to reclaim more available PMB slots, which is much more than + * we want to do at this early stage. + */ + mov #0, r10 + mov #NR_PMB_ENTRIES, r9 + + mov r1, r7 /* temporary PMB_DATA iter */ + +.Lvalidate_existing_mappings: + + mov.l @r7, r8 + and r0, r8 + cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */ + bt .Lpmb_done + + add #1, r10 /* Increment the loop counter */ + cmp/eq r9, r10 + bf/s .Lvalidate_existing_mappings + add r4, r7 /* Increment to the next PMB_DATA entry */ + + /* + * If we've fallen through, continue with setting up the initial + * mappings. + */ + + mov r5, r7 /* cached_to_uncached */ mov #0, r10 #ifdef CONFIG_UNCACHED_MAPPING @@ -252,7 +285,8 @@ ENTRY(_stext) mov.l 6f, r0 icbi @r0 -#endif /* !CONFIG_PMB_LEGACY */ +.Lpmb_done: +#endif /* CONFIG_PMB */ #ifndef CONFIG_SH_NO_BSS_INIT /* @@ -304,7 +338,7 @@ ENTRY(stack_start) 6: .long sh_cpu_init 7: .long init_thread_union -#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY) +#ifdef CONFIG_PMB .LPMB_ADDR: .long PMB_ADDR .LPMB_DATA: .long PMB_DATA .LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index e187750dd319..3459e70eed72 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + uncached_init(); + plat_early_device_setup(); /* Let earlyprintk output early console messages */ diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 65cb5b83e072..1445ca6257df 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -91,16 +91,6 @@ config PMB 32-bits through the SH-4A PMB. If this is not set, legacy 29-bit physical addressing will be used. -config PMB_LEGACY - bool "Support legacy boot mappings for PMB" - depends on PMB - select 32BIT - help - If this option is enabled, fixed PMB mappings are inherited - from the boot loader, and the kernel does not attempt dynamic - management. This is the closest to legacy 29-bit physical mode, - and allows systems to support up to 512MiB of system memory. 
- config X2TLB def_bool y depends on (CPU_SHX2 || CPU_SHX3) && MMU diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 08e280d7cc7e..68028e8f26ce 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -245,7 +245,6 @@ void __init mem_init(void) memset(empty_zero_page, 0, PAGE_SIZE); __flush_wback_region(empty_zero_page, PAGE_SIZE); - uncached_init(); vsyscall_init(); codesize = (unsigned long) &_etext - (unsigned long) &_text; diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index b9d5476e1284..198bcff5e96f 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -52,7 +52,7 @@ struct pmb_entry { struct pmb_entry *link; }; -static void pmb_unmap_entry(struct pmb_entry *); +static void pmb_unmap_entry(struct pmb_entry *, int depth); static DEFINE_RWLOCK(pmb_rwlock); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; @@ -115,13 +115,14 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe = &pmb_entry_list[pos]; + memset(pmbe, 0, sizeof(struct pmb_entry)); + spin_lock_init(&pmbe->lock); pmbe->vpn = vpn; pmbe->ppn = ppn; pmbe->flags = flags; pmbe->entry = pos; - pmbe->size = 0; return pmbe; @@ -133,7 +134,9 @@ out: static void pmb_free(struct pmb_entry *pmbe) { __clear_bit(pmbe->entry, pmb_map); - pmbe->entry = PMB_NO_ENTRY; + + pmbe->entry = PMB_NO_ENTRY; + pmbe->link = NULL; } /* @@ -161,9 +164,6 @@ static __always_inline unsigned long pmb_cache_flags(void) */ static void __set_pmb_entry(struct pmb_entry *pmbe) { - pmbe->flags &= ~PMB_CACHE_MASK; - pmbe->flags |= pmb_cache_flags(); - writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); @@ -280,7 +280,7 @@ again: return wanted - size; out: - pmb_unmap_entry(pmbp); + pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); return err; } @@ -302,18 +302,40 @@ void pmb_unmap(unsigned long addr) read_unlock(&pmb_rwlock); - pmb_unmap_entry(pmbe); + pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); } -static void pmb_unmap_entry(struct pmb_entry *pmbe) +static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) { - unsigned long flags; + return (b->vpn == (a->vpn + a->size)) && + (b->ppn == (a->ppn + a->size)) && + (b->flags == a->flags); +} - if (unlikely(!pmbe)) - return; +static bool pmb_size_valid(unsigned long size) +{ + int i; - write_lock_irqsave(&pmb_rwlock, flags); + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (pmb_sizes[i].size == size) + return true; + + return false; +} + +static int pmb_size_to_flags(unsigned long size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (pmb_sizes[i].size == size) + return pmb_sizes[i].flag; + return 0; +} + +static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) +{ do { struct pmb_entry *pmblink = pmbe; @@ -332,8 +354,18 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe) pmbe = pmblink->link; pmb_free(pmblink); - } while (pmbe); + } while (pmbe && --depth); +} + +static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) +{ + unsigned long flags; + if (unlikely(!pmbe)) + return; + + write_lock_irqsave(&pmb_rwlock, flags); + __pmb_unmap_entry(pmbe, depth); write_unlock_irqrestore(&pmb_rwlock, flags); } @@ -342,14 +374,40 @@ static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) return ppn >= __pa(memory_start) && ppn < __pa(memory_end); } -static int pmb_synchronize_mappings(void) +static void __init pmb_notify(void) { - unsigned int applied = 0; - struct pmb_entry *pmbp = NULL; - int i, j; + int i; pr_info("PMB: boot mappings:\n"); + 
read_lock(&pmb_rwlock); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n", + pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, + pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un"); + } + + read_unlock(&pmb_rwlock); +} + +/* + * Sync our software copy of the PMB mappings with those in hardware. The + * mappings in the hardware PMB were either set up by the bootloader or + * very early on by the kernel. + */ +static void __init pmb_synchronize(void) +{ + struct pmb_entry *pmbp = NULL; + int i, j; + /* * Run through the initial boot mappings, log the established * ones, and blow away anything that falls outside of the valid @@ -432,10 +490,10 @@ static int pmb_synchronize_mappings(void) /* * Compare the previous entry against the current one to * see if the entries span a contiguous mapping. If so, - * setup the entry links accordingly. + * setup the entry links accordingly. Compound mappings + * are later coalesced. */ - if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && - (pmbe->ppn == (pmbp->ppn + pmbp->size))) + if (pmb_can_merge(pmbp, pmbe)) pmbp->link = pmbe; spin_unlock(&pmbp->lock); @@ -444,37 +502,150 @@ static int pmb_synchronize_mappings(void) pmbp = pmbe; spin_unlock_irqrestore(&pmbe->lock, irqflags); + } +} - pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n", - vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20, - (data_val & PMB_C) ? "" : "un"); +static void __init pmb_merge(struct pmb_entry *head) +{ + unsigned long span, newsize; + struct pmb_entry *tail; + int i = 1, depth = 0; + + span = newsize = head->size; - applied++; + tail = head->link; + while (tail) { + span += tail->size; + + if (pmb_size_valid(span)) { + newsize = span; + depth = i; + } + + /* This is the end of the line.. */ + if (!tail->link) + break; + + tail = tail->link; + i++; } - return (applied == 0); + /* + * The merged page size must be valid. + */ + if (!pmb_size_valid(newsize)) + return; + + head->flags &= ~PMB_SZ_MASK; + head->flags |= pmb_size_to_flags(newsize); + + head->size = newsize; + + __pmb_unmap_entry(head->link, depth); + __set_pmb_entry(head); } -int pmb_init(void) +static void __init pmb_coalesce(void) { - int ret; + unsigned long flags; + int i; + + write_lock_irqsave(&pmb_rwlock, flags); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + /* + * We're only interested in compound mappings + */ + if (!pmbe->link) + continue; + + /* + * Nothing to do if it already uses the largest possible + * page size. + */ + if (pmbe->size == SZ_512M) + continue; + + pmb_merge(pmbe); + } + + write_unlock_irqrestore(&pmb_rwlock, flags); +} + +#ifdef CONFIG_UNCACHED_MAPPING +static void __init pmb_resize(void) +{ + int i; /* - * Sync our software copy of the PMB mappings with those in - * hardware. The mappings in the hardware PMB were either set up - * by the bootloader or very early on by the kernel. + * If the uncached mapping was constructed by the kernel, it will + * already be a reasonable size. 
*/ - ret = pmb_synchronize_mappings(); - if (unlikely(ret == 0)) - return 0; + if (uncached_size == SZ_16M) + return; + + read_lock(&pmb_rwlock); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + unsigned long flags; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + if (pmbe->vpn != uncached_start) + continue; + + /* + * Found it, now resize it. + */ + spin_lock_irqsave(&pmbe->lock, flags); + + pmbe->size = SZ_16M; + pmbe->flags &= ~PMB_SZ_MASK; + pmbe->flags |= pmb_size_to_flags(pmbe->size); + + uncached_resize(pmbe->size); + + __set_pmb_entry(pmbe); + + spin_unlock_irqrestore(&pmbe->lock, flags); + } + + read_lock(&pmb_rwlock); +} +#endif + +void __init pmb_init(void) +{ + /* Synchronize software state */ + pmb_synchronize(); + + /* Attempt to combine compound mappings */ + pmb_coalesce(); + +#ifdef CONFIG_UNCACHED_MAPPING + /* Resize initial mappings, if necessary */ + pmb_resize(); +#endif + + /* Log them */ + pmb_notify(); writel_uncached(0, PMB_IRMCR); /* Flush out the TLB */ __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); ctrl_barrier(); - - return 0; } bool __in_29bit_mode(void) diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c index 807906981d9d..cf20a5c5136a 100644 --- a/arch/sh/mm/uncached.c +++ b/arch/sh/mm/uncached.c @@ -26,3 +26,9 @@ void __init uncached_init(void) uncached_start = memory_end; uncached_end = uncached_start + uncached_size; } + +void __init uncached_resize(unsigned long size) +{ + uncached_size = size; + uncached_end = uncached_start + uncached_size; +} -- cgit v1.2.3