From a0ab36689a36e583b6e736f1c99ac8c9aebdad59 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Jan 2010 18:31:48 +0900 Subject: sh: fixed PMB mode refactoring. This introduces some much overdue chainsawing of the fixed PMB support. fixed PMB was introduced initially to work around the fact that dynamic PMB mode was relatively broken, though they were never intended to converge. The main areas where there are differences are whether the system is booted in 29-bit mode or 32-bit mode, and whether legacy mappings are to be preserved. Any system booting in true 32-bit mode will not care about legacy mappings, so these are roughly decoupled. Regardless of the entry point, PMB and 32BIT are directly related as far as the kernel is concerned, so we also switch back to having one select the other. With legacy mappings iterated through and applied in the initialization path it's now possible to finally merge the two implementations and permit dynamic remapping overtop of remaining entries regardless of whether boot mappings are crafted by hand or inherited from the boot loader. Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index c7426ad9926e..4b0882bf5183 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -65,11 +65,29 @@ struct pmb_entry { struct pmb_entry *link; }; +#ifdef CONFIG_PMB /* arch/sh/mm/pmb.c */ long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags); void pmb_unmap(unsigned long addr); int pmb_init(void); +#else +static inline long pmb_remap(unsigned long virt, unsigned long phys, + unsigned long size, unsigned long flags) +{ + return -EINVAL +} + +static inline void pmb_unmap(unsigned long addr) +{ +} + +static inline int pmb_init(void) +{ + return -ENODEV; +} +#endif /* CONFIG_PMB */ + #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ -- cgit v1.2.3 From 46c4e5daea3d5df06e27bf5a49a0c42274db6725 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 15 Jan 2010 08:00:45 +0900 Subject: sh: Fix CONFIG_PMB=n build. The last commit introduced the following breakage arch/sh/include/asm/mmu.h: In function 'pmb_remap': arch/sh/include/asm/mmu.h:79: error: expected ';' before '}' token and... arch/sh/include/asm/mmu.h:78: error: 'EINVAL' undeclared (first use in this function) arch/sh/include/asm/mmu.h:78: error: (Each undeclared identifier is reported only once arch/sh/include/asm/mmu.h:78: error: for each function it appears in.) 
arch/sh/include/asm/mmu.h: In function 'pmb_init': arch/sh/include/asm/mmu.h:87: error: 'ENODEV' undeclared (first use in this function) Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 4b0882bf5183..e5e8f48830ef 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -30,6 +30,7 @@ #define PMB_NO_ENTRY (-1) #ifndef __ASSEMBLY__ +#include /* Default "unsigned long" context */ typedef unsigned long mm_context_id_t[NR_CPUS]; @@ -75,7 +76,7 @@ int pmb_init(void); static inline long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags) { - return -EINVAL + return -EINVAL; } static inline void pmb_unmap(unsigned long addr) -- cgit v1.2.3 From 2efa53b269ec1e9289a108e1506f53f6f1de440b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 20 Jan 2010 16:40:48 +0900 Subject: sh: Make 29/32-bit mode check helper generally available. Presently __in_29bit_mode() is only defined for the PMB case, but it's also easily derived from the CONFIG_29BIT and CONFIG_32BIT && CONFIG_PMB=n cases. Signed-off-by: Paul Mundt --- arch/sh/include/asm/addrspace.h | 6 ------ arch/sh/include/asm/mmu.h | 10 +++++++++- arch/sh/mm/init.c | 7 ------- arch/sh/mm/pmb.c | 5 +++++ 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 268efd62ed21..446b3831c214 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -65,11 +65,5 @@ #define P3_ADDR_MAX P4SEG #endif -#ifndef __ASSEMBLY__ -#ifdef CONFIG_PMB -extern int __in_29bit_mode(void); -#endif /* CONFIG_PMB */ -#endif /* __ASSEMBLY__ */ - #endif /* __KERNEL__ */ #endif /* __ASM_SH_ADDRSPACE_H */ diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index e5e8f48830ef..ca7d91e8aa72 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -31,6 +31,7 @@ #ifndef __ASSEMBLY__ #include +#include /* Default "unsigned long" context */ typedef unsigned long mm_context_id_t[NR_CPUS]; @@ -72,6 +73,7 @@ long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags); void pmb_unmap(unsigned long addr); int pmb_init(void); +bool __in_29bit_mode(void); #else static inline long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, unsigned long flags) @@ -87,8 +89,14 @@ static inline int pmb_init(void) { return -ENODEV; } -#endif /* CONFIG_PMB */ +#ifdef CONFIG_29BIT +#define __in_29bit_mode() (1) +#else +#define __in_29bit_mode() (0) +#endif + +#endif /* CONFIG_PMB */ #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 32ebd1592e63..491d9d5c8e0d 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -338,10 +338,3 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif #endif /* CONFIG_MEMORY_HOTPLUG */ - -#ifdef CONFIG_PMB -int __in_29bit_mode(void) -{ - return !(ctrl_inl(PMB_PASCR) & PASCR_SE); -} -#endif /* CONFIG_PMB */ diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index b796b6c021b4..d318fa6caffe 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -436,6 +436,11 @@ int __uses_jump_to_uncached pmb_init(void) return 0; } +bool __in_29bit_mode(void) +{ + return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0; +} + static int pmb_seq_show(struct seq_file 
*file, void *iter) { int i; -- cgit v1.2.3 From efd54ea315f645ef318708aab5714a5f1f432d03 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 16 Feb 2010 18:39:30 +0900 Subject: sh: Merge the legacy PMB mapping and entry synchronization code. This merges the code for iterating over the legacy PMB mappings and the code for synchronizing software state with the hardware mappings. There's really no reason to do the same iteration twice, and this also buys us the legacy entry logging facility for the dynamic PMB case. Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 1 + arch/sh/mm/pmb.c | 162 ++++++++++++++++++++-------------------------- 2 files changed, 70 insertions(+), 93 deletions(-) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index ca7d91e8aa72..2fcbedb55002 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -25,6 +25,7 @@ #define PMB_C 0x00000008 #define PMB_WT 0x00000001 #define PMB_UB 0x00000200 +#define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB) #define PMB_V 0x00000100 #define PMB_NO_ENTRY (-1) diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index a06483076a41..f822f83418e4 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -276,41 +276,57 @@ static void __pmb_unmap(struct pmb_entry *pmbe) } while (pmbe); } -#ifdef CONFIG_PMB_LEGACY +static inline void +pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn) +{ + unsigned int size; + const char *sz_str; + + size = data_val & PMB_SZ_MASK; + + sz_str = (size == PMB_SZ_16M) ? " 16MB": + (size == PMB_SZ_64M) ? " 64MB": + (size == PMB_SZ_128M) ? "128MB": + "512MB"; + + pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", + vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, + (data_val & PMB_C) ? "" : "un"); +} + static inline unsigned int pmb_ppn_in_range(unsigned long ppn) { - return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE; + return ppn >= __pa(memory_start) && ppn < __pa(memory_end); } -static int pmb_apply_legacy_mappings(void) +static int pmb_synchronize_mappings(void) { unsigned int applied = 0; int i; - pr_info("PMB: Preserving legacy mappings:\n"); + pr_info("PMB: boot mappings:\n"); /* - * The following entries are setup by the bootloader. + * Run through the initial boot mappings, log the established + * ones, and blow away anything that falls outside of the valid + * PPN range. Specifically, we only care about existing mappings + * that impact the cached/uncached sections. * - * Entry VPN PPN V SZ C UB - * -------------------------------------------------------- - * 0 0xA0000000 0x00000000 1 64MB 0 0 - * 1 0xA4000000 0x04000000 1 16MB 0 0 - * 2 0xA6000000 0x08000000 1 16MB 0 0 - * 9 0x88000000 0x48000000 1 128MB 1 1 - * 10 0x90000000 0x50000000 1 128MB 1 1 - * 11 0x98000000 0x58000000 1 128MB 1 1 - * 13 0xA8000000 0x48000000 1 128MB 0 0 - * 14 0xB0000000 0x50000000 1 128MB 0 0 - * 15 0xB8000000 0x58000000 1 128MB 0 0 + * Note that touching these can be a bit of a minefield; the boot + * loader can establish multi-page mappings with the same caching + * attributes, so we need to ensure that we aren't modifying a + * mapping that we're presently executing from, or may execute + * from in the case of straddling page boundaries. * - * The only entries the we need are the ones that map the kernel - * at the cached and uncached addresses. 
+ * In the future we will have to tidy up after the boot loader by + * jumping between the cached and uncached mappings and tearing + * down alternating mappings while executing from the other. */ for (i = 0; i < PMB_ENTRY_MAX; i++) { unsigned long addr, data; unsigned long addr_val, data_val; - unsigned long ppn, vpn; + unsigned long ppn, vpn, flags; + struct pmb_entry *pmbe; addr = mk_pmb_addr(i); data = mk_pmb_data(i); @@ -330,106 +346,66 @@ static int pmb_apply_legacy_mappings(void) /* * Only preserve in-range mappings. */ - if (pmb_ppn_in_range(ppn)) { - unsigned int size; - char *sz_str = NULL; - - size = data_val & PMB_SZ_MASK; - - sz_str = (size == PMB_SZ_16M) ? " 16MB": - (size == PMB_SZ_64M) ? " 64MB": - (size == PMB_SZ_128M) ? "128MB": - "512MB"; - - pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", - vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, - (data_val & PMB_C) ? "" : "un"); - - applied++; - } else { + if (!pmb_ppn_in_range(ppn)) { /* * Invalidate anything out of bounds. */ __raw_writel(addr_val & ~PMB_V, addr); __raw_writel(data_val & ~PMB_V, data); + continue; } + + /* + * Update the caching attributes if necessary + */ + if (data_val & PMB_C) { +#if defined(CONFIG_CACHE_WRITETHROUGH) + data_val |= PMB_WT; +#elif defined(CONFIG_CACHE_WRITEBACK) + data_val &= ~PMB_WT; +#else + data_val &= ~(PMB_C | PMB_WT); +#endif + __raw_writel(data_val, data); + } + + flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK); + + pmbe = pmb_alloc(vpn, ppn, flags, i); + if (IS_ERR(pmbe)) { + WARN_ON_ONCE(1); + continue; + } + + pmb_log_mapping(data_val, vpn, ppn); + + applied++; } return (applied == 0); } -#else -static inline int pmb_apply_legacy_mappings(void) -{ - return 1; -} -#endif int pmb_init(void) { - int i; - unsigned long addr, data; - unsigned long ret; + int ret; jump_to_uncached(); - /* - * Attempt to apply the legacy boot mappings if configured. If - * this is successful then we simply carry on with those and - * don't bother establishing additional memory mappings. Dynamic - * device mappings through pmb_remap() can still be bolted on - * after this. - */ - ret = pmb_apply_legacy_mappings(); - if (ret == 0) { - back_to_cached(); - return 0; - } - /* * Sync our software copy of the PMB mappings with those in * hardware. The mappings in the hardware PMB were either set up * by the bootloader or very early on by the kernel. 
*/ - for (i = 0; i < PMB_ENTRY_MAX; i++) { - struct pmb_entry *pmbe; - unsigned long vpn, ppn, flags; - - addr = PMB_DATA + (i << PMB_E_SHIFT); - data = __raw_readl(addr); - if (!(data & PMB_V)) - continue; - - if (data & PMB_C) { -#if defined(CONFIG_CACHE_WRITETHROUGH) - data |= PMB_WT; -#elif defined(CONFIG_CACHE_WRITEBACK) - data &= ~PMB_WT; -#else - data &= ~(PMB_C | PMB_WT); -#endif - } - __raw_writel(data, addr); - - ppn = data & PMB_PFN_MASK; - - flags = data & (PMB_C | PMB_WT | PMB_UB); - flags |= data & PMB_SZ_MASK; - - addr = PMB_ADDR + (i << PMB_E_SHIFT); - data = __raw_readl(addr); - - vpn = data & PMB_PFN_MASK; - - pmbe = pmb_alloc(vpn, ppn, flags, i); - WARN_ON(IS_ERR(pmbe)); + ret = pmb_synchronize_mappings(); + if (unlikely(ret == 0)) { + back_to_cached(); + return 0; } __raw_writel(0, PMB_IRMCR); /* Flush out the TLB */ - i = __raw_readl(MMUCR); - i |= MMUCR_TI; - __raw_writel(i, MMUCR); + __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); back_to_cached(); -- cgit v1.2.3 From 7bdda6209f224aa784a036df54b22cb338d2e859 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 13:23:00 +0900 Subject: sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB. Both the store queue API and the PMB remapping take unsigned long for their pgprot flags, which cuts off the extended protection bits. In the case of the PMB this isn't really a problem since the cache attribute bits that we care about are all in the lower 32-bits, but we do it just to be safe. The store queue remapping on the other hand depends on the extended prot bits for enabling userspace access to the mappings. Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 5 +++-- arch/sh/include/cpu-sh4/cpu/sq.h | 3 ++- arch/sh/kernel/cpu/sh4/sq.c | 13 ++++++------- arch/sh/mm/ioremap.c | 2 +- arch/sh/mm/pmb.c | 6 +++++- drivers/video/pvr2fb.c | 2 +- 6 files changed, 18 insertions(+), 13 deletions(-) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 2fcbedb55002..151bc922701b 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -33,6 +33,7 @@ #ifndef __ASSEMBLY__ #include #include +#include /* Default "unsigned long" context */ typedef unsigned long mm_context_id_t[NR_CPUS]; @@ -71,13 +72,13 @@ struct pmb_entry { #ifdef CONFIG_PMB /* arch/sh/mm/pmb.c */ long pmb_remap(unsigned long virt, unsigned long phys, - unsigned long size, unsigned long flags); + unsigned long size, pgprot_t prot); void pmb_unmap(unsigned long addr); int pmb_init(void); bool __in_29bit_mode(void); #else static inline long pmb_remap(unsigned long virt, unsigned long phys, - unsigned long size, unsigned long flags) + unsigned long size, pgprot_t prot) { return -EINVAL; } diff --git a/arch/sh/include/cpu-sh4/cpu/sq.h b/arch/sh/include/cpu-sh4/cpu/sq.h index 586d6491816a..74716ba2dc3c 100644 --- a/arch/sh/include/cpu-sh4/cpu/sq.h +++ b/arch/sh/include/cpu-sh4/cpu/sq.h @@ -12,6 +12,7 @@ #define __ASM_CPU_SH4_SQ_H #include +#include /* * Store queues range from e0000000-e3fffffc, allowing approx. 
64MB to be @@ -28,7 +29,7 @@ /* arch/sh/kernel/cpu/sh4/sq.c */ unsigned long sq_remap(unsigned long phys, unsigned int size, - const char *name, unsigned long flags); + const char *name, pgprot_t prot); void sq_unmap(unsigned long vaddr); void sq_flush_range(unsigned long start, unsigned int len); diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index 97aea9d69b00..fc065f9da6e5 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map) spin_unlock_irq(&sq_mapping_lock); } -static int __sq_remap(struct sq_mapping *map, unsigned long flags) +static int __sq_remap(struct sq_mapping *map, pgprot_t prot) { #if defined(CONFIG_MMU) struct vm_struct *vma; @@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) if (ioremap_page_range((unsigned long)vma->addr, (unsigned long)vma->addr + map->size, - vma->phys_addr, __pgprot(flags))) { + vma->phys_addr, prot)) { vunmap(vma->addr); return -EAGAIN; } @@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) * @phys: Physical address of mapping. * @size: Length of mapping. * @name: User invoking mapping. - * @flags: Protection flags. + * @prot: Protection bits. * * Remaps the physical address @phys through the next available store queue * address of @size length. @name is logged at boot time as well as through * the sysfs interface. */ unsigned long sq_remap(unsigned long phys, unsigned int size, - const char *name, unsigned long flags) + const char *name, pgprot_t prot) { struct sq_mapping *map; unsigned long end; @@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); - ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags); + ret = __sq_remap(map, prot); if (unlikely(ret != 0)) goto out; @@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count) return -EIO; if (likely(len)) { - int ret = sq_remap(base, len, "Userspace", - pgprot_val(PAGE_SHARED)); + int ret = sq_remap(base, len, "Userspace", PAGE_SHARED); if (ret < 0) return ret; } else diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index 94583c5da855..c68d2d7d00a9 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c @@ -80,7 +80,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size, if (unlikely(phys_addr >= P1SEG)) { unsigned long mapped; - mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot)); + mapped = pmb_remap(addr, phys_addr, size, pgprot); if (likely(mapped)) { addr += mapped; phys_addr += mapped; diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index f822f83418e4..509a444a30ab 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -166,12 +167,15 @@ static struct { }; long pmb_remap(unsigned long vaddr, unsigned long phys, - unsigned long size, unsigned long flags) + unsigned long size, pgprot_t prot) { struct pmb_entry *pmbp, *pmbe; unsigned long wanted; int pmb_flags, i; long err; + u64 flags; + + flags = pgprot_val(prot); /* Convert typical pgprot value to the PMB equivalent */ if (flags & _PAGE_CACHABLE) { diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c index 53f8f1100e81..f9975100d56d 100644 --- a/drivers/video/pvr2fb.c +++ b/drivers/video/pvr2fb.c @@ -831,7 +831,7 @@ static int __devinit pvr2fb_common_init(void) printk(KERN_NOTICE "fb%d: registering with SQ API\n", 
fb_info->node); pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len, - fb_info->fix.id, pgprot_val(PAGE_SHARED)); + fb_info->fix.id, PAGE_SHARED); printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n", fb_info->node, pvr2fb_map); -- cgit v1.2.3 From 51becfd96287b3913b13075699433730984e2f4f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 15:33:30 +0900 Subject: sh: PMB tidying. Some overdue cleanup of the PMB code, killing off unused functionality and duplication sprinkled about the tree. Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 4 ++- arch/sh/kernel/head_32.S | 2 +- arch/sh/mm/pmb.c | 83 ++++++++++++++++++++++------------------------- 3 files changed, 42 insertions(+), 47 deletions(-) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 151bc922701b..44c904341414 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -11,7 +11,9 @@ #define PMB_ADDR 0xf6100000 #define PMB_DATA 0xf7100000 -#define PMB_ENTRY_MAX 16 + +#define NR_PMB_ENTRIES 16 + #define PMB_E_MASK 0x0000000f #define PMB_E_SHIFT 8 diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index 83f2b84b58da..91ae76277d8f 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S @@ -236,7 +236,7 @@ ENTRY(_stext) * r10 = number of entries we've setup so far */ mov #0, r1 - mov #PMB_ENTRY_MAX, r0 + mov #NR_PMB_ENTRIES, r0 .Lagain: mov.l r1, @r3 /* Clear PMB_ADDR entry */ diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 509a444a30ab..924f3e4b3a82 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -21,32 +21,31 @@ #include #include #include +#include +#include #include #include #include #include #include -#include #include -#define NR_PMB_ENTRIES 16 - -static void __pmb_unmap(struct pmb_entry *); +static void pmb_unmap_entry(struct pmb_entry *); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; -static unsigned long pmb_map; +static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); -static inline unsigned long mk_pmb_entry(unsigned int entry) +static __always_inline unsigned long mk_pmb_entry(unsigned int entry) { return (entry & PMB_E_MASK) << PMB_E_SHIFT; } -static inline unsigned long mk_pmb_addr(unsigned int entry) +static __always_inline unsigned long mk_pmb_addr(unsigned int entry) { return mk_pmb_entry(entry) | PMB_ADDR; } -static inline unsigned long mk_pmb_data(unsigned int entry) +static __always_inline unsigned long mk_pmb_data(unsigned int entry) { return mk_pmb_entry(entry) | PMB_DATA; } @@ -56,12 +55,12 @@ static int pmb_alloc_entry(void) unsigned int pos; repeat: - pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); + pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); if (unlikely(pos > NR_PMB_ENTRIES)) return -ENOSPC; - if (test_and_set_bit(pos, &pmb_map)) + if (test_and_set_bit(pos, pmb_map)) goto repeat; return pos; @@ -78,7 +77,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, if (pos < 0) return ERR_PTR(pos); } else { - if (test_and_set_bit(entry, &pmb_map)) + if (test_and_set_bit(entry, pmb_map)) return ERR_PTR(-ENOSPC); pos = entry; } @@ -104,16 +103,17 @@ static void pmb_free(struct pmb_entry *pmbe) pmbe->flags = 0; pmbe->entry = 0; - clear_bit(pos, &pmb_map); + clear_bit(pos, pmb_map); } /* - * Must be in P2 for __set_pmb_entry() + * Must be run uncached. 
*/ -static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int pos) +static void set_pmb_entry(struct pmb_entry *pmbe) { - __raw_writel(vpn | PMB_V, mk_pmb_addr(pos)); + jump_to_uncached(); + + __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); #ifdef CONFIG_CACHE_WRITETHROUGH /* @@ -121,17 +121,12 @@ static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, * invalid, so care must be taken to manually adjust cacheable * translations. */ - if (likely(flags & PMB_C)) - flags |= PMB_WT; + if (likely(pmbe->flags & PMB_C)) + pmbe->flags |= PMB_WT; #endif - __raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos)); -} + __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); -static void set_pmb_entry(struct pmb_entry *pmbe) -{ - jump_to_uncached(); - __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); back_to_cached(); } @@ -140,9 +135,6 @@ static void clear_pmb_entry(struct pmb_entry *pmbe) unsigned int entry = pmbe->entry; unsigned long addr; - if (unlikely(entry >= NR_PMB_ENTRIES)) - return; - jump_to_uncached(); /* Clear V-bit */ @@ -155,15 +147,14 @@ static void clear_pmb_entry(struct pmb_entry *pmbe) back_to_cached(); } - static struct { unsigned long size; int flag; } pmb_sizes[] = { - { .size = 0x20000000, .flag = PMB_SZ_512M, }, - { .size = 0x08000000, .flag = PMB_SZ_128M, }, - { .size = 0x04000000, .flag = PMB_SZ_64M, }, - { .size = 0x01000000, .flag = PMB_SZ_16M, }, + { .size = SZ_512M, .flag = PMB_SZ_512M, }, + { .size = SZ_128M, .flag = PMB_SZ_128M, }, + { .size = SZ_64M, .flag = PMB_SZ_64M, }, + { .size = SZ_16M, .flag = PMB_SZ_16M, }, }; long pmb_remap(unsigned long vaddr, unsigned long phys, @@ -230,34 +221,36 @@ again: return wanted - size; out: - if (pmbp) - __pmb_unmap(pmbp); + pmb_unmap_entry(pmbp); return err; } void pmb_unmap(unsigned long addr) { - struct pmb_entry *pmbe = NULL; + struct pmb_entry *pmbe; int i; for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { - if (test_bit(i, &pmb_map)) { + if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; - if (pmbe->vpn == addr) + if (pmbe->vpn == addr) { + pmb_unmap_entry(pmbe); break; + } } } +} +static void pmb_unmap_entry(struct pmb_entry *pmbe) +{ if (unlikely(!pmbe)) return; - __pmb_unmap(pmbe); -} - -static void __pmb_unmap(struct pmb_entry *pmbe) -{ - BUG_ON(!test_bit(pmbe->entry, &pmb_map)); + if (!test_bit(pmbe->entry, pmb_map)) { + WARN_ON(1); + return; + } do { struct pmb_entry *pmblink = pmbe; @@ -326,7 +319,7 @@ static int pmb_synchronize_mappings(void) * jumping between the cached and uncached mappings and tearing * down alternating mappings while executing from the other. */ - for (i = 0; i < PMB_ENTRY_MAX; i++) { + for (i = 0; i < NR_PMB_ENTRIES; i++) { unsigned long addr, data; unsigned long addr_val, data_val; unsigned long ppn, vpn, flags; @@ -494,7 +487,7 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) prev_state.event == PM_EVENT_FREEZE) { struct pmb_entry *pmbe; for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { - if (test_bit(i, &pmb_map)) { + if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; set_pmb_entry(pmbe); } -- cgit v1.2.3 From d7813bc9e8e384f5a293b05c095c799d41af3668 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 17:56:38 +0900 Subject: sh: Build PMB entry links for existing contiguous multi-page mappings. This plugs in entry sizing support for existing mappings and then builds on top of that for linking together entries that are mapping contiguous areas. 
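In outline, the linking step reduces to an adjacency test on both the virtual and physical ranges of neighbouring entries. A minimal sketch of that test, with struct pmb_entry trimmed to the fields involved and the helper name invented purely for illustration (this is not the exact kernel code):

struct pmb_entry_sketch {
	unsigned long vpn;	/* virtual base of the mapping  */
	unsigned long ppn;	/* physical base of the mapping */
	unsigned long size;	/* span covered by this entry   */
	struct pmb_entry_sketch *link;	/* next entry of a compound mapping */
};

/* Link @next behind @prev when the two ranges run back to back. */
static inline void pmb_link_if_adjacent(struct pmb_entry_sketch *prev,
					struct pmb_entry_sketch *next)
{
	if (prev && next &&
	    next->vpn == prev->vpn + prev->size &&
	    next->ppn == prev->ppn + prev->size)
		prev->link = next;
}

The size field is what the first half of the patch fills in, so the same test can be applied both to dynamically created entries and to the ones inherited at boot.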
This will ultimately permit us to coalesce mappings and promote head pages while reclaiming PMB slots for dynamic remapping. Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 2 +- arch/sh/mm/pmb.c | 59 +++++++++++++++++++++++------------------------ 2 files changed, 30 insertions(+), 31 deletions(-) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 44c904341414..5453169bf052 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -59,6 +59,7 @@ struct pmb_entry { unsigned long vpn; unsigned long ppn; unsigned long flags; + unsigned long size; /* * 0 .. NR_PMB_ENTRIES for specific entry selection, or @@ -66,7 +67,6 @@ struct pmb_entry { */ int entry; - struct pmb_entry *next; /* Adjacent entry link for contiguous multi-entry mappings */ struct pmb_entry *link; }; diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 924f3e4b3a82..f2ad6e374b64 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -90,20 +90,15 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe->ppn = ppn; pmbe->flags = flags; pmbe->entry = pos; + pmbe->size = 0; return pmbe; } static void pmb_free(struct pmb_entry *pmbe) { - int pos = pmbe->entry; - - pmbe->vpn = 0; - pmbe->ppn = 0; - pmbe->flags = 0; - pmbe->entry = 0; - - clear_bit(pos, pmb_map); + clear_bit(pmbe->entry, pmb_map); + pmbe->entry = PMB_NO_ENTRY; } /* @@ -198,6 +193,8 @@ again: vaddr += pmb_sizes[i].size; size -= pmb_sizes[i].size; + pmbe->size = pmb_sizes[i].size; + /* * Link adjacent entries that span multiple PMB entries * for easier tear-down. @@ -273,25 +270,7 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe) } while (pmbe); } -static inline void -pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn) -{ - unsigned int size; - const char *sz_str; - - size = data_val & PMB_SZ_MASK; - - sz_str = (size == PMB_SZ_16M) ? " 16MB": - (size == PMB_SZ_64M) ? " 64MB": - (size == PMB_SZ_128M) ? "128MB": - "512MB"; - - pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", - vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, - (data_val & PMB_C) ? "" : "un"); -} - -static inline unsigned int pmb_ppn_in_range(unsigned long ppn) +static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) { return ppn >= __pa(memory_start) && ppn < __pa(memory_end); } @@ -299,7 +278,8 @@ static inline unsigned int pmb_ppn_in_range(unsigned long ppn) static int pmb_synchronize_mappings(void) { unsigned int applied = 0; - int i; + struct pmb_entry *pmbp = NULL; + int i, j; pr_info("PMB: boot mappings:\n"); @@ -323,6 +303,7 @@ static int pmb_synchronize_mappings(void) unsigned long addr, data; unsigned long addr_val, data_val; unsigned long ppn, vpn, flags; + unsigned int size; struct pmb_entry *pmbe; addr = mk_pmb_addr(i); @@ -366,7 +347,8 @@ static int pmb_synchronize_mappings(void) __raw_writel(data_val, data); } - flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK); + size = data_val & PMB_SZ_MASK; + flags = size | (data_val & PMB_CACHE_MASK); pmbe = pmb_alloc(vpn, ppn, flags, i); if (IS_ERR(pmbe)) { @@ -374,7 +356,24 @@ static int pmb_synchronize_mappings(void) continue; } - pmb_log_mapping(data_val, vpn, ppn); + for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) + if (pmb_sizes[j].flag == size) + pmbe->size = pmb_sizes[j].size; + + /* + * Compare the previous entry against the current one to + * see if the entries span a contiguous mapping. If so, + * setup the entry links accordingly. 
+ */ + if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && + (pmbe->ppn == (pmbp->ppn + pmbp->size)))) + pmbp->link = pmbe; + + pmbp = pmbe; + + pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n", + vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20, + (data_val & PMB_C) ? "" : "un"); applied++; } -- cgit v1.2.3 From d53a0d33bc3a50ea0e8dd1680a2e8435770b162a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 21:17:02 +0900 Subject: sh: PMB locking overhaul. This implements some locking for the PMB code. A high level rwlock is added for dealing with rw accesses on the entry map while a per-entry data structure spinlock is added to deal with the PMB entry changing out from underneath us. Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 18 ------ arch/sh/mm/pmb.c | 152 ++++++++++++++++++++++++++++++++++------------ 2 files changed, 114 insertions(+), 56 deletions(-) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 5453169bf052..e42c4e2a41df 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -53,24 +53,6 @@ typedef struct { #endif } mm_context_t; -struct pmb_entry; - -struct pmb_entry { - unsigned long vpn; - unsigned long ppn; - unsigned long flags; - unsigned long size; - - /* - * 0 .. NR_PMB_ENTRIES for specific entry selection, or - * PMB_NO_ENTRY to search for a free one - */ - int entry; - - /* Adjacent entry link for contiguous multi-entry mappings */ - struct pmb_entry *link; -}; - #ifdef CONFIG_PMB /* arch/sh/mm/pmb.c */ long pmb_remap(unsigned long virt, unsigned long phys, diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index cb808a8aaffc..e65e8b8e2a5e 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include #include #include @@ -30,8 +32,29 @@ #include #include +struct pmb_entry; + +struct pmb_entry { + unsigned long vpn; + unsigned long ppn; + unsigned long flags; + unsigned long size; + + spinlock_t lock; + + /* + * 0 .. 
NR_PMB_ENTRIES for specific entry selection, or + * PMB_NO_ENTRY to search for a free one + */ + int entry; + + /* Adjacent entry link for contiguous multi-entry mappings */ + struct pmb_entry *link; +}; + static void pmb_unmap_entry(struct pmb_entry *); +static DEFINE_RWLOCK(pmb_rwlock); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); @@ -52,16 +75,13 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry) static int pmb_alloc_entry(void) { - unsigned int pos; + int pos; -repeat: pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); - - if (unlikely(pos > NR_PMB_ENTRIES)) - return -ENOSPC; - - if (test_and_set_bit(pos, pmb_map)) - goto repeat; + if (pos >= 0 && pos < NR_PMB_ENTRIES) + __set_bit(pos, pmb_map); + else + pos = -ENOSPC; return pos; } @@ -70,21 +90,32 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, unsigned long flags, int entry) { struct pmb_entry *pmbe; + unsigned long irqflags; + void *ret = NULL; int pos; + write_lock_irqsave(&pmb_rwlock, irqflags); + if (entry == PMB_NO_ENTRY) { pos = pmb_alloc_entry(); - if (pos < 0) - return ERR_PTR(pos); + if (unlikely(pos < 0)) { + ret = ERR_PTR(pos); + goto out; + } } else { - if (test_and_set_bit(entry, pmb_map)) - return ERR_PTR(-ENOSPC); + if (__test_and_set_bit(entry, pmb_map)) { + ret = ERR_PTR(-ENOSPC); + goto out; + } + pos = entry; } + write_unlock_irqrestore(&pmb_rwlock, irqflags); + pmbe = &pmb_entry_list[pos]; - if (!pmbe) - return ERR_PTR(-ENOMEM); + + spin_lock_init(&pmbe->lock); pmbe->vpn = vpn; pmbe->ppn = ppn; @@ -93,11 +124,15 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe->size = 0; return pmbe; + +out: + write_unlock_irqrestore(&pmb_rwlock, irqflags); + return ret; } static void pmb_free(struct pmb_entry *pmbe) { - clear_bit(pmbe->entry, pmb_map); + __clear_bit(pmbe->entry, pmb_map); pmbe->entry = PMB_NO_ENTRY; } @@ -124,7 +159,7 @@ static __always_inline unsigned long pmb_cache_flags(void) /* * Must be run uncached. */ -static void set_pmb_entry(struct pmb_entry *pmbe) +static void __set_pmb_entry(struct pmb_entry *pmbe) { jump_to_uncached(); @@ -137,7 +172,7 @@ static void set_pmb_entry(struct pmb_entry *pmbe) back_to_cached(); } -static void clear_pmb_entry(struct pmb_entry *pmbe) +static void __clear_pmb_entry(struct pmb_entry *pmbe) { unsigned int entry = pmbe->entry; unsigned long addr; @@ -154,6 +189,15 @@ static void clear_pmb_entry(struct pmb_entry *pmbe) back_to_cached(); } +static void set_pmb_entry(struct pmb_entry *pmbe) +{ + unsigned long flags; + + spin_lock_irqsave(&pmbe->lock, flags); + __set_pmb_entry(pmbe); + spin_unlock_irqrestore(&pmbe->lock, flags); +} + static struct { unsigned long size; int flag; @@ -190,6 +234,8 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, again: for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { + unsigned long flags; + if (size < pmb_sizes[i].size) continue; @@ -200,7 +246,9 @@ again: goto out; } - set_pmb_entry(pmbe); + spin_lock_irqsave(&pmbe->lock, flags); + + __set_pmb_entry(pmbe); phys += pmb_sizes[i].size; vaddr += pmb_sizes[i].size; @@ -212,8 +260,11 @@ again: * Link adjacent entries that span multiple PMB entries * for easier tear-down. */ - if (likely(pmbp)) + if (likely(pmbp)) { + spin_lock(&pmbp->lock); pmbp->link = pmbe; + spin_unlock(&pmbp->lock); + } pmbp = pmbe; @@ -223,9 +274,11 @@ again: * pmb_sizes[i].size again. 
*/ i--; + + spin_unlock_irqrestore(&pmbe->lock, flags); } - if (size >= 0x1000000) + if (size >= SZ_16M) goto again; return wanted - size; @@ -238,29 +291,32 @@ out: void pmb_unmap(unsigned long addr) { - struct pmb_entry *pmbe; + struct pmb_entry *pmbe = NULL; int i; + read_lock(&pmb_rwlock); + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; - if (pmbe->vpn == addr) { - pmb_unmap_entry(pmbe); + if (pmbe->vpn == addr) break; - } } } + + read_unlock(&pmb_rwlock); + + pmb_unmap_entry(pmbe); } static void pmb_unmap_entry(struct pmb_entry *pmbe) { + unsigned long flags; + if (unlikely(!pmbe)) return; - if (!test_bit(pmbe->entry, pmb_map)) { - WARN_ON(1); - return; - } + write_lock_irqsave(&pmb_rwlock, flags); do { struct pmb_entry *pmblink = pmbe; @@ -272,15 +328,17 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe) * this entry in pmb_alloc() (even if we haven't filled * it yet). * - * Therefore, calling clear_pmb_entry() is safe as no + * Therefore, calling __clear_pmb_entry() is safe as no * other mapping can be using that slot. */ - clear_pmb_entry(pmbe); + __clear_pmb_entry(pmbe); pmbe = pmblink->link; pmb_free(pmblink); } while (pmbe); + + write_unlock_irqrestore(&pmb_rwlock, flags); } static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) @@ -316,6 +374,7 @@ static int pmb_synchronize_mappings(void) unsigned long addr, data; unsigned long addr_val, data_val; unsigned long ppn, vpn, flags; + unsigned long irqflags; unsigned int size; struct pmb_entry *pmbe; @@ -364,21 +423,31 @@ static int pmb_synchronize_mappings(void) continue; } + spin_lock_irqsave(&pmbe->lock, irqflags); + for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) if (pmb_sizes[j].flag == size) pmbe->size = pmb_sizes[j].size; - /* - * Compare the previous entry against the current one to - * see if the entries span a contiguous mapping. If so, - * setup the entry links accordingly. - */ - if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && - (pmbe->ppn == (pmbp->ppn + pmbp->size)))) - pmbp->link = pmbe; + if (pmbp) { + spin_lock(&pmbp->lock); + + /* + * Compare the previous entry against the current one to + * see if the entries span a contiguous mapping. If so, + * setup the entry links accordingly. + */ + if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && + (pmbe->ppn == (pmbp->ppn + pmbp->size))) + pmbp->link = pmbe; + + spin_unlock(&pmbp->lock); + } pmbp = pmbe; + spin_unlock_irqrestore(&pmbe->lock, irqflags); + pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n", vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20, (data_val & PMB_C) ? "" : "un"); @@ -493,14 +562,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) if (state.event == PM_EVENT_ON && prev_state.event == PM_EVENT_FREEZE) { struct pmb_entry *pmbe; + + read_lock(&pmb_rwlock); + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; set_pmb_entry(pmbe); } } + + read_unlock(&pmb_rwlock); } + prev_state = state; + return 0; } -- cgit v1.2.3 From d01447b3197c2c470a14666be2c640407bbbfec7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 18 Feb 2010 18:13:51 +0900 Subject: sh: Merge legacy and dynamic PMB modes. This implements a bit of rework for the PMB code, which permits us to kill off the legacy PMB mode completely. 
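At boot the kernel now checks whether the PMB already holds valid mappings covering the initial memory before deciding how to proceed. A rough C rendering of that scan follows; the real check runs in head_32.S assembly, the constant values repeat those in asm/mmu.h so the sketch stands alone, and the helper names and the boot_data_mask parameter are illustrative assumptions rather than kernel API:

#define PMB_DATA	0xf7100000
#define PMB_E_SHIFT	8
#define NR_PMB_ENTRIES	16

/* Address of the data register for PMB slot @entry (mirrors mk_pmb_data()). */
static inline unsigned long mk_pmb_data_addr(unsigned int entry)
{
	return PMB_DATA | (entry << PMB_E_SHIFT);
}

/*
 * Return non-zero if any PMB data register already matches the pattern
 * (valid bit plus initial mapping) that the early boot code looks for.
 */
static inline int pmb_boot_mapping_present(unsigned long boot_data_mask)
{
	unsigned int i;

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long data =
			*(volatile unsigned long *)mk_pmb_data_addr(i);

		if ((data & boot_data_mask) == boot_data_mask)
			return 1;	/* adopt these, fix them up later */
	}

	return 0;			/* kernel sets up its own mappings */
}
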
Rather than trusting the boot loader to do the right thing, we do a quick verification of the PMB contents to determine whether to have the kernel setup the initial mappings or whether it needs to mangle them later on instead. If we're booting from legacy mappings, the kernel will now take control of them and make them match the kernel's initial mapping configuration. This is accomplished by breaking the initialization phase out in to multiple steps: synchronization, merging, and resizing. With the recent rework, the synchronization code establishes page links for compound mappings already, so we build on top of this for promoting mappings and reclaiming unused slots. At the same time, the changes introduced for the uncached helpers also permit us to dynamically resize the uncached mapping without any particular headaches. The smallest page size is more than sufficient for mapping all of kernel text, and as we're careful not to jump to any far off locations in the setup code the mapping can safely be resized regardless of whether we are executing from it or not. Signed-off-by: Paul Mundt --- arch/sh/boot/compressed/misc.c | 2 +- arch/sh/include/asm/mmu.h | 12 +- arch/sh/include/asm/page.h | 11 +- arch/sh/include/asm/uncached.h | 18 +++ arch/sh/kernel/head_32.S | 42 ++++++- arch/sh/kernel/setup.c | 2 + arch/sh/mm/Kconfig | 10 -- arch/sh/mm/init.c | 1 - arch/sh/mm/pmb.c | 243 +++++++++++++++++++++++++++++++++++------ arch/sh/mm/uncached.c | 6 + 10 files changed, 276 insertions(+), 71 deletions(-) create mode 100644 arch/sh/include/asm/uncached.h (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c index 9ba07927d16a..27140a6b365d 100644 --- a/arch/sh/boot/compressed/misc.c +++ b/arch/sh/boot/compressed/misc.c @@ -117,7 +117,7 @@ void decompress_kernel(void) output_addr = (CONFIG_MEMORY_START + 0x2000); #else output_addr = __pa((unsigned long)&_text+PAGE_SIZE); -#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY) +#if defined(CONFIG_29BIT) output_addr |= P2SEG; #endif #endif diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index e42c4e2a41df..15a05b615ba7 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -58,7 +58,7 @@ typedef struct { long pmb_remap(unsigned long virt, unsigned long phys, unsigned long size, pgprot_t prot); void pmb_unmap(unsigned long addr); -int pmb_init(void); +void pmb_init(void); bool __in_29bit_mode(void); #else static inline long pmb_remap(unsigned long virt, unsigned long phys, @@ -67,14 +67,8 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys, return -EINVAL; } -static inline void pmb_unmap(unsigned long addr) -{ -} - -static inline int pmb_init(void) -{ - return -ENODEV; -} +#define pmb_unmap(addr) do { } while (0) +#define pmb_init(addr) do { } while (0) #ifdef CONFIG_29BIT #define __in_29bit_mode() (1) diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 8237d9f53e56..d71feb359304 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -45,21 +45,12 @@ #endif #ifndef __ASSEMBLY__ +#include extern unsigned long shm_align_mask; extern unsigned long max_low_pfn, min_low_pfn; extern unsigned long memory_start, memory_end; -#ifdef CONFIG_UNCACHED_MAPPING -extern unsigned long uncached_start, uncached_end; - -extern int virt_addr_uncached(unsigned long kaddr); -extern void uncached_init(void); -#else -#define virt_addr_uncached(kaddr) (0) -#define uncached_init() do { } while (0) -#endif - static 
inline unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2) { diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h new file mode 100644 index 000000000000..e3419f96626a --- /dev/null +++ b/arch/sh/include/asm/uncached.h @@ -0,0 +1,18 @@ +#ifndef __ASM_SH_UNCACHED_H +#define __ASM_SH_UNCACHED_H + +#include + +#ifdef CONFIG_UNCACHED_MAPPING +extern unsigned long uncached_start, uncached_end; + +extern int virt_addr_uncached(unsigned long kaddr); +extern void uncached_init(void); +extern void uncached_resize(unsigned long size); +#else +#define virt_addr_uncached(kaddr) (0) +#define uncached_init() do { } while (0) +#define uncached_resize(size) BUG() +#endif + +#endif /* __ASM_SH_UNCACHED_H */ diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index 79ff39517f8e..fe0b743881b0 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S @@ -85,7 +85,7 @@ ENTRY(_stext) ldc r0, r7_bank ! ... and initial thread_info #endif -#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY) +#ifdef CONFIG_PMB /* * Reconfigure the initial PMB mappings setup by the hardware. * @@ -139,7 +139,6 @@ ENTRY(_stext) mov.l r0, @r1 mov.l .LMEMORY_SIZE, r5 - mov r5, r7 mov #PMB_E_SHIFT, r0 mov #0x1, r4 @@ -150,6 +149,40 @@ ENTRY(_stext) mov.l .LFIRST_ADDR_ENTRY, r2 mov.l .LPMB_ADDR, r3 + /* + * First we need to walk the PMB and figure out if there are any + * existing mappings that match the initial mappings VPN/PPN. + * If these have already been established by the bootloader, we + * don't bother setting up new entries here, and let the late PMB + * initialization take care of things instead. + * + * Note that we may need to coalesce and merge entries in order + * to reclaim more available PMB slots, which is much more than + * we want to do at this early stage. + */ + mov #0, r10 + mov #NR_PMB_ENTRIES, r9 + + mov r1, r7 /* temporary PMB_DATA iter */ + +.Lvalidate_existing_mappings: + + mov.l @r7, r8 + and r0, r8 + cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */ + bt .Lpmb_done + + add #1, r10 /* Increment the loop counter */ + cmp/eq r9, r10 + bf/s .Lvalidate_existing_mappings + add r4, r7 /* Increment to the next PMB_DATA entry */ + + /* + * If we've fallen through, continue with setting up the initial + * mappings. + */ + + mov r5, r7 /* cached_to_uncached */ mov #0, r10 #ifdef CONFIG_UNCACHED_MAPPING @@ -252,7 +285,8 @@ ENTRY(_stext) mov.l 6f, r0 icbi @r0 -#endif /* !CONFIG_PMB_LEGACY */ +.Lpmb_done: +#endif /* CONFIG_PMB */ #ifndef CONFIG_SH_NO_BSS_INIT /* @@ -304,7 +338,7 @@ ENTRY(stack_start) 6: .long sh_cpu_init 7: .long init_thread_union -#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY) +#ifdef CONFIG_PMB .LPMB_ADDR: .long PMB_ADDR .LPMB_DATA: .long PMB_DATA .LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index e187750dd319..3459e70eed72 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + uncached_init(); + plat_early_device_setup(); /* Let earlyprintk output early console messages */ diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 65cb5b83e072..1445ca6257df 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -91,16 +91,6 @@ config PMB 32-bits through the SH-4A PMB. If this is not set, legacy 29-bit physical addressing will be used. 
-config PMB_LEGACY - bool "Support legacy boot mappings for PMB" - depends on PMB - select 32BIT - help - If this option is enabled, fixed PMB mappings are inherited - from the boot loader, and the kernel does not attempt dynamic - management. This is the closest to legacy 29-bit physical mode, - and allows systems to support up to 512MiB of system memory. - config X2TLB def_bool y depends on (CPU_SHX2 || CPU_SHX3) && MMU diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 08e280d7cc7e..68028e8f26ce 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -245,7 +245,6 @@ void __init mem_init(void) memset(empty_zero_page, 0, PAGE_SIZE); __flush_wback_region(empty_zero_page, PAGE_SIZE); - uncached_init(); vsyscall_init(); codesize = (unsigned long) &_etext - (unsigned long) &_text; diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index b9d5476e1284..198bcff5e96f 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -52,7 +52,7 @@ struct pmb_entry { struct pmb_entry *link; }; -static void pmb_unmap_entry(struct pmb_entry *); +static void pmb_unmap_entry(struct pmb_entry *, int depth); static DEFINE_RWLOCK(pmb_rwlock); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; @@ -115,13 +115,14 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe = &pmb_entry_list[pos]; + memset(pmbe, 0, sizeof(struct pmb_entry)); + spin_lock_init(&pmbe->lock); pmbe->vpn = vpn; pmbe->ppn = ppn; pmbe->flags = flags; pmbe->entry = pos; - pmbe->size = 0; return pmbe; @@ -133,7 +134,9 @@ out: static void pmb_free(struct pmb_entry *pmbe) { __clear_bit(pmbe->entry, pmb_map); - pmbe->entry = PMB_NO_ENTRY; + + pmbe->entry = PMB_NO_ENTRY; + pmbe->link = NULL; } /* @@ -161,9 +164,6 @@ static __always_inline unsigned long pmb_cache_flags(void) */ static void __set_pmb_entry(struct pmb_entry *pmbe) { - pmbe->flags &= ~PMB_CACHE_MASK; - pmbe->flags |= pmb_cache_flags(); - writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); @@ -280,7 +280,7 @@ again: return wanted - size; out: - pmb_unmap_entry(pmbp); + pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); return err; } @@ -302,18 +302,40 @@ void pmb_unmap(unsigned long addr) read_unlock(&pmb_rwlock); - pmb_unmap_entry(pmbe); + pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); } -static void pmb_unmap_entry(struct pmb_entry *pmbe) +static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) { - unsigned long flags; + return (b->vpn == (a->vpn + a->size)) && + (b->ppn == (a->ppn + a->size)) && + (b->flags == a->flags); +} - if (unlikely(!pmbe)) - return; +static bool pmb_size_valid(unsigned long size) +{ + int i; - write_lock_irqsave(&pmb_rwlock, flags); + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (pmb_sizes[i].size == size) + return true; + + return false; +} + +static int pmb_size_to_flags(unsigned long size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (pmb_sizes[i].size == size) + return pmb_sizes[i].flag; + return 0; +} + +static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) +{ do { struct pmb_entry *pmblink = pmbe; @@ -332,8 +354,18 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe) pmbe = pmblink->link; pmb_free(pmblink); - } while (pmbe); + } while (pmbe && --depth); +} + +static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) +{ + unsigned long flags; + if (unlikely(!pmbe)) + return; + + write_lock_irqsave(&pmb_rwlock, flags); + __pmb_unmap_entry(pmbe, depth); write_unlock_irqrestore(&pmb_rwlock, flags); } @@ 
-342,14 +374,40 @@ static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) return ppn >= __pa(memory_start) && ppn < __pa(memory_end); } -static int pmb_synchronize_mappings(void) +static void __init pmb_notify(void) { - unsigned int applied = 0; - struct pmb_entry *pmbp = NULL; - int i, j; + int i; pr_info("PMB: boot mappings:\n"); + read_lock(&pmb_rwlock); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n", + pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, + pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un"); + } + + read_unlock(&pmb_rwlock); +} + +/* + * Sync our software copy of the PMB mappings with those in hardware. The + * mappings in the hardware PMB were either set up by the bootloader or + * very early on by the kernel. + */ +static void __init pmb_synchronize(void) +{ + struct pmb_entry *pmbp = NULL; + int i, j; + /* * Run through the initial boot mappings, log the established * ones, and blow away anything that falls outside of the valid @@ -432,10 +490,10 @@ static int pmb_synchronize_mappings(void) /* * Compare the previous entry against the current one to * see if the entries span a contiguous mapping. If so, - * setup the entry links accordingly. + * setup the entry links accordingly. Compound mappings + * are later coalesced. */ - if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && - (pmbe->ppn == (pmbp->ppn + pmbp->size))) + if (pmb_can_merge(pmbp, pmbe)) pmbp->link = pmbe; spin_unlock(&pmbp->lock); @@ -444,37 +502,150 @@ static int pmb_synchronize_mappings(void) pmbp = pmbe; spin_unlock_irqrestore(&pmbe->lock, irqflags); + } +} - pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n", - vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20, - (data_val & PMB_C) ? "" : "un"); +static void __init pmb_merge(struct pmb_entry *head) +{ + unsigned long span, newsize; + struct pmb_entry *tail; + int i = 1, depth = 0; + + span = newsize = head->size; - applied++; + tail = head->link; + while (tail) { + span += tail->size; + + if (pmb_size_valid(span)) { + newsize = span; + depth = i; + } + + /* This is the end of the line.. */ + if (!tail->link) + break; + + tail = tail->link; + i++; } - return (applied == 0); + /* + * The merged page size must be valid. + */ + if (!pmb_size_valid(newsize)) + return; + + head->flags &= ~PMB_SZ_MASK; + head->flags |= pmb_size_to_flags(newsize); + + head->size = newsize; + + __pmb_unmap_entry(head->link, depth); + __set_pmb_entry(head); } -int pmb_init(void) +static void __init pmb_coalesce(void) { - int ret; + unsigned long flags; + int i; + + write_lock_irqsave(&pmb_rwlock, flags); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + /* + * We're only interested in compound mappings + */ + if (!pmbe->link) + continue; + + /* + * Nothing to do if it already uses the largest possible + * page size. + */ + if (pmbe->size == SZ_512M) + continue; + + pmb_merge(pmbe); + } + + write_unlock_irqrestore(&pmb_rwlock, flags); +} + +#ifdef CONFIG_UNCACHED_MAPPING +static void __init pmb_resize(void) +{ + int i; /* - * Sync our software copy of the PMB mappings with those in - * hardware. The mappings in the hardware PMB were either set up - * by the bootloader or very early on by the kernel. 
+ * If the uncached mapping was constructed by the kernel, it will + * already be a reasonable size. */ - ret = pmb_synchronize_mappings(); - if (unlikely(ret == 0)) - return 0; + if (uncached_size == SZ_16M) + return; + + read_lock(&pmb_rwlock); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + unsigned long flags; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + if (pmbe->vpn != uncached_start) + continue; + + /* + * Found it, now resize it. + */ + spin_lock_irqsave(&pmbe->lock, flags); + + pmbe->size = SZ_16M; + pmbe->flags &= ~PMB_SZ_MASK; + pmbe->flags |= pmb_size_to_flags(pmbe->size); + + uncached_resize(pmbe->size); + + __set_pmb_entry(pmbe); + + spin_unlock_irqrestore(&pmbe->lock, flags); + } + + read_lock(&pmb_rwlock); +} +#endif + +void __init pmb_init(void) +{ + /* Synchronize software state */ + pmb_synchronize(); + + /* Attempt to combine compound mappings */ + pmb_coalesce(); + +#ifdef CONFIG_UNCACHED_MAPPING + /* Resize initial mappings, if necessary */ + pmb_resize(); +#endif + + /* Log them */ + pmb_notify(); writel_uncached(0, PMB_IRMCR); /* Flush out the TLB */ __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); ctrl_barrier(); - - return 0; } bool __in_29bit_mode(void) diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c index 807906981d9d..cf20a5c5136a 100644 --- a/arch/sh/mm/uncached.c +++ b/arch/sh/mm/uncached.c @@ -26,3 +26,9 @@ void __init uncached_init(void) uncached_start = memory_end; uncached_end = uncached_start + uncached_size; } + +void __init uncached_resize(unsigned long size) +{ + uncached_size = size; + uncached_end = uncached_start + uncached_size; +} -- cgit v1.2.3 From 90e7d649d86f21d478dc134f74c88e19dd472393 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 23 Feb 2010 16:20:53 +0900 Subject: sh: reworked dynamic PMB mapping. This implements a fairly significant overhaul of the dynamic PMB mapping code. The primary change here is that the PMB gets its own VMA that follows the uncached mapping and we attempt to be a bit more intelligent with dynamic sizing, multi-entry mapping, and so forth. Signed-off-by: Paul Mundt --- arch/sh/include/asm/io.h | 23 ++-- arch/sh/include/asm/mmu.h | 31 ++++-- arch/sh/mm/ioremap.c | 70 ++++--------- arch/sh/mm/ioremap_fixed.c | 11 +- arch/sh/mm/pmb.c | 256 ++++++++++++++++++++++++++++----------------- 5 files changed, 223 insertions(+), 168 deletions(-) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 7dab7b23a5ec..f689554e17c1 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -291,21 +291,21 @@ unsigned long long poke_real_address_q(unsigned long long addr, * doesn't exist, so everything must go through page tables. 
*/ #ifdef CONFIG_MMU -void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, +void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size, pgprot_t prot, void *caller); void __iounmap(void __iomem *addr); static inline void __iomem * -__ioremap(unsigned long offset, unsigned long size, pgprot_t prot) +__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot) { return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); } static inline void __iomem * -__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot) +__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) { #ifdef CONFIG_29BIT - unsigned long last_addr = offset + size - 1; + phys_addr_t last_addr = offset + size - 1; /* * For P1 and P2 space this is trivial, as everything is already @@ -329,7 +329,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot) } static inline void __iomem * -__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot) +__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot) { void __iomem *ret; @@ -349,35 +349,32 @@ __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot) #define __iounmap(addr) do { } while (0) #endif /* CONFIG_MMU */ -static inline void __iomem * -ioremap(unsigned long offset, unsigned long size) +static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) { return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); } static inline void __iomem * -ioremap_cache(unsigned long offset, unsigned long size) +ioremap_cache(phys_addr_t offset, unsigned long size) { return __ioremap_mode(offset, size, PAGE_KERNEL); } #ifdef CONFIG_HAVE_IOREMAP_PROT static inline void __iomem * -ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags) +ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags) { return __ioremap_mode(offset, size, __pgprot(flags)); } #endif #ifdef CONFIG_IOREMAP_FIXED -extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, - unsigned long, pgprot_t); +extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t); extern int iounmap_fixed(void __iomem *); extern void ioremap_fixed_init(void); #else static inline void __iomem * -ioremap_fixed(resource_size_t phys_addr, unsigned long offset, - unsigned long size, pgprot_t prot) +ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) { BUG(); return NULL; diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 15a05b615ba7..19fe84550b49 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -55,19 +55,29 @@ typedef struct { #ifdef CONFIG_PMB /* arch/sh/mm/pmb.c */ -long pmb_remap(unsigned long virt, unsigned long phys, - unsigned long size, pgprot_t prot); -void pmb_unmap(unsigned long addr); -void pmb_init(void); bool __in_29bit_mode(void); + +void pmb_init(void); +int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys, + unsigned long size, pgprot_t prot); +void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, + pgprot_t prot, void *caller); +int pmb_unmap(void __iomem *addr); + #else -static inline long pmb_remap(unsigned long virt, unsigned long phys, - unsigned long size, pgprot_t prot) + +static inline void __iomem * +pmb_remap_caller(phys_addr_t phys, unsigned long size, + pgprot_t prot, void *caller) +{ + return NULL; +} + +static inline int pmb_unmap(void __iomem *addr) { return -EINVAL; } -#define pmb_unmap(addr) do { } while (0) #define 
pmb_init(addr) do { } while (0) #ifdef CONFIG_29BIT @@ -77,6 +87,13 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys, #endif #endif /* CONFIG_PMB */ + +static inline void __iomem * +pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot) +{ + return pmb_remap_caller(phys, size, prot, __builtin_return_address(0)); +} + #endif /* __ASSEMBLY__ */ #endif /* __MMU_H */ diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index c68d2d7d00a9..1ab2385ecefe 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c @@ -34,17 +34,32 @@ * caller shouldn't need to know that small detail. */ void __iomem * __init_refok -__ioremap_caller(unsigned long phys_addr, unsigned long size, +__ioremap_caller(phys_addr_t phys_addr, unsigned long size, pgprot_t pgprot, void *caller) { struct vm_struct *area; unsigned long offset, last_addr, addr, orig_addr; + void __iomem *mapped; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; + /* + * If we can't yet use the regular approach, go the fixmap route. + */ + if (!mem_init_done) + return ioremap_fixed(phys_addr, size, pgprot); + + /* + * First try to remap through the PMB. + * PMB entries are all pre-faulted. + */ + mapped = pmb_remap_caller(phys_addr, size, pgprot, caller); + if (mapped && !IS_ERR(mapped)) + return mapped; + /* * Mappings have to be page-aligned */ @@ -52,12 +67,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size, phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; - /* - * If we can't yet use the regular approach, go the fixmap route. - */ - if (!mem_init_done) - return ioremap_fixed(phys_addr, offset, size, pgprot); - /* * Ok, go for it.. */ @@ -67,33 +76,10 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size, area->phys_addr = phys_addr; orig_addr = addr = (unsigned long)area->addr; -#ifdef CONFIG_PMB - /* - * First try to remap through the PMB once a valid VMA has been - * established. Smaller allocations (or the rest of the size - * remaining after a PMB mapping due to the size not being - * perfectly aligned on a PMB size boundary) are then mapped - * through the UTLB using conventional page tables. - * - * PMB entries are all pre-faulted. - */ - if (unlikely(phys_addr >= P1SEG)) { - unsigned long mapped; - - mapped = pmb_remap(addr, phys_addr, size, pgprot); - if (likely(mapped)) { - addr += mapped; - phys_addr += mapped; - size -= mapped; - } + if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { + vunmap((void *)orig_addr); + return NULL; } -#endif - - if (likely(size)) - if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { - vunmap((void *)orig_addr); - return NULL; - } return (void __iomem *)(offset + (char *)orig_addr); } @@ -133,23 +119,11 @@ void __iounmap(void __iomem *addr) if (iounmap_fixed(addr) == 0) return; -#ifdef CONFIG_PMB /* - * Purge any PMB entries that may have been established for this - * mapping, then proceed with conventional VMA teardown. - * - * XXX: Note that due to the way that remove_vm_area() does - * matching of the resultant VMA, we aren't able to fast-forward - * the address past the PMB space until the end of the VMA where - * the page tables reside. As such, unmap_vm_area() will be - * forced to linearly scan over the area until it finds the page - * tables where PTEs that need to be unmapped actually reside, - * which is far from optimal. Perhaps we need to use a separate - * VMA for the PMB mappings? - * -- PFM. 
+ * If the PMB handled it, there's nothing else to do. */ - pmb_unmap(vaddr); -#endif + if (pmb_unmap(addr) == 0) + return; p = remove_vm_area((void *)(vaddr & PAGE_MASK)); if (!p) { diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c index 0b78b1e20ef1..7f682e5dafcf 100644 --- a/arch/sh/mm/ioremap_fixed.c +++ b/arch/sh/mm/ioremap_fixed.c @@ -45,14 +45,21 @@ void __init ioremap_fixed_init(void) } void __init __iomem * -ioremap_fixed(resource_size_t phys_addr, unsigned long offset, - unsigned long size, pgprot_t prot) +ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) { enum fixed_addresses idx0, idx; struct ioremap_map *map; unsigned int nrpages; + unsigned long offset; int i, slot; + /* + * Mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(phys_addr + size) - phys_addr; + slot = -1; for (i = 0; i < FIX_N_IOREMAPS; i++) { map = &ioremap_maps[i]; diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 35b364f931ea..9a516b89839a 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -51,6 +52,16 @@ struct pmb_entry { struct pmb_entry *link; }; +static struct { + unsigned long size; + int flag; +} pmb_sizes[] = { + { .size = SZ_512M, .flag = PMB_SZ_512M, }, + { .size = SZ_128M, .flag = PMB_SZ_128M, }, + { .size = SZ_64M, .flag = PMB_SZ_64M, }, + { .size = SZ_16M, .flag = PMB_SZ_16M, }, +}; + static void pmb_unmap_entry(struct pmb_entry *, int depth); static DEFINE_RWLOCK(pmb_rwlock); @@ -72,6 +83,88 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry) return mk_pmb_entry(entry) | PMB_DATA; } +static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) +{ + return ppn >= __pa(memory_start) && ppn < __pa(memory_end); +} + +/* + * Ensure that the PMB entries match our cache configuration. + * + * When we are in 32-bit address extended mode, CCR.CB becomes + * invalid, so care must be taken to manually adjust cacheable + * translations. 
+ */ +static __always_inline unsigned long pmb_cache_flags(void) +{ + unsigned long flags = 0; + +#if defined(CONFIG_CACHE_OFF) + flags |= PMB_WT | PMB_UB; +#elif defined(CONFIG_CACHE_WRITETHROUGH) + flags |= PMB_C | PMB_WT | PMB_UB; +#elif defined(CONFIG_CACHE_WRITEBACK) + flags |= PMB_C; +#endif + + return flags; +} + +/* + * Convert typical pgprot value to the PMB equivalent + */ +static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot) +{ + unsigned long pmb_flags = 0; + u64 flags = pgprot_val(prot); + + if (flags & _PAGE_CACHABLE) + pmb_flags |= PMB_C; + if (flags & _PAGE_WT) + pmb_flags |= PMB_WT | PMB_UB; + + return pmb_flags; +} + +static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) +{ + return (b->vpn == (a->vpn + a->size)) && + (b->ppn == (a->ppn + a->size)) && + (b->flags == a->flags); +} + +static bool pmb_size_valid(unsigned long size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (pmb_sizes[i].size == size) + return true; + + return false; +} + +static inline bool pmb_addr_valid(unsigned long addr, unsigned long size) +{ + return (addr >= P1SEG && (addr + size - 1) < P3SEG); +} + +static inline bool pmb_prot_valid(pgprot_t prot) +{ + return (pgprot_val(prot) & _PAGE_USER) == 0; +} + +static int pmb_size_to_flags(unsigned long size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (pmb_sizes[i].size == size) + return pmb_sizes[i].flag; + + return 0; +} + static int pmb_alloc_entry(void) { int pos; @@ -138,34 +231,14 @@ static void pmb_free(struct pmb_entry *pmbe) pmbe->link = NULL; } -/* - * Ensure that the PMB entries match our cache configuration. - * - * When we are in 32-bit address extended mode, CCR.CB becomes - * invalid, so care must be taken to manually adjust cacheable - * translations. - */ -static __always_inline unsigned long pmb_cache_flags(void) -{ - unsigned long flags = 0; - -#if defined(CONFIG_CACHE_WRITETHROUGH) - flags |= PMB_C | PMB_WT | PMB_UB; -#elif defined(CONFIG_CACHE_WRITEBACK) - flags |= PMB_C; -#endif - - return flags; -} - /* * Must be run uncached. */ static void __set_pmb_entry(struct pmb_entry *pmbe) { - writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); - writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, - mk_pmb_data(pmbe->entry)); + /* Set V-bit */ + __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); + __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); } static void __clear_pmb_entry(struct pmb_entry *pmbe) @@ -193,39 +266,56 @@ static void set_pmb_entry(struct pmb_entry *pmbe) spin_unlock_irqrestore(&pmbe->lock, flags); } -static struct { - unsigned long size; - int flag; -} pmb_sizes[] = { - { .size = SZ_512M, .flag = PMB_SZ_512M, }, - { .size = SZ_128M, .flag = PMB_SZ_128M, }, - { .size = SZ_64M, .flag = PMB_SZ_64M, }, - { .size = SZ_16M, .flag = PMB_SZ_16M, }, -}; +int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, + unsigned long size, pgprot_t prot) +{ + return 0; +} -long pmb_remap(unsigned long vaddr, unsigned long phys, - unsigned long size, pgprot_t prot) +void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, + pgprot_t prot, void *caller) { struct pmb_entry *pmbp, *pmbe; - unsigned long wanted; - int pmb_flags, i; - long err; - u64 flags; + unsigned long pmb_flags; + int i, mapped; + unsigned long orig_addr, vaddr; + phys_addr_t offset, last_addr; + phys_addr_t align_mask; + unsigned long aligned; + struct vm_struct *area; - flags = pgprot_val(prot); + /* + * Small mappings need to go through the TLB. 
+ */ + if (size < SZ_16M) + return ERR_PTR(-EINVAL); + if (!pmb_prot_valid(prot)) + return ERR_PTR(-EINVAL); - pmb_flags = PMB_WT | PMB_UB; + pmbp = NULL; + pmb_flags = pgprot_to_pmb_flags(prot); + mapped = 0; - /* Convert typical pgprot value to the PMB equivalent */ - if (flags & _PAGE_CACHABLE) { - pmb_flags |= PMB_C; + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (size >= pmb_sizes[i].size) + break; - if ((flags & _PAGE_WT) == 0) - pmb_flags &= ~(PMB_WT | PMB_UB); - } + last_addr = phys + size; + align_mask = ~(pmb_sizes[i].size - 1); + offset = phys & ~align_mask; + phys &= align_mask; + aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys; - pmbp = NULL; - wanted = size; + area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end, + P3SEG, caller); + if (!area) + return NULL; + + area->phys_addr = phys; + orig_addr = vaddr = (unsigned long)area->addr; + + if (!pmb_addr_valid(vaddr, aligned)) + return ERR_PTR(-EFAULT); again: for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { @@ -237,19 +327,19 @@ again: pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, PMB_NO_ENTRY); if (IS_ERR(pmbe)) { - err = PTR_ERR(pmbe); - goto out; + pmb_unmap_entry(pmbp, mapped); + return pmbe; } spin_lock_irqsave(&pmbe->lock, flags); + pmbe->size = pmb_sizes[i].size; + __set_pmb_entry(pmbe); - phys += pmb_sizes[i].size; - vaddr += pmb_sizes[i].size; - size -= pmb_sizes[i].size; - - pmbe->size = pmb_sizes[i].size; + phys += pmbe->size; + vaddr += pmbe->size; + size -= pmbe->size; /* * Link adjacent entries that span multiple PMB entries @@ -269,6 +359,7 @@ again: * pmb_sizes[i].size again. */ i--; + mapped++; spin_unlock_irqrestore(&pmbe->lock, flags); } @@ -276,61 +367,35 @@ again: if (size >= SZ_16M) goto again; - return wanted - size; - -out: - pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); - - return err; + return (void __iomem *)(offset + (char *)orig_addr); } -void pmb_unmap(unsigned long addr) +int pmb_unmap(void __iomem *addr) { struct pmb_entry *pmbe = NULL; - int i; + unsigned long vaddr = (unsigned long __force)addr; + int i, found = 0; read_lock(&pmb_rwlock); for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; - if (pmbe->vpn == addr) + if (pmbe->vpn == vaddr) { + found = 1; break; + } } } read_unlock(&pmb_rwlock); - pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); -} - -static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) -{ - return (b->vpn == (a->vpn + a->size)) && - (b->ppn == (a->ppn + a->size)) && - (b->flags == a->flags); -} - -static bool pmb_size_valid(unsigned long size) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) - if (pmb_sizes[i].size == size) - return true; - - return false; -} - -static int pmb_size_to_flags(unsigned long size) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) - if (pmb_sizes[i].size == size) - return pmb_sizes[i].flag; + if (found) { + pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); + return 0; + } - return 0; + return -EINVAL; } static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) @@ -368,11 +433,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) write_unlock_irqrestore(&pmb_rwlock, flags); } -static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) -{ - return ppn >= __pa(memory_start) && ppn < __pa(memory_end); -} - static void __init pmb_notify(void) { int i; -- cgit v1.2.3 From 089b43f9737f2e51c6ce354749f5a9f3f093601c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Mar 2010 16:29:48 +0900 Subject: sh: Fix up NUMA build for 
29-bit. pmb_bolt_mapping() is undefined on 29-bit builds, so provide a stub. This fixes up the NUMA build on platforms lacking PMB support. Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/sh/include/asm/mmu.h') diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 19fe84550b49..56e4418c19b9 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -66,6 +66,13 @@ int pmb_unmap(void __iomem *addr); #else +static inline int +pmb_bolt_mapping(unsigned long virt, phys_addr_t phys, + unsigned long size, pgprot_t prot) +{ + return -EINVAL; +} + static inline void __iomem * pmb_remap_caller(phys_addr_t phys, unsigned long size, pgprot_t prot, void *caller) -- cgit v1.2.3
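
A note on the reworked calling convention from the 90e7d649 patch above: pmb_remap() now hands back a void __iomem * cookie rather than a byte count, and __ioremap_caller() only keeps the result when it is neither NULL (no mapping could be built) nor an ERR_PTR() value (request too small for a PMB section, or a user-accessible pgprot); both failure modes simply fall through to a conventional page-table mapping. The caller-side sketch below is illustrative only: example_map_device() is an invented helper, kernel context is assumed, and the headers provide IS_ERR(), PAGE_KERNEL_NOCACHE and the PMB prototypes.

/* Illustrative sketch, not part of the patches above. */
#include <linux/err.h>
#include <linux/types.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>

static void __iomem *example_map_device(phys_addr_t phys, unsigned long size)
{
	void __iomem *vaddr;

	/* Ask the PMB first; prot handling mirrors ioremap()'s nocache default. */
	vaddr = pmb_remap(phys, size, PAGE_KERNEL_NOCACHE);
	if (!vaddr || IS_ERR(vaddr))
		return NULL;	/* caller falls back to page tables, as __ioremap_caller() does */

	return vaddr;		/* later torn down with pmb_unmap(vaddr) */
}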
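
The size selection in pmb_remap_caller() is easy to miss in the diff: it picks the largest supported section size that does not exceed the request, aligns the physical base down to that size, and rounds the end of the request up to it, so the returned cookie carries the displaced page offset. Below is a standalone userspace sketch of just that arithmetic; the example address, the 40MB request and the local sizes[] table are assumptions that mirror pmb_sizes[], and this is not kernel code.

#include <stdio.h>

int main(void)
{
	/* Mirrors the pmb_sizes[] table: largest section first. */
	static const unsigned long long sizes[] = {
		512ULL << 20, 128ULL << 20, 64ULL << 20, 16ULL << 20,
	};
	unsigned long long phys = 0xfd000123ULL;	/* arbitrary, unaligned base */
	unsigned long long size = 40ULL << 20;		/* 40MB request; the kernel rejects anything below 16MB earlier */
	unsigned long long last, mask, offset, aligned;
	unsigned int i;

	/* Largest section size that still fits within the request. */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		if (size >= sizes[i])
			break;

	last = phys + size;
	mask = ~(sizes[i] - 1);
	offset = phys & ~mask;			/* preserved in the returned cookie */
	phys &= mask;				/* section-aligned physical base */
	aligned = ((last + sizes[i] - 1) & mask) - phys;	/* ALIGN(last, section) - phys */

	printf("section %lluMB, base %#llx, span %lluMB, offset %#llx\n",
	       sizes[i] >> 20, phys, aligned >> 20, offset);
	return 0;
}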