From 7c4584d39a8a144c60adaf4cf998c3233b6683d9 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Sat, 24 Apr 2010 11:12:12 +0100
Subject: sh: Assembly friendly __pa and __va definitions

This patch defines ___pa and ___va, which return the physical and
virtual counterpart of an address, respectively. These macros are
suitable for calling from assembly because they don't include the C
casts required by __pa and __va.

Signed-off-by: Matt Fleming
---
 arch/sh/include/asm/page.h | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index d71feb359304..0152c040f6c3 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -128,13 +128,18 @@ typedef struct page *pgtable_t;
  * added or subtracted as required.
  */
 #ifdef CONFIG_PMB
-#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
-#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
+#define ___pa(x)	((x)-PAGE_OFFSET+__MEMORY_START)
+#define ___va(x)	((x)+PAGE_OFFSET-__MEMORY_START)
 #else
-#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define ___pa(x)	((x)-PAGE_OFFSET)
+#define ___va(x)	((x)+PAGE_OFFSET)
 #endif
 
+#ifndef __ASSEMBLY__
+#define __pa(x)	___pa((unsigned long)x)
+#define __va(x)	(void *)___va((unsigned long)x)
+#endif /* !__ASSEMBLY__ */
+
 #ifdef CONFIG_UNCACHED_MAPPING
 #define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
 #define CAC_ADDR(addr)	((addr) - uncached_start + PAGE_OFFSET)
--
cgit v1.2.3


From b161313ae8d9c64de589d1de72f975210ce37b48 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Sat, 24 Apr 2010 13:28:20 +0100
Subject: sh: Fix address to decompress at when CONFIG_32BIT=y

When running in 32BIT mode the P1SEG region doesn't necessarily provide
a window onto RAM (it depends on how the bootloader set up the PMB).
The correct location to place the decompressed kernel is the physical
address of _text.

Signed-off-by: Matt Fleming
---
 arch/sh/boot/compressed/head_32.S | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S
index 02a30935f0b9..200c1d4f1efe 100644
--- a/arch/sh/boot/compressed/head_32.S
+++ b/arch/sh/boot/compressed/head_32.S
@@ -97,7 +97,11 @@ init_stack_addr:
 decompress_kernel_addr:
 	.long	decompress_kernel
 kernel_start_addr:
+#ifdef CONFIG_32BIT
+	.long	___pa(_text+PAGE_SIZE)
+#else
 	.long	_text+PAGE_SIZE
+#endif
 
 	.align	9
 fake_headers_as_bzImage:
--
cgit v1.2.3


From 9c3d936352fefaadec57bafda1fe3807890cbf2c Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Sat, 24 Apr 2010 13:34:44 +0100
Subject: sh: Fix zImage load address when CONFIG_32BIT=y

We can't necessarily use the P1SEG region to access RAM when running in
32BIT mode, so use CONFIG_MEMORY_START as the base address.

Signed-off-by: Matt Fleming
---
 arch/sh/boot/compressed/Makefile | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 5d660b90943b..cfa5a087a886 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -14,10 +14,16 @@ OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
 #
 # IMAGE_OFFSET is the load offset of the compression loader
 #
+ifeq ($(CONFIG_32BIT),y)
+IMAGE_OFFSET	:= $(shell /bin/bash -c 'printf "0x%08x" \
+		     $$[$(CONFIG_MEMORY_START) + \
+			$(CONFIG_BOOT_LINK_OFFSET)]')
+else
 IMAGE_OFFSET	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET) + \
 			$(KERNEL_MEMORY) + \
 			$(CONFIG_BOOT_LINK_OFFSET)]')
+endif
 
 LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
--
cgit v1.2.3


From c7b03fa0bdc04e00bfbdc4cc69da144b11108f37 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Sun, 25 Apr 2010 17:29:07 +0100
Subject: sh: Do not try merging two 128MB PMB mappings

There is a logic error in pmb_merge() that means we will incorrectly
try to merge two 128MB PMB mappings into one. However, 256MB isn't a
valid PMB map size, and pmb_merge() will actually drop the second
128MB mapping.

This patch allows my SDK7786 board to boot when configured with
CONFIG_MEMORY_SIZE=0x10000000.

Signed-off-by: Matt Fleming
---
 arch/sh/mm/pmb.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 3cc21933063b..c0fdc217ece5 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -681,7 +681,7 @@ static void __init pmb_merge(struct pmb_entry *head)
 	/*
 	 * The merged page size must be valid.
 	 */
-	if (!pmb_size_valid(newsize))
+	if (!depth || !pmb_size_valid(newsize))
 		return;
 
 	head->flags &= ~PMB_SZ_MASK;
--
cgit v1.2.3


From 035ca59fe8399a67b56511bd5b28130f0c4874a0 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Sun, 25 Apr 2010 20:18:41 +0100
Subject: sh: Use correct mask when comparing PMB DATA array values

Previously we were masking the PMB DATA array values with the value of
__MEMORY_START | PMB_V, which left some of the PFN bits out of the
mask.

Signed-off-by: Matt Fleming
---
 arch/sh/kernel/head_32.S | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 8c1fc9a9fab2..6e35f012cc03 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -131,6 +131,7 @@ ENTRY(_stext)
  *	r8 = scratch register
  *	r9 = scratch register
  *	r10 = number of PMB entries we've setup
+ *	r11 = scratch register
  */
 
 	mov.l	.LMMUCR, r1	/* Flush the TLB */
@@ -167,8 +168,9 @@ ENTRY(_stext)
 
 .Lvalidate_existing_mappings:
 
+	mov.l	.LPMB_DATA_MASK, r11
 	mov.l	@r7, r8
-	and	r0, r8
+	and	r11, r8
 	cmp/eq	r0, r8	/* Check for valid __MEMORY_START mappings */
 	bt	.Lpmb_done
@@ -341,6 +343,7 @@ ENTRY(stack_start)
 #ifdef CONFIG_PMB
 .LPMB_ADDR:		.long	PMB_ADDR
 .LPMB_DATA:		.long	PMB_DATA
+.LPMB_DATA_MASK:	.long	PMB_PFN_MASK | PMB_V
 .LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
 .LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
 .LMMUCR:		.long	MMUCR
--
cgit v1.2.3
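
A note on how the first patch ties the series together: __pa() and
__va() contain C casts, which the assembler cannot parse, so the
cast-free ___pa()/___va() forms are what make constructs like the
.long directive in head_32.S possible. Below is a minimal sketch of
the two kinds of call site, assuming CONFIG_PMB=y and the macros from
arch/sh/include/asm/page.h; the init_task example is purely an
illustrative stand-in, not a call site from this series:

	/* C code: __pa() casts its argument to unsigned long
	 * internally, so any pointer type is accepted.
	 */
	unsigned long phys = __pa(&init_task);
	struct task_struct *virt = __va(phys);

	/* Assembly: ___pa() is plain arithmetic on link-time
	 * constants, so it is legal inside an assembler directive,
	 * as head_32.S does above:
	 */
		.long	___pa(_text+PAGE_SIZE)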