Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        | 26
-rw-r--r--  arch/arm/mm/abort-ev7.S    | 21
-rw-r--r--  arch/arm/mm/alignment.c    | 77
-rw-r--r--  arch/arm/mm/cache-l2x0.c   | 39
-rw-r--r--  arch/arm/mm/cache-v6.S     | 17
-rw-r--r--  arch/arm/mm/cache-v7.S     |  4
-rw-r--r--  arch/arm/mm/copypage-fa.c  |  2
-rw-r--r--  arch/arm/mm/fault-armv.c   |  2
-rw-r--r--  arch/arm/mm/fault.c        |  5
-rw-r--r--  arch/arm/mm/init.c         | 48
-rw-r--r--  arch/arm/mm/mm.h           |  3
-rw-r--r--  arch/arm/mm/mmu.c          | 44
-rw-r--r--  arch/arm/mm/nommu.c        | 13
-rw-r--r--  arch/arm/mm/proc-sa1100.S  |  2
-rw-r--r--  arch/arm/mm/tlb-v7.S       |  8
15 files changed, 205 insertions(+), 106 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5bd7c89a6045..346ae14824a5 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -572,6 +572,8 @@ config CPU_TLB_V6
config CPU_TLB_V7
bool
+config VERIFY_PERMISSION_FAULT
+ bool
endif
config CPU_HAS_ASID
@@ -760,7 +762,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
+ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
+ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
default y
select OUTER_CACHE
select OUTER_CACHE_SYNC
@@ -769,7 +772,7 @@ config CACHE_L2X0
config CACHE_TAUROS2
bool "Enable the Tauros2 L2 cache controller"
- depends on ARCH_DOVE
+ depends on (ARCH_DOVE || ARCH_MMP)
default y
select OUTER_CACHE
help
@@ -789,6 +792,25 @@ config ARM_L1_CACHE_SHIFT
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
+config ARM_DMA_MEM_BUFFERABLE
+ bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+ default y if CPU_V6 || CPU_V7
+ help
+ Historically, the kernel has used strongly ordered mappings to
+ provide DMA coherent memory. With the advent of ARMv7, mapping
+ memory with differing types results in unpredictable behaviour,
+ so on these CPUs, this option is forced on.
+
+ Multiple mappings with differing attributes are also unpredictable
+ on ARMv6 CPUs, but since they do not have aggressive speculative
+ prefetch, no harm appears to occur.
+
+ However, drivers may be missing the necessary barriers for ARMv6,
+ and turning this on may therefore result in unpredictable driver
+ behaviour. This is why it is offered as an option.
+
+ You are recommended to say 'Y' here and debug any affected drivers.
+
config ARCH_HAS_BARRIERS
bool
help
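A practical note on the ARM_DMA_MEM_BUFFERABLE help text above: once coherent DMA memory is Normal/bufferable rather than strongly ordered, a descriptor write can still be sitting in the write buffer when a subsequent MMIO doorbell write reaches the device. Below is a minimal sketch of the barrier a driver then needs, using a hypothetical device; all names are illustrative and not from this patch.

#include <linux/io.h>
#include <linux/types.h>
#include <asm/system.h>		/* wmb() */

/* Hypothetical descriptor layout, for illustration only */
struct demo_desc {
	u32 addr;
	u32 len;
	u32 flags;		/* bit 0: descriptor owned by hardware */
};

static void demo_start_dma(struct demo_desc *desc, void __iomem *doorbell,
			   dma_addr_t buf, size_t len)
{
	desc->addr  = buf;
	desc->len   = len;
	desc->flags = 1;

	wmb();			/* drain the write buffer before the kick */
	writel(1, doorbell);	/* the device fetches the descriptor now */
}

Without the wmb(), the strongly ordered doorbell write could be observed by the device before the buffered descriptor writes, which is exactly the class of driver bug the help text warns about.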
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 2e6dc040c654..ec88b157d3bb 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -29,5 +29,26 @@ ENTRY(v7_early_abort)
* V6 code adjusts the returned DFSR.
* New designs should not need to patch up faults.
*/
+
+#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
+ /*
+ * Detect erroneous permission faults and fix them up
+ */
+ ldr r3, =0x40d @ On permission fault
+ and r3, r1, r3
+ cmp r3, #0x0d
+ movne pc, lr
+
+ mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
+ isb
+ mrc p15, 0, r2, c7, c4, 0 @ Read the PAR
+ and r3, r2, #0x7b @ On translation fault
+ cmp r3, #0x0b
+ movne pc, lr
+ bic r1, r1, #0xf @ Fix up FSR FS[5:0]
+ and r2, r2, #0x7e
+ orr r1, r1, r2, LSR #1
+#endif
+
mov pc, lr
ENDPROC(v7_early_abort)
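In C terms, the new VERIFY_PERMISSION_FAULT path does roughly the following. This is a paraphrase for readability, not code from the patch; retranslate() stands in for the ATS1CPR write to c7, c8, 0 followed by the PAR read from c7, c4, 0.

#include <linux/types.h>

u32 retranslate(u32 far);	/* hypothetical helper: ATS1CPR + PAR read */

static u32 v7_fixup_permission_fault(u32 fsr, u32 far)
{
	u32 par;

	/* FSR & 0x40d == 0x00d: reported as a permission fault */
	if ((fsr & 0x40d) != 0x00d)
		return fsr;

	/* Re-walk the page tables for the faulting address */
	par = retranslate(far);

	/* PAR & 0x7b == 0x0b: the walk really hit a translation fault */
	if ((par & 0x7b) != 0x0b)
		return fsr;

	/* Rewrite FS[3:0] with the fault status the PAR reports */
	return (fsr & ~0xf) | ((par & 0x7e) >> 1);
}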
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index edddd66faac6..6f98c358989a 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -94,36 +95,29 @@ static const char *usermode_action[] = {
"signal+warn"
};
-static int
-proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int alignment_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len;
-
- p += sprintf(p, "User:\t\t%lu\n", ai_user);
- p += sprintf(p, "System:\t\t%lu\n", ai_sys);
- p += sprintf(p, "Skipped:\t%lu\n", ai_skipped);
- p += sprintf(p, "Half:\t\t%lu\n", ai_half);
- p += sprintf(p, "Word:\t\t%lu\n", ai_word);
+ seq_printf(m, "User:\t\t%lu\n", ai_user);
+ seq_printf(m, "System:\t\t%lu\n", ai_sys);
+ seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
+ seq_printf(m, "Half:\t\t%lu\n", ai_half);
+ seq_printf(m, "Word:\t\t%lu\n", ai_word);
if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
- p += sprintf(p, "DWord:\t\t%lu\n", ai_dword);
- p += sprintf(p, "Multi:\t\t%lu\n", ai_multi);
- p += sprintf(p, "User faults:\t%i (%s)\n", ai_usermode,
+ seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
usermode_action[ai_usermode]);
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
+ return 0;
+}
- return len;
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, alignment_proc_show, NULL);
}
-static int proc_alignment_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
char mode;
@@ -136,6 +130,13 @@ static int proc_alignment_write(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations alignment_proc_fops = {
+ .open = alignment_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = alignment_proc_write,
+};
#endif /* CONFIG_PROC_FS */
union offset_union {
@@ -166,15 +167,15 @@ union offset_union {
THUMB( "1: "ins" %1, [%2]\n" ) \
THUMB( " add %2, %2, #1\n" ) \
"2:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, #1\n" \
" b 2b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
@@ -226,16 +227,16 @@ union offset_union {
" mov %1, %1, "NEXT_BYTE"\n" \
"2: "ins" %1, [%2]\n" \
"3:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, #1\n" \
" b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
@@ -266,18 +267,18 @@ union offset_union {
" mov %1, %1, "NEXT_BYTE"\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
- " .section .fixup,\"ax\"\n" \
+ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
"6: mov %0, #1\n" \
" b 5b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 6b\n" \
" .long 2b, 6b\n" \
" .long 3b, 6b\n" \
" .long 4b, 6b\n" \
- " .previous\n" \
+ " .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
if (err) \
@@ -901,12 +902,10 @@ static int __init alignment_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
- res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
+ res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
+ &alignment_proc_fops);
if (!res)
return -ENOMEM;
-
- res->read_proc = proc_alignment_read;
- res->write_proc = proc_alignment_write;
#endif
/*
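The conversion above is the stock seq_file recipe: single_open() binds a show() callback, and read/llseek/release come from the seq_file core, replacing the hand-rolled page/offset accounting of the old read_proc interface. The same pattern in isolation, for a hypothetical "demo" entry that is not part of this patch:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>

static unsigned long demo_counter;

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Counter:\t%lu\n", demo_counter);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	return proc_create("demo", S_IRUGO, NULL, &demo_fops) ? 0 : -ENOMEM;
}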
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 21ad68ba22ba..9819869d2bc9 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -27,6 +27,7 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
+static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
@@ -108,8 +109,8 @@ static inline void l2x0_inv_all(void)
/* invalidate all ways */
spin_lock_irqsave(&l2x0_lock, flags);
- writel(0xff, l2x0_base + L2X0_INV_WAY);
- cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
+ writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+ cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -208,9 +209,37 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
+ __u32 cache_id;
+ int ways;
+ const char *type;
l2x0_base = base;
+ cache_id = readl(l2x0_base + L2X0_CACHE_ID);
+ aux = readl(l2x0_base + L2X0_AUX_CTRL);
+
+ /* Determine the number of ways */
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ case L2X0_CACHE_ID_PART_L310:
+ if (aux & (1 << 16))
+ ways = 16;
+ else
+ ways = 8;
+ type = "L310";
+ break;
+ case L2X0_CACHE_ID_PART_L210:
+ ways = (aux >> 13) & 0xf;
+ type = "L210";
+ break;
+ default:
+ /* Assume unknown chips have 8 ways */
+ ways = 8;
+ type = "L2x0 series";
+ break;
+ }
+
+ l2x0_way_mask = (1 << ways) - 1;
+
/*
* Check if l2x0 controller is already enabled.
* If you are booting from non-secure mode
@@ -219,8 +248,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
/* l2x0 controller is disabled */
-
- aux = readl(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
writel(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -236,5 +263,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
- printk(KERN_INFO "L2X0 cache controller enabled\n");
+ printk(KERN_INFO "%s cache controller enabled\n", type);
+ printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
+ ways, cache_id, aux);
}
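The point of the cache_id probing above: writing 0xff to L2X0_INV_WAY only invalidates ways 0-7, so on a 16-way L310 half the cache was silently left untouched. A condensed sketch of the mask computation, using the same register fields and constants as the patch:

#include <linux/types.h>
#include <asm/hardware/cache-l2x0.h>

static u32 l2x0_calc_way_mask(u32 cache_id, u32 aux)
{
	int ways;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		ways = (aux & (1 << 16)) ? 16 : 8;	/* associativity bit */
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;		/* associativity field */
		break;
	default:
		ways = 8;				/* unknown part: assume 8 */
		break;
	}
	return (1 << ways) - 1;		/* 8 ways -> 0xff, 16 ways -> 0xffff */
}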
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 9d89c67a1cc3..e46ecd847138 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,6 +211,9 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
+#ifdef CONFIG_SMP
+ str r0, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
@@ -231,6 +234,9 @@ v6_dma_inv_range:
v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
@@ -251,6 +257,10 @@ v6_dma_clean_range:
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+ str r2, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
@@ -273,7 +283,9 @@ ENTRY(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
- b v6_dma_clean_range
+ teq r2, #DMA_TO_DEVICE
+ beq v6_dma_clean_range
+ b v6_dma_flush_range
ENDPROC(v6_dma_map_area)
/*
@@ -283,9 +295,6 @@ ENDPROC(v6_dma_map_area)
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
- add r1, r1, r0
- teq r2, #DMA_TO_DEVICE
- bne v6_dma_inv_range
mov pc, lr
ENDPROC(v6_dma_unmap_area)
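Two things happen in this file: each cache-op loop gains a read and/or write "for ownership", so that on ARM11MPCore the line migrates to the CPU issuing the maintenance operation (which only acts on locally owned lines), and the map/unmap split moves all work to map time, leaving v6_dma_unmap_area a no-op. The new dispatch, rendered as C for readability; the real code is the assembly above, and the helper names merely follow the asm labels:

#include <linux/dma-mapping.h>

extern void v6_dma_inv_range(unsigned long start, unsigned long end);
extern void v6_dma_clean_range(unsigned long start, unsigned long end);
extern void v6_dma_flush_range(unsigned long start, unsigned long end);

static void v6_dma_map_area_sketch(unsigned long start, size_t size,
				   enum dma_data_direction dir)
{
	unsigned long end = start + size;

	if (dir == DMA_FROM_DEVICE)
		v6_dma_inv_range(start, end);	/* device writes, CPU reads */
	else if (dir == DMA_TO_DEVICE)
		v6_dma_clean_range(start, end);	/* CPU wrote, device reads */
	else
		v6_dma_flush_range(start, end);	/* bidirectional: clean+inv */
}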
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index bcd64f265870..06a90dcfc60a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -167,7 +167,11 @@ ENTRY(v7_coherent_user_range)
cmp r0, r1
blo 1b
mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
+#else
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
+#endif
dsb
isb
mov pc, lr
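On SMP the branch-predictor invalidate must be broadcast to the Inner Shareable domain, otherwise another core that later runs the just-written code can predict through stale BTB entries. The two CP15 encodings used above, as inline-asm helpers; names follow the ARM ARM (BPIALL/BPIALLIS), sketch only:

static inline void bpiall(void)		/* invalidate branch predictor, local CPU */
{
	asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0) : "memory");
}

static inline void bpiallis(void)	/* same, broadcast Inner Shareable */
{
	asm volatile("mcr p15, 0, %0, c7, c1, 6" : : "r" (0) : "memory");
}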
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index b2a6008b0111..d2852e1635b1 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -40,7 +40,7 @@ fa_copy_user_page(void *kto, const void *kfrom)
}
void fa_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 0d414c28eb2c..9b906dec1ca1 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -134,8 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
flush_dcache_mmap_unlock(mapping);
if (aliases)
do_adjust_pte(vma, addr, pfn, ptep);
- else
- flush_cache_page(vma, addr, pfn);
}
/*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9d40c341e07e..92f5801f99c1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -463,7 +463,12 @@ static struct fsr_info {
{ do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
+/* Do we need a runtime check? */
+#if __LINUX_ARM_ARCH__ < 6
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
+#else
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" },
+#endif
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
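For orientation, this table is indexed by the decoded fault status: FS[3:0] comes from FSR[3:0] and FS[4] from FSR[10]. The entry changed here corresponds to FS 0b00100, which ARMv6 redefined from "external abort on linefetch" to the I-cache maintenance fault, hence the build-time architecture test. A sketch of the decode:

static inline unsigned int fsr_fault_status(unsigned int fsr)
{
	/* FS[3:0] = FSR[3:0], FS[4] = FSR[10] */
	return (fsr & 0xf) | (((fsr >> 10) & 0x1) << 4);
}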
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 83db12a68d56..1ba6cf5a2c02 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,7 +15,6 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
-#include <linux/sort.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
@@ -86,9 +85,6 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
for_each_online_node(node) {
- pg_data_t *n = NODE_DATA(node);
- struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
for_each_nodebank (i,mi,node) {
struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
@@ -97,8 +93,8 @@ void show_mem(void)
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
- page = map + pfn1;
- end = map + pfn2;
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
do {
total++;
@@ -227,20 +223,6 @@ static int __init check_initrd(struct meminfo *mi)
return initrd_node;
}
-static inline void map_memory_bank(struct membank *bank)
-{
-#ifdef CONFIG_MMU
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-#endif
-}
-
static void __init bootmem_init_node(int node, struct meminfo *mi,
unsigned long start_pfn, unsigned long end_pfn)
{
@@ -250,16 +232,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
int i;
/*
- * Map the memory banks for this node.
- */
- for_each_nodebank(i, mi, node) {
- struct membank *bank = &mi->bank[i];
-
- if (!bank->highmem)
- map_memory_bank(bank);
- }
-
- /*
* Allocate the bootmem bitmap page.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
@@ -388,21 +360,12 @@ static void arm_memory_present(struct meminfo *mi, int node)
}
#endif
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
int node, initrd_node;
- sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
-
/*
* Locate which node contains the ramdisk image, if any.
*/
@@ -603,9 +566,6 @@ void __init mem_init(void)
reserved_pages = free_pages = 0;
for_each_online_node(node) {
- pg_data_t *n = NODE_DATA(node);
- struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
for_each_nodebank(i, &meminfo, node) {
struct membank *bank = &meminfo.bank[i];
unsigned int pfn1, pfn2;
@@ -614,8 +574,8 @@ void __init mem_init(void)
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
- page = map + pfn1;
- end = map + pfn2;
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
do {
if (PageReserved(page))
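The show_mem()/mem_init() change is the same fix in both places: "map + pfn" assumes one contiguous struct page array per node, which SPARSEMEM breaks because mem_map is allocated per section. Deriving both loop bounds with pfn_to_page() stays valid as long as a bank does not straddle a section boundary, which is why the end pointer is computed from the last valid pfn rather than the one-past-the-end pfn. The new loop shape, as a sketch:

#include <linux/mm.h>

/* Counting pages in one membank; pfn2 is one past the last pfn */
static void count_bank_pages(unsigned long pfn1, unsigned long pfn2,
			     unsigned int *total, unsigned int *reserved)
{
	struct page *page = pfn_to_page(pfn1);
	struct page *end  = pfn_to_page(pfn2 - 1) + 1;

	do {
		(*total)++;
		if (PageReserved(page))
			(*reserved)++;
		page++;
	} while (page < end);
}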
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a888363398f8..815d08eecbb0 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,10 +28,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#endif
-struct map_desc;
-struct meminfo;
struct pglist_data;
-void __init create_mapping(struct map_desc *md);
void __init bootmem_init(void);
void reserve_node_zero(struct pglist_data *pgdat);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 241c24a1c18f..285894171186 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,6 +14,7 @@
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <linux/sort.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>
@@ -603,7 +604,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
-void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md)
{
unsigned long phys, addr, length, end;
const struct mem_type *type;
@@ -869,9 +870,10 @@ void __init reserve_node_zero(pg_data_t *pgdat)
if (machine_is_p720t())
res_size = 0x00014000;
- /* H1940 and RX3715 need to reserve this for suspend */
+ /* H1940, RX3715 and RX1950 need to reserve this for suspend */
- if (machine_is_h1940() || machine_is_rx3715()) {
+ if (machine_is_h1940() || machine_is_rx3715()
+ || machine_is_rx1950()) {
reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
BOOTMEM_DEFAULT);
reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
@@ -1017,6 +1019,39 @@ static void __init kmap_init(void)
#endif
}
+static inline void map_memory_bank(struct membank *bank)
+{
+ struct map_desc map;
+
+ map.pfn = bank_pfn_start(bank);
+ map.virtual = __phys_to_virt(bank_phys_start(bank));
+ map.length = bank_phys_size(bank);
+ map.type = MT_MEMORY;
+
+ create_mapping(&map);
+}
+
+static void __init map_lowmem(void)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ /* Map all the lowmem memory banks. */
+ for (i = 0; i < mi->nr_banks; i++) {
+ struct membank *bank = &mi->bank[i];
+
+ if (!bank->highmem)
+ map_memory_bank(bank);
+ }
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1025,9 +1060,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
+ map_lowmem();
bootmem_init();
devicemaps_init(mdesc);
kmap_init();
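meminfo_cmp and the sort() call simply move bank sorting from bootmem_init() to the top of paging_init(), so that map_lowmem() and everything after it see the banks in ascending physical order. For reference, lib/sort's interface in isolation; an illustrative sketch, and a NULL swap function selects the built-in byte-wise swap:

#include <linux/sort.h>
#include <linux/types.h>

static int cmp_u32(const void *_a, const void *_b)
{
	u32 a = *(const u32 *)_a, b = *(const u32 *)_b;

	return a < b ? -1 : a > b ? 1 : 0;
}

static void sort_example(u32 *vals, size_t n)
{
	sort(vals, n, sizeof(*vals), cmp_u32, NULL);
}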
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 9bfeb6b9509a..33b327379f07 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -65,6 +65,15 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC)
+ __cpuc_coherent_user_range(uaddr, uaddr + len);
+}
+
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
size_t size, unsigned int mtype)
{
@@ -87,8 +96,8 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
-void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
- unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
}
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index ee7700242c19..5c47760c2064 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -45,7 +45,7 @@ ENTRY(cpu_sa1100_proc_init)
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
mov pc, lr
- .previous
+ .section .text
/*
* cpu_sa1100_proc_fin()
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 0cb1848bd876..f3f288a9546d 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -50,7 +50,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
cmp r0, r1
blo 1b
mov ip, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -79,7 +83,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
cmp r0, r1
blo 1b
mov r2, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
isb
mov pc, lr
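Both TLB hunks follow the same completion pattern as the cache-v7.S change: issue the maintenance operation (broadcast on SMP), then DSB to wait for it to complete across the shareability domain; the kernel-range flush additionally issues an ISB so the local pipeline refetches. As one helper, sketch only:

static inline void v7_bp_flush_sketch(void)
{
#ifdef CONFIG_SMP
	/* BPIALLIS: broadcast to the Inner Shareable domain */
	asm volatile("mcr p15, 0, %0, c7, c1, 6" : : "r" (0) : "memory");
#else
	/* BPIALL: local CPU only */
	asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0) : "memory");
#endif
	asm volatile("dsb" : : : "memory");	/* wait for completion */
	asm volatile("isb" : : : "memory");	/* resync instruction stream */
}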