Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  99
1 file changed, 72 insertions(+), 27 deletions(-)
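
Most of this patch is a mechanical conversion of the ARM-private __early_param() handlers to the generic early_param() interface: each handler now receives the option value as a plain char * and returns an int status, instead of advancing a char ** cursor past the consumed text. A minimal sketch of the converted handler shape follows; it is illustrative only, uses a hypothetical parameter name, and is not part of the patch itself.

/* Illustrative early_param() handler in the style this patch converts to. */
#include <linux/init.h>
#include <linux/string.h>

static int example_setting __initdata;	/* hypothetical flag, for illustration only */

static int __init early_example(char *p)
{
	/* Parse an "on"/"off" value, mirroring the early_ecc() handler below. */
	if (p && memcmp(p, "on", 2) == 0)
		example_setting = 1;
	else if (p && memcmp(p, "off", 3) == 0)
		example_setting = 0;
	return 0;	/* 0 = success; the generic parser needs no pointer update */
}
early_param("example", early_example);	/* hypothetical boot option: example=on */

Unlike the old __early_param() callbacks, the handler no longer advances the argument pointer; the generic early-parameter code passes just the value string, which is why the *p += len updates disappear in the hunks below.
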
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 761ffede6a23..285894171186 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,6 +14,7 @@
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <linux/sort.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>
@@ -100,18 +101,17 @@ static struct cachepolicy cache_policies[] __initdata = {
* writebuffer to be turned off. (Note: the write
* buffer should not be on and the cache off).
*/
-static void __init early_cachepolicy(char **p)
+static int __init early_cachepolicy(char *p)
{
int i;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
int len = strlen(cache_policies[i].policy);
- if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+ if (memcmp(p, cache_policies[i].policy, len) == 0) {
cachepolicy = i;
cr_alignment &= ~cache_policies[i].cr_mask;
cr_no_alignment &= ~cache_policies[i].cr_mask;
- *p += len;
break;
}
}
@@ -130,36 +130,37 @@ static void __init early_cachepolicy(char **p)
}
flush_cache_all();
set_cr(cr_alignment);
+ return 0;
}
-__early_param("cachepolicy=", early_cachepolicy);
+early_param("cachepolicy", early_cachepolicy);
-static void __init early_nocache(char **__unused)
+static int __init early_nocache(char *__unused)
{
char *p = "buffered";
printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
- early_cachepolicy(&p);
+ early_cachepolicy(p);
+ return 0;
}
-__early_param("nocache", early_nocache);
+early_param("nocache", early_nocache);
-static void __init early_nowrite(char **__unused)
+static int __init early_nowrite(char *__unused)
{
char *p = "uncached";
printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
- early_cachepolicy(&p);
+ early_cachepolicy(p);
+ return 0;
}
-__early_param("nowb", early_nowrite);
+early_param("nowb", early_nowrite);
-static void __init early_ecc(char **p)
+static int __init early_ecc(char *p)
{
- if (memcmp(*p, "on", 2) == 0) {
+ if (memcmp(p, "on", 2) == 0)
ecc_mask = PMD_PROTECTION;
- *p += 2;
- } else if (memcmp(*p, "off", 3) == 0) {
+ else if (memcmp(p, "off", 3) == 0)
ecc_mask = 0;
- *p += 3;
- }
+ return 0;
}
-__early_param("ecc=", early_ecc);
+early_param("ecc", early_ecc);
static int __init noalign_setup(char *__unused)
{
@@ -420,6 +421,10 @@ static void __init build_mem_type_table(void)
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
@@ -599,7 +604,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
-void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md)
{
unsigned long phys, addr, length, end;
const struct mem_type *type;
@@ -670,9 +675,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
* bytes. This can be used to increase (or decrease) the vmalloc
* area - the default is 128m.
*/
-static void __init early_vmalloc(char **arg)
+static int __init early_vmalloc(char *arg)
{
- vmalloc_reserve = memparse(*arg, arg);
+ vmalloc_reserve = memparse(arg, NULL);
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
@@ -687,8 +692,9 @@ static void __init early_vmalloc(char **arg)
"vmalloc area is too big, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
+ return 0;
}
-__early_param("vmalloc=", early_vmalloc);
+early_param("vmalloc", early_vmalloc);
#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
@@ -864,9 +870,10 @@ void __init reserve_node_zero(pg_data_t *pgdat)
if (machine_is_p720t())
res_size = 0x00014000;
- /* H1940 and RX3715 need to reserve this for suspend */
+ /* H1940, RX3715 and RX1950 need to reserve this for suspend */
- if (machine_is_h1940() || machine_is_rx3715()) {
+ if (machine_is_h1940() || machine_is_rx3715()
+ || machine_is_rx1950()) {
reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
BOOTMEM_DEFAULT);
reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
@@ -1012,6 +1019,39 @@ static void __init kmap_init(void)
#endif
}
+static inline void map_memory_bank(struct membank *bank)
+{
+ struct map_desc map;
+
+ map.pfn = bank_pfn_start(bank);
+ map.virtual = __phys_to_virt(bank_phys_start(bank));
+ map.length = bank_phys_size(bank);
+ map.type = MT_MEMORY;
+
+ create_mapping(&map);
+}
+
+static void __init map_lowmem(void)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ /* Map all the lowmem memory banks. */
+ for (i = 0; i < mi->nr_banks; i++) {
+ struct membank *bank = &mi->bank[i];
+
+ if (!bank->highmem)
+ map_memory_bank(bank);
+ }
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1020,9 +1060,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
+ map_lowmem();
bootmem_init();
devicemaps_init(mdesc);
kmap_init();
@@ -1049,10 +1092,12 @@ void setup_mm_for_reboot(char mode)
pgd_t *pgd;
int i;
- if (current->mm && current->mm->pgd)
- pgd = current->mm->pgd;
- else
- pgd = init_mm.pgd;
+ /*
+ * We need access to user-mode page tables here. For kernel threads
+ * we don't have any user-mode mappings so we use the context that we
+ * "borrowed".
+ */
+ pgd = current->active_mm->pgd;
base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())