Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-l2x0.c    13
-rw-r--r--  arch/arm/mm/dma-mapping.c    7
-rw-r--r--  arch/arm/mm/dump.c          54
-rw-r--r--  arch/arm/mm/init.c          13
-rw-r--r--  arch/arm/mm/ioremap.c        7
-rw-r--r--  arch/arm/mm/mmu.c           21
-rw-r--r--  arch/arm/mm/nommu.c         12
-rw-r--r--  arch/arm/mm/pageattr.c       1
-rw-r--r--  arch/arm/mm/proc-v7.S       15
-rw-r--r--  arch/arm/mm/proc-v7m.S       6
10 files changed, 122 insertions(+), 27 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 2290be390f87..808efbb89b88 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -57,6 +57,9 @@ static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
struct l2x0_regs l2x0_saved_regs;
+static bool l2x0_bresp_disable;
+static bool l2x0_flz_disable;
+
/*
* Common code for all cache controllers.
*/
@@ -620,7 +623,7 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
u32 aux = l2x0_saved_regs.aux_ctrl;
if (rev >= L310_CACHE_ID_RTL_R2P0) {
- if (cortex_a9) {
+ if (cortex_a9 && !l2x0_bresp_disable) {
aux |= L310_AUX_CTRL_EARLY_BRESP;
pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
@@ -629,7 +632,7 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
}
}
- if (cortex_a9) {
+ if (cortex_a9 && !l2x0_flz_disable) {
u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
u32 acr = get_auxcr();
@@ -1200,6 +1203,12 @@ static void __init l2c310_of_parse(const struct device_node *np,
*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
}
+ if (of_property_read_bool(np, "arm,early-bresp-disable"))
+ l2x0_bresp_disable = true;
+
+ if (of_property_read_bool(np, "arm,full-line-zero-disable"))
+ l2x0_flz_disable = true;
+
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 386f41ced733..c742dfd2967b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2423,6 +2423,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dma_ops = arm_get_dma_map_ops(coherent);
set_dma_ops(dev, dma_ops);
+
+#ifdef CONFIG_XEN
+ if (xen_initial_domain()) {
+ dev->archdata.dev_dma_ops = dev->dma_ops;
+ dev->dma_ops = xen_dma_ops;
+ }
+#endif
}
void arch_teardown_dma_ops(struct device *dev)
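
The hunk above routes a Dom0 device's DMA through Xen's ops table. A minimal sketch of what that indirection means for callers (the helper and variable names are assumed, not from the patch): every later dma_map_single() on the device dispatches through whatever ops arch_setup_dma_ops() installed, i.e. the Xen swiotlb ops when xen_initial_domain() is true.

#include <linux/dma-mapping.h>

/* Illustrative only: this mapping silently goes through xen_dma_ops on a
 * Xen initial domain, or the native ARM ops otherwise. */
static int map_tx_buffer(struct device *dev, void *buf, size_t len,
                         dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM;
        return 0;
}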
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 21192d6eda40..35ff45470dbf 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
+#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
@@ -43,6 +44,7 @@ struct pg_state {
unsigned long start_address;
unsigned level;
u64 current_prot;
+ const char *current_domain;
};
struct prot_bits {
@@ -216,7 +218,8 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t
}
}
-static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val)
+static void note_page(struct pg_state *st, unsigned long addr,
+ unsigned int level, u64 val, const char *domain)
{
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
@@ -224,8 +227,10 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
if (!st->level) {
st->level = level;
st->current_prot = prot;
+ st->current_domain = domain;
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
} else if (prot != st->current_prot || level != st->level ||
+ domain != st->current_domain ||
addr >= st->marker[1].start_address) {
const char *unit = units;
unsigned long delta;
@@ -240,6 +245,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
unit++;
}
seq_printf(st->seq, "%9lu%c", delta, *unit);
+ if (st->current_domain)
+ seq_printf(st->seq, " %s", st->current_domain);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
seq_printf(st->seq, "\n");
@@ -251,11 +258,13 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
}
st->start_address = addr;
st->current_prot = prot;
+ st->current_domain = domain;
st->level = level;
}
}
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
+ const char *domain)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
unsigned long addr;
@@ -263,25 +272,50 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
addr = start + i * PAGE_SIZE;
- note_page(st, addr, 4, pte_val(*pte));
+ note_page(st, addr, 4, pte_val(*pte), domain);
}
}
+static const char *get_domain_name(pmd_t *pmd)
+{
+#ifndef CONFIG_ARM_LPAE
+ switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
+ case PMD_DOMAIN(DOMAIN_KERNEL):
+ return "KERNEL ";
+ case PMD_DOMAIN(DOMAIN_USER):
+ return "USER ";
+ case PMD_DOMAIN(DOMAIN_IO):
+ return "IO ";
+ case PMD_DOMAIN(DOMAIN_VECTORS):
+ return "VECTORS";
+ default:
+ return "unknown";
+ }
+#endif
+ return NULL;
+}
+
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
pmd_t *pmd = pmd_offset(pud, 0);
unsigned long addr;
unsigned i;
+ const char *domain;
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
+ domain = get_domain_name(pmd);
if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
- note_page(st, addr, 3, pmd_val(*pmd));
+ note_page(st, addr, 3, pmd_val(*pmd), domain);
else
- walk_pte(st, pmd, addr);
+ walk_pte(st, pmd, addr, domain);
- if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
- note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
+ if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
+ addr += SECTION_SIZE;
+ pmd++;
+ domain = get_domain_name(pmd);
+ note_page(st, addr, 3, pmd_val(*pmd), domain);
+ }
}
}
@@ -296,7 +330,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
if (!pud_none(*pud)) {
walk_pmd(st, pud, addr);
} else {
- note_page(st, addr, 2, pud_val(*pud));
+ note_page(st, addr, 2, pud_val(*pud), NULL);
}
}
}
@@ -317,11 +351,11 @@ static void walk_pgd(struct seq_file *m)
if (!pgd_none(*pgd)) {
walk_pud(&st, pgd, addr);
} else {
- note_page(&st, addr, 1, pgd_val(*pgd));
+ note_page(&st, addr, 1, pgd_val(*pgd), NULL);
}
}
- note_page(&st, 0, 0, 0);
+ note_page(&st, 0, 0, 0, NULL);
}
static int ptdump_show(struct seq_file *m, void *v)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1d8558ff9827..ad80548325fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -709,34 +709,37 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
}
+/**
+ * update_sections_early() is intended to be called only through the
+ * stop_machine() framework and is executed by only one CPU while all
+ * other CPUs spin and wait, so no locking is required in this function.
+ */
static void update_sections_early(struct section_perm perms[], int n)
{
struct task_struct *t, *s;
- read_lock(&tasklist_lock);
for_each_process(t) {
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
set_section_perms(perms, n, true, s->mm);
}
- read_unlock(&tasklist_lock);
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
}
-int __fix_kernmem_perms(void *unused)
+static int __fix_kernmem_perms(void *unused)
{
update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
return 0;
}
-void fix_kernmem_perms(void)
+static void fix_kernmem_perms(void)
{
stop_machine(__fix_kernmem_perms, NULL, NULL);
}
-int __mark_rodata_ro(void *unused)
+static int __mark_rodata_ro(void *unused)
{
update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
return 0;
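
The new comment describes the stop_machine() contract that makes removing the tasklist_lock safe. A minimal sketch of that pattern, with illustrative names (__apply_perms/apply_perms are not from the patch): the callback runs on exactly one CPU while every other CPU spins with interrupts disabled, so it can walk shared state without extra locking.

#include <linux/stop_machine.h>

static int __apply_perms(void *unused)
{
        /* Runs on a single CPU; all other CPUs are parked by the stopper. */
        /* ... walk tasks and rewrite section permissions here ... */
        return 0;
}

static void apply_perms(void)
{
        /* NULL cpumask: the stopper picks one online CPU to run the call. */
        stop_machine(__apply_perms, NULL, NULL);
}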
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ff0eed23ddf1..fc91205ff46c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -481,6 +481,13 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
__pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
/*
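
A hedged usage sketch for the new export (driver and function names are assumptions): a PCI host-bridge driver would map its configuration window through pci_remap_cfgspace() instead of plain ioremap(), because the MT_UNCACHED mapping gives the strongly ordered, non-posted semantics configuration accesses require.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>

/* Hypothetical probe-time helper for a host-bridge driver. */
static void __iomem *host_map_cfg(struct resource *cfg_res)
{
        /* Strongly ordered, non-posted mapping of the config/ECAM window. */
        return pci_remap_cfgspace(cfg_res->start, resource_size(cfg_res));
}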
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4e016d7f37b3..31af3cb59a60 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -87,6 +87,8 @@ struct cachepolicy {
#define s2_policy(policy) 0
#endif
+unsigned long kimage_voffset __ro_after_init;
+
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
@@ -414,6 +416,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
FIXADDR_END);
BUG_ON(idx >= __end_of_fixed_addresses);
+ /* we only support device mappings until pgprot_kernel has been set */
+ if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
+ pgprot_val(pgprot_kernel) == 0))
+ return;
+
if (pgprot_val(prot))
set_pte_at(NULL, vaddr, pte,
pfn_pte(phys >> PAGE_SHIFT, prot));
@@ -1492,7 +1499,7 @@ pgtables_remap lpae_pgtables_remap_asm;
* early_paging_init() recreates boot time page table setup, allowing machines
* to switch over to a high (>4G) address space on LPAE systems
*/
-void __init early_paging_init(const struct machine_desc *mdesc)
+static void __init early_paging_init(const struct machine_desc *mdesc)
{
pgtables_remap *lpae_pgtables_remap;
unsigned long pa_pgd;
@@ -1560,7 +1567,7 @@ void __init early_paging_init(const struct machine_desc *mdesc)
#else
-void __init early_paging_init(const struct machine_desc *mdesc)
+static void __init early_paging_init(const struct machine_desc *mdesc)
{
long long offset;
@@ -1616,7 +1623,6 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
- build_mem_type_table();
prepare_page_table();
map_lowmem();
memblock_set_current_limit(arm_lowmem_limit);
@@ -1635,4 +1641,13 @@ void __init paging_init(const struct machine_desc *mdesc)
empty_zero_page = virt_to_page(zero_page);
__flush_dcache_page(NULL, empty_zero_page);
+
+ /* Compute the virt/idmap offset, mostly for the sake of KVM */
+ kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
+}
+
+void __init early_mm_init(const struct machine_desc *mdesc)
+{
+ build_mem_type_table();
+ early_paging_init(mdesc);
}
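
Illustrative only (the helper name is not from the patch): since kimage_voffset is the difference between a kernel-image virtual address and its identity-mapped address, converting a kernel VA back to the idmap space, as KVM's HYP init needs, is a single subtraction.

/* Hypothetical helper built on the kimage_voffset computed above. */
static inline unsigned long kernel_va_to_idmap(unsigned long va)
{
        return va - kimage_voffset;
}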
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 33a45bd96860..3b8e728cc944 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -436,6 +436,18 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
}
EXPORT_SYMBOL(ioremap_wc);
+#ifdef CONFIG_PCI
+
+#include <asm/mach/map.h>
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
+#endif
+
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (void *)phys_addr;
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 3b69f2642513..1403cb4a0c3d 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -15,6 +15,7 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+#include <asm/set_memory.h>
struct page_change_data {
pgprot_t set_mask;
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index d00d52c9de3e..01d64c0b2563 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -39,13 +39,14 @@ ENTRY(cpu_v7_proc_fin)
ENDPROC(cpu_v7_proc_fin)
/*
- * cpu_v7_reset(loc)
+ * cpu_v7_reset(loc, hyp)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* - loc - location to jump to for soft reset
+ * - hyp - indicates whether the restart occurs in HYP mode
*
* This code must be executed using a flat identity mapping with
* caches disabled.
@@ -53,11 +54,15 @@ ENDPROC(cpu_v7_proc_fin)
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v7_reset)
- mrc p15, 0, r1, c1, c0, 0 @ ctrl register
- bic r1, r1, #0x1 @ ...............m
- THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
- mcr p15, 0, r1, c1, c0, 0 @ disable MMU
+ mrc p15, 0, r2, c1, c0, 0 @ ctrl register
+ bic r2, r2, #0x1 @ ...............m
+ THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
+ mcr p15, 0, r2, c1, c0, 0 @ disable MMU
isb
+#ifdef CONFIG_ARM_VIRT_EXT
+ teq r1, #0
+ bne __hyp_soft_restart
+#endif
bx r0
ENDPROC(cpu_v7_reset)
.popsection
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 8dea61640cc1..47a5acc64433 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -135,9 +135,11 @@ __v7m_setup_cont:
dsb
mov r6, lr @ save LR
ldr sp, =init_thread_union + THREAD_START_SP
+ stmia sp, {r0-r3, r12}
cpsie i
svc #0
1: cpsid i
+ ldmia sp, {r0-r3, r12}
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR
@@ -147,10 +149,10 @@ __v7m_setup_cont:
@ Configure caches (if implemented)
teq r8, #0
- stmneia r12, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
+ stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
blne v7m_invalidate_l1
teq r8, #0 @ re-evaluate condition
- ldmneia r12, {r0-r6, lr}
+ ldmneia sp, {r0-r6, lr}
@ Configure the System Control Register to ensure 8-byte stack alignment
@ Note the STKALIGN bit is either RW or RAO.