Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/init.c       | 17
-rw-r--r--  arch/mips/mm/pgtable-64.c | 20
-rw-r--r--  arch/mips/mm/tlb-r4k.c    |  1
-rw-r--r--  arch/mips/mm/tlbex.c      | 79
4 files changed, 65 insertions(+), 52 deletions(-)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1a85ba92eb5c..be9acb2b959d 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -469,19 +469,20 @@ void __init_refok free_initmem(void)
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
-/*
- * On 64-bit we've got three-level pagetables with a slightly
- * different layout ...
- */
-#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
/*
* gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
* are constants. So we use the variants from asm-offset.h until that gcc
* will officially be retired.
+ *
+ * Align swapper_pg_dir to 64K, which allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers. If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, wasting space. So we place it in its own section and align
+ * it there in the linker script.
*/
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
-pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
-pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
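Context for the swapper_pg_dir change: the table moves into its own .bss
section so the linker script can align it without inflating the object
itself. A minimal sketch of the kind of placement the new comment refers
to; the real linker-script change is outside this diff, and the exact
spelling here is an assumption:

        . = ALIGN(0x10000);                     /* 64K boundary */
        .bss..swapper_pg_dir : {
                *(.bss..swapper_pg_dir)
        }

With a 64K-aligned table, the low 16 bits of its address are zero, so a
TLB handler can form the pointer with a single lui instead of the usual
lui/addiu pair.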
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index cda4e300eb0a..25407794edb4 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -26,17 +26,17 @@ void pgd_init(unsigned long page)
p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
- while (p < end) {
+ do {
p[0] = entry;
p[1] = entry;
p[2] = entry;
p[3] = entry;
p[4] = entry;
- p[5] = entry;
- p[6] = entry;
- p[7] = entry;
p += 8;
- }
+ p[-3] = entry;
+ p[-2] = entry;
+ p[-1] = entry;
+ } while (p != end);
}
#ifndef __PAGETABLE_PMD_FOLDED
@@ -47,17 +47,17 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
p = (unsigned long *) addr;
end = p + PTRS_PER_PMD;
- while (p < end) {
+ do {
p[0] = pagetable;
p[1] = pagetable;
p[2] = pagetable;
p[3] = pagetable;
p[4] = pagetable;
- p[5] = pagetable;
- p[6] = pagetable;
- p[7] = pagetable;
p += 8;
- }
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
}
#endif
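The loop rewrite above is a scheduling aid: a do/while drops the initial
bounds check (these tables are never empty), and bumping the pointer
before the last three stores gives the compiler latitude to fill the
loop branch's delay slot. A standalone C sketch of the pattern
(illustrative, not the kernel code itself):

        static void fill_table(unsigned long *p, unsigned long *end,
                               unsigned long entry)
        {
                /* Entry counts are multiples of 8, so no pre-test is needed. */
                do {
                        p[0] = entry;
                        p[1] = entry;
                        p[2] = entry;
                        p[3] = entry;
                        p[4] = entry;
                        p += 8;            /* increment early ...              */
                        p[-3] = entry;     /* ... so these stores and the      */
                        p[-2] = entry;     /* p != end compare can schedule    */
                        p[-1] = entry;     /* around the branch delay slot     */
                } while (p != end);
        }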
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 87b9cfcc30ff..4b9b935a070e 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -320,6 +320,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
tlb_write_random();
else
tlb_write_indexed();
+ tlbw_use_hazard();
write_c0_pagemask(PM_DEFAULT_MASK);
} else
#endif
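The one-liner adds the missing post-write hazard barrier on the
huge-page path of __update_tlb(); the standard-page path already had
one. Sketch of the canonical R4k indexed-write sequence this mirrors
(following the pattern used elsewhere in this file, e.g. in
local_flush_tlb_all(); shown for context, not part of the patch):

        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
        mtc0_tlbw_hazard();     /* CP0 write must settle before tlbwi */
        tlb_write_indexed();
        tlbw_use_hazard();      /* TLB write must retire before further CP0 traffic */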
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e09d49256908..2833dcb67b5a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -148,8 +148,8 @@ enum label_id {
label_leave,
label_vmalloc,
label_vmalloc_done,
- label_tlbw_hazard,
- label_split,
+ label_tlbw_hazard_0,
+ label_split = label_tlbw_hazard_0 + 8,
label_tlbl_goaround1,
label_tlbl_goaround2,
label_nopage_tlbl,
@@ -167,7 +167,7 @@ UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
-UASM_L_LA(_tlbw_hazard)
+/* _tlbw_hazard_x is handled differently. */
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
@@ -181,6 +181,30 @@ UASM_L_LA(_large_segbits_fault)
UASM_L_LA(_tlb_huge_update)
#endif
+static int __cpuinitdata hazard_instance;
+
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+{
+ switch (instance) {
+ case 0 ... 7:
+ uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
+ return;
+ default:
+ BUG();
+ }
+}
+
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+{
+ switch (instance) {
+ case 0 ... 7:
+ uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
+ break;
+ default:
+ BUG();
+ }
+}
+
/*
* For debug purposes.
*/
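Why eight numbered labels: a uasm label id may bind to only one address
per set of relocations, and with huge-page support
build_tlb_write_entry() can be emitted more than once while a single
handler is generated (normal and huge paths), which would have bound the
old label_tlbw_hazard twice. Each emission now draws a fresh id, and
reserving eight of them (label_split = label_tlbw_hazard_0 + 8) keeps
the ids that follow clear of the range. Hypothetical shape of one
emission, mirroring the call site below:

        uasm_bgezl_hazard(&p, &r, hazard_instance);  /* pick label N        */
        tlbw(&p);                                    /* the guarded write   */
        uasm_bgezl_label(&l, &p, hazard_instance);   /* bind label N here   */
        hazard_instance++;                           /* next emission: N+1  */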
@@ -478,21 +502,28 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
* This branch uses up a mtc0 hazard nop slot and saves
* two nops after the tlbw instruction.
*/
- uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+ uasm_bgezl_hazard(p, r, hazard_instance);
tlbw(p);
- uasm_l_tlbw_hazard(l, *p);
+ uasm_bgezl_label(l, p, hazard_instance);
+ hazard_instance++;
uasm_i_nop(p);
break;
case CPU_R4600:
case CPU_R4700:
- case CPU_R5000:
- case CPU_R5000A:
uasm_i_nop(p);
tlbw(p);
uasm_i_nop(p);
break;
+ case CPU_R5000:
+ case CPU_R5000A:
+ case CPU_NEVADA:
+ uasm_i_nop(p); /* QED specifies 2 nops hazard */
+ uasm_i_nop(p); /* QED specifies 2 nops hazard */
+ tlbw(p);
+ break;
+
case CPU_R4300:
case CPU_5KC:
case CPU_TX49XX:
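What the branch trick expands to on the R4000-class parts (sketch of the
generated code; tlbwi shown, but the refill path uses tlbwr):

        bgezl   $0, 1f          # always taken (0 >= 0), fills the mtc0 hazard slot
         tlbwi                  # executes in the branch delay slot
        1:
        nop                     # single trailing nop

Since bgezl is a branch-likely and $0 is always >= 0, the branch is
always taken and the delay-slot tlbw always executes; per the comment
above, the taken branch stands in for two of the nops that would
otherwise have to follow the tlbw.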
@@ -526,17 +557,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
- case CPU_NEVADA:
- uasm_i_nop(p); /* QED specifies 2 nops hazard */
- /*
- * This branch uses up a mtc0 hazard nop slot and saves
- * a nop after the tlbw instruction.
- */
- uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
- tlbw(p);
- uasm_l_tlbw_hazard(l, *p);
- break;
-
case CPU_RM7000:
uasm_i_nop(p);
uasm_i_nop(p);
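The two hunks above are one move: the QED R5000/R5000A and Nevada parts
now share a single case that satisfies the documented two-nop hazard up
front, and the old Nevada-only nop+bgezl variant is deleted. The emitted
sequence for these parts becomes simply (sketch):

        nop                     # QED specifies 2 nops hazard
        nop
        tlbwi                   # or tlbwr on the random refill path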
@@ -599,8 +619,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
unsigned int reg)
{
if (cpu_has_rixi) {
- UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
- UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
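The RIXI conversion collapses from an SRL plus a ROTR into one ROTR.
This relies on the software PTE bits being laid out (in a companion
change to asm/pgtable-bits.h, not shown in this diff) so that a single
rotate by ilog2(_PAGE_GLOBAL) simultaneously drops _PAGE_GLOBAL onto
EntryLo bit 0 (G) and wraps _PAGE_NO_EXEC/_PAGE_NO_READ around into the
top bits, where the hardware XI/RI inhibit bits live. A worked 32-bit
illustration with assumed bit positions:

        #include <stdint.h>

        /* Assumed RIXI layout of the low software PTE bits: */
        #define _PAGE_NO_EXEC   (1u << 0)       /* -> EntryLo bit 30 (XI) */
        #define _PAGE_NO_READ   (1u << 1)       /* -> EntryLo bit 31 (RI) */
        #define _PAGE_GLOBAL    (1u << 2)       /* -> EntryLo bit 0  (G)  */

        static uint32_t pte_to_entrylo(uint32_t pte)
        {
                /* ROTR pte, ilog2(_PAGE_GLOBAL) == rotate right by 2 */
                return (pte >> 2) | (pte << 30);
        }

Each converted register thus costs one instruction instead of two; the
remaining hunks below apply the same collapse to the even/odd EntryLo
update paths and the 64-bit fast refill path.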
@@ -1019,11 +1038,9 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
if (cpu_has_rixi) {
- UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
- UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
- UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
} else {
uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
@@ -1046,13 +1063,11 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
if (r45k_bvahwbug())
build_tlb_probe_entry(p);
if (cpu_has_rixi) {
- UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
- UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
- UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
if (r4k_250MHZhwbug())
UASM_i_MTC0(p, 0, C0_ENTRYLO0);
UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
} else {
UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
if (r4k_250MHZhwbug())
@@ -1212,13 +1227,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
}
if (cpu_has_rixi) {
- uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
- uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
- uasm_i_drotr(p, even, even,
- ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
- uasm_i_drotr(p, odd, odd,
- ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
} else {
uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */