author     Paul Burton <paul.burton@imgtec.com>     2016-04-19 10:25:02 +0200
committer  Ralf Baechle <ralf@linux-mips.org>       2016-05-13 15:30:25 +0200
commit     694977006a7ba71e33e1f5df4f66ffd5ae19ec84 (patch)
tree       d0c5cda178229fb93fa29268aa9f2267d57d48b5 /arch/mips/include
parent     MIPS: Remove redundant asm/pgtable-bits.h inclusions (diff)
download   linux-694977006a7ba71e33e1f5df4f66ffd5ae19ec84.tar.xz
           linux-694977006a7ba71e33e1f5df4f66ffd5ae19ec84.zip
MIPS: Use enums to make asm/pgtable-bits.h readable
asm/pgtable-bits.h has grown to become an unreadable mess of #ifdef directives defining bits conditionally upon other bits, all at the preprocessing stage, for no good reason.

Instead of having quite so many #ifdefs, simply use enums to provide sequential numbering for bit shifts, without having to keep track manually of what the last bit defined was. Masks are defined separately, after the shifts, which allows most of their definitions to be reused for all systems rather than duplicated.

This patch is not intended to make any behavioural change to the code: all bits should be used in the same way they were before this patch.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Reviewed-by: James Hogan <james.hogan@imgtec.com>
Cc: Maciej W. Rozycki <macro@linux-mips.org>
Cc: Alex Smith <alex.smith@imgtec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13115/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
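For readers skimming the diff below, here is a minimal standalone sketch of the pattern the patch adopts (the DEMO_* names are hypothetical and not taken from the header): an enum numbers the bit shifts sequentially, an explicit value is assigned only where a fixed position is required, and the single-bit masks are defined once afterwards.

/*
 * Illustrative sketch only, not part of the patch: hypothetical flag
 * names showing how an enum provides sequential shift numbering, with
 * the masks derived afterwards rather than per-#ifdef branch.
 */
#include <stdio.h>

enum demo_bits {
	DEMO_PRESENT_SHIFT,		/* 0 */
	DEMO_READ_SHIFT,		/* 1 */
	DEMO_WRITE_SHIFT,		/* 2 */
	DEMO_HW_GLOBAL_SHIFT = 8,	/* explicit value pins a hardware position */
	DEMO_HW_VALID_SHIFT,		/* 9 */
	DEMO_HW_DIRTY_SHIFT,		/* 10 */
};

/* Masks defined once, after the shifts. */
#define DEMO_PRESENT	(1 << DEMO_PRESENT_SHIFT)
#define DEMO_READ	(1 << DEMO_READ_SHIFT)
#define DEMO_WRITE	(1 << DEMO_WRITE_SHIFT)
#define DEMO_HW_GLOBAL	(1 << DEMO_HW_GLOBAL_SHIFT)

int main(void)
{
	printf("DEMO_WRITE     = 0x%x\n", DEMO_WRITE);		/* 0x4 */
	printf("DEMO_HW_GLOBAL = 0x%x\n", DEMO_HW_GLOBAL);	/* 0x100 */
	return 0;
}

Because the enumerators take consecutive values automatically, conditionally compiling a shift in or out no longer requires renumbering everything that follows, which is what the old chain of (_PAGE_x_SHIFT + 1) #defines did by hand.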
Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h  189
1 file changed, 81 insertions(+), 108 deletions(-)
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 2f4031209d54..c81fc1703c8a 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -35,36 +35,25 @@
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
/*
- * The following bits are implemented by the TLB hardware
+ * Page table bit offsets used for 64 bit physical addressing on MIPS32,
+ * for example with Alchemy, Netlogic XLP/XLR or XPA.
*/
-#define _PAGE_NO_EXEC_SHIFT 0
-#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
-#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
-#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
-#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
-#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
-#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
-#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_MASK (7 << _CACHE_SHIFT)
-
-/*
- * The following bits are implemented in software
- */
-#define _PAGE_PRESENT_SHIFT (24)
-#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
-#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
-#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-
-#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+enum pgtable_bits {
+ /* Used by TLB hardware (placed in EntryLo*) */
+ _PAGE_NO_EXEC_SHIFT,
+ _PAGE_NO_READ_SHIFT,
+ _PAGE_GLOBAL_SHIFT,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_SHIFT,
+
+ /* Used only by software (masked out before writing EntryLo*) */
+ _PAGE_PRESENT_SHIFT = 24,
+ _PAGE_READ_SHIFT,
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+};
/*
* Bits for extended EntryLo0/EntryLo1 registers
@@ -73,101 +62,85 @@
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-/*
- * The following bits are implemented in software
- */
-#define _PAGE_PRESENT_SHIFT (0)
-#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
-#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
-#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+/* Page table bits used for r3k systems */
+enum pgtable_bits {
+ /* Used only by software (writes to EntryLo ignored) */
+ _PAGE_PRESENT_SHIFT,
+ _PAGE_READ_SHIFT,
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+
+ /* Used by TLB hardware (placed in EntryLo) */
+ _PAGE_GLOBAL_SHIFT = 8,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_UNCACHED_SHIFT,
+};
-/*
- * The following bits are implemented by the TLB hardware
- */
-#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4)
-#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
-#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
-#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
-#define _CACHE_MASK _CACHE_UNCACHED
+#else
-#define _PFN_SHIFT PAGE_SHIFT
+/* Page table bits used for r4k systems */
+enum pgtable_bits {
+ /* Used only by software (masked out before writing EntryLo*) */
+ _PAGE_PRESENT_SHIFT,
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
+ _PAGE_READ_SHIFT,
+#endif
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+ _PAGE_HUGE_SHIFT,
+#endif
-#else
-/*
- * Below are the "Normal" R4K cases
- */
+ /* Used by TLB hardware (placed in EntryLo*) */
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ _PAGE_NO_EXEC_SHIFT,
+ _PAGE_NO_READ_SHIFT,
+ _PAGE_READ_SHIFT = _PAGE_NO_READ_SHIFT,
+#endif
+ _PAGE_GLOBAL_SHIFT,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_SHIFT,
+};
-/*
- * The following bits are implemented in software
- */
-#define _PAGE_PRESENT_SHIFT 0
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
+
+/* Used only by software */
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-#define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+# define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
#else
-#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+# define _PAGE_READ (1 << _PAGE_READ_SHIFT)
#endif
-#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-
#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
-/* Huge TLB page */
-#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
-#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
-#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
-
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-/* XI - page cannot be executed */
-#ifdef _PAGE_HUGE_SHIFT
-#define _PAGE_NO_EXEC_SHIFT (_PAGE_HUGE_SHIFT + 1)
-#else
-#define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+# define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#endif
-#define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
-
-/* RI - page cannot be read */
-#define _PAGE_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
-#define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
-#define _PAGE_NO_READ_SHIFT _PAGE_READ_SHIFT
-#define _PAGE_NO_READ (cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
-#endif /* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */
-
-#if defined(_PAGE_NO_READ_SHIFT)
-#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
-#elif defined(_PAGE_HUGE_SHIFT)
-#define _PAGE_GLOBAL_SHIFT (_PAGE_HUGE_SHIFT + 1)
-#else
-#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+
+/* Used by TLB hardware (placed in EntryLo*) */
+#if (defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32))
+# define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
+# define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+# define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
+# define _PAGE_NO_READ (cpu_has_rixi ? (1 << _PAGE_NO_READ_SHIFT) : 0)
#endif
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
-
-#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_MASK (7 << _CACHE_SHIFT)
-
-#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
-
-#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+# define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
+# define _CACHE_MASK _CACHE_UNCACHED
+# define _PFN_SHIFT PAGE_SHIFT
+#else
+# define _CACHE_MASK (7 << _CACHE_SHIFT)
+# define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+#endif
#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC 0