Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              |  4
-rw-r--r--  arch/arm/mm/abort-ev4.S          |  1
-rw-r--r--  arch/arm/mm/abort-ev5t.S         |  4
-rw-r--r--  arch/arm/mm/abort-ev5tj.S        |  4
-rw-r--r--  arch/arm/mm/abort-ev6.S          |  8
-rw-r--r--  arch/arm/mm/abort-ev7.S          |  1
-rw-r--r--  arch/arm/mm/abort-lv4t.S         |  2
-rw-r--r--  arch/arm/mm/abort-macro.S        | 14
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  |  6
-rw-r--r--  arch/arm/mm/cache-l2x0.c         |  5
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 22
-rw-r--r--  arch/arm/mm/dma.h                | 32
-rw-r--r--  arch/arm/mm/flush.c              | 15
-rw-r--r--  arch/arm/mm/highmem.c            |  6
-rw-r--r--  arch/arm/mm/mmu.c                | 92
-rw-r--r--  arch/arm/mm/pgd.c                | 10
16 files changed, 187 insertions(+), 39 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c6b976ab8d3..df7537f12469 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -883,6 +883,7 @@ config OUTER_CACHE
config OUTER_CACHE_SYNC
bool
+ select ARM_HEAVY_MB
help
The outer cache has an outer_cache_fns.sync function pointer
that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
+config ARM_HEAVY_MB
+ bool
+
config ARCH_SUPPORTS_BIG_ENDIAN
bool
help
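
ARM_HEAVY_MB is a hidden symbol: platforms (or, as here, OUTER_CACHE_SYNC) select it when mb() has to do more than a dsb. The consumer side lives in asm/barrier.h, which is not part of this diff; assuming the usual pattern, it looks roughly like:

    /* sketch of the barrier.h side (not in this diff) */
    #ifdef CONFIG_ARM_HEAVY_MB
    extern void arm_heavy_mb(void);
    #define __arm_heavy_mb(x...)    do { dsb(x); arm_heavy_mb(); } while (0)
    #else
    #define __arm_heavy_mb(x...)    dsb(x)
    #endif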
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4aba9..b3b31e30cadd 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r3, [r4] @ read aborted ARM instruction
+ uaccess_disable ip @ disable userspace access
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4653a3..a6a381a6caa5 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
mrc p15, 0, r0, c6, c0, 0 @ get FAR
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
+ uaccess_disable ip @ disable user access
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
- do_ldrd_abort tmp=ip, insn=r3
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq do_DataAbort @ yes
tst r3, #1 << 20 @ check write
orreq r1, r1, #1 << 11
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a61264..00ab011bef58 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
bne do_DataAbort
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
- do_ldrd_abort tmp=ip, insn=r3
+ uaccess_disable ip @ disable userspace access
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq do_DataAbort @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c22a33..8801a15aa105 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ r0 ARM1136?
- bne do_DataAbort
+ bne 1f
tst r5, #PSR_J_BIT @ Java?
tsteq r5, #PSR_T_BIT @ Thumb?
- bne do_DataAbort
+ bne 1f
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
ARM_BE8(rev r3, r3)
- do_ldrd_abort tmp=ip, insn=r3
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq 1f @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
#endif
+1: uaccess_disable ip @ disable userspace access
b do_DataAbort
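
uaccess_disable is an assembler macro (asm/assembler.h, not shown in this diff); on CPUs using CP15 software domains it closes the user domain in the Domain Access Control Register, so the abort handler cannot stumble into userspace mappings. A C-level sketch of the idea, assuming a software-domain (SW-PAN style) configuration:

    /* illustrative only; the real macro is assembler and may be a no-op
     * on configurations without software domains.  domain_val(),
     * DOMAIN_USER and DOMAIN_MANAGER are from asm/domain.h. */
    static inline void uaccess_disable_sketch(void)
    {
            unsigned long dacr;

            asm volatile("mrc p15, 0, %0, c3, c0, 0" : "=r" (dacr));
            /* clear both bits of the user domain field: DOMAIN_NOACCESS */
            dacr &= ~domain_val(DOMAIN_USER, DOMAIN_MANAGER);
            asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr) : "memory");
    }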
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad054214..e8d0e08c227f 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ uaccess_disable ip @ disable userspace access
/*
* V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f3982580c273..6d8e8e3365d1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
#endif
bne .data_thumb_abort
ldr r8, [r4] @ read arm instruction
+ uaccess_disable ip @ disable userspace access
tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
+ uaccess_disable ip @ disable userspace access
tst r8, #1 << 11 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes
and r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68ef0e83..4509bee4e081 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
tst \psr, #PSR_T_BIT
beq not_thumb
ldrh \tmp, [\pc] @ Read aborted Thumb instruction
+ uaccess_disable ip @ disable userspace access
and \tmp, \tmp, # 0xfe00 @ Mask opcode field
cmp \tmp, # 0x5600 @ Is it ldrsb?
orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
* [7:4] == 1101
* [20] == 0
*/
- .macro do_ldrd_abort, tmp, insn
- tst \insn, #0x0e100000 @ [27:25,20] == 0
- bne not_ldrd
- and \tmp, \insn, #0x000000f0 @ [7:4] == 1101
- cmp \tmp, #0x000000d0
- beq do_DataAbort
-not_ldrd:
+ .macro teq_ldrd, tmp, insn
+ mov \tmp, #0x0e100000
+ orr \tmp, #0x000000f0
+ and \tmp, \insn, \tmp
+ teq \tmp, #0x000000d0
.endm
-
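
Unlike do_ldrd_abort, which branched to do_DataAbort itself, teq_ldrd only sets the condition flags (Z set when the instruction is an LDRD) and leaves the branch to the caller; that is what lets abort-ev6.S reuse it with a local label. The test, restated in C for illustration:

    /* an ARM LDRD has bits [27:25] == 000, [7:4] == 0b1101 and the
     * L bit (20) clear, so one mask-and-compare identifies it */
    static inline bool insn_is_ldrd(u32 insn)
    {
            return (insn & (0x0e100000 | 0x000000f0)) == 0x000000d0;
    }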
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e08c25..5c1b7a7b9af6 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
struct device_node *node;
void __iomem *base;
bool l2_wt_override = false;
- struct resource res;
#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
l2_wt_override = true;
@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)
node = of_find_matching_node(NULL, feroceon_ids);
if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
- if (of_address_to_resource(node, 0, &res))
- return -ENODEV;
-
- base = ioremap(res.start, resource_size(&res));
+ base = of_iomap(node, 0);
if (!base)
return -ENOMEM;
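
of_iomap() folds the two removed steps into one call; its implementation in drivers/of/address.c is essentially:

    void __iomem *of_iomap(struct device_node *np, int index)
    {
            struct resource res;

            if (of_address_to_resource(np, index, &res))
                    return NULL;

            return ioremap(res.start, resource_size(&res));
    }

One behavioural nit: an address-translation failure now falls through to the !base check and returns -ENOMEM rather than -ENODEV.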
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d3309024..493692d838c6 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
}
}
+ if (of_property_read_bool(np, "arm,shared-override")) {
+ *aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+ *aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+ }
+
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
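
The aux_val/aux_mask pair follows the usual l2x0 convention: the boot-time register value is ANDed with the mask, then ORed with the value, so setting a bit in aux_val while clearing it in aux_mask forces shared-attribute override on regardless of what the boot loader left behind. Roughly the pattern __l2c_init() applies:

    u32 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

    aux &= aux_mask;        /* bits cleared here come under kernel control */
    aux |= aux_val;         /* ...and are forced to the requested value    */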
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 3d3d6aa60c87..bf35abcc7d59 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
+#include "dma.h"
#include "mm.h"
/*
@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
size = PAGE_ALIGN(size);
want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
- if (is_coherent || nommu())
+ if (nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+ addr = __alloc_from_contiguous(dev, size, prot, &page,
+ caller, want_vaddr);
+ else if (is_coherent)
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
- else if (!dev_get_cma_area(dev))
- addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
else
- addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+ caller, want_vaddr);
if (page)
*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
- return __dma_alloc(dev, size, handle, gfp, prot, true,
+ return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
attrs, __builtin_return_address(0));
}
@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
size = PAGE_ALIGN(size);
- if (is_coherent || nommu()) {
+ if (nommu()) {
__dma_free_buffer(page, size);
- } else if (__free_from_pool(cpu_addr, size)) {
+ } else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
return;
} else if (!dev_get_cma_area(dev)) {
- if (want_vaddr)
+ if (want_vaddr && !is_coherent)
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
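
The net effect of the __dma_alloc() reordering is that a per-device (or global) CMA area is now considered before the is_coherent short-cut, so coherent devices with a CMA area allocate from it instead of plain pages. Condensed, with comments added, the new decision chain reads:

    if (nommu())
            /* no MMU: nothing to remap, take contiguous pages */
            addr = __alloc_simple_buffer(dev, size, gfp, &page);
    else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
            /* CMA available and we may sleep: use it, coherent or not */
            addr = __alloc_from_contiguous(dev, size, prot, &page,
                                           caller, want_vaddr);
    else if (is_coherent)
            /* coherent but no CMA: plain page allocator */
            addr = __alloc_simple_buffer(dev, size, gfp, &page);
    else if (!(gfp & __GFP_WAIT))
            /* atomic context: carve from the preallocated pool */
            addr = __alloc_from_pool(size, &page);
    else
            /* default: allocate and remap with the requested attributes */
            addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
                                        caller, want_vaddr);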
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 000000000000..70ea6852f94e
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area cpu_cache.dma_map_area
+#define dmac_unmap_area cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
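
__glue() is plain token pasting (asm/glue.h), so in the single-cache case dmac_map_area resolves at compile time to the CPU-specific routine. For example, with _CACHE defined as v7 the declarations above become:

    /* effect of the __glue() expansion when _CACHE == v7 */
    extern void v7_dma_map_area(const void *, size_t, int);
    extern void v7_dma_unmap_area(const void *, size_t, int);

MULTI_CACHE kernels instead dispatch through the cpu_cache function table at run time.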
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 34b66af516ea..1ec8e7590fc6 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -21,6 +21,21 @@
#include "mm.h"
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+ if (outer_cache.sync)
+ outer_cache.sync();
+#endif
+ if (soc_mb)
+ soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
#ifdef CONFIG_CPU_CACHE_VIPT
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
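
soc_mb gives platforms a hook behind the heavy barrier. A hypothetical user (all names and addresses below are invented for illustration) would install it during early init:

    #define MYSOC_SYNC_PHYS 0xf1000000              /* hypothetical */

    static void __iomem *mysoc_sync;                /* hypothetical */

    static void mysoc_mb(void)
    {
            readl_relaxed(mysoc_sync);              /* e.g. drain a write buffer */
    }

    static int __init mysoc_barrier_init(void)
    {
            mysoc_sync = ioremap(MYSOC_SYNC_PHYS, SZ_4K);
            if (mysoc_sync)
                    soc_mb = mysoc_mb;      /* arm_heavy_mb() now calls it */
            return 0;
    }
    early_initcall(mysoc_barrier_init);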
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa793989..9df5f09585ca 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
/*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
return page_address(page);
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
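
All three hunks make the same fix: kmap slot numbers are offsets within the fixmap area, not absolute fixmap indices, so they must be biased by FIX_KMAP_BEGIN now that other fixmap entries precede them. With the generic helper (assuming asm-generic/fixmap.h) the address works out as:

    unsigned int idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
    unsigned long vaddr = FIXADDR_TOP - (idx << PAGE_SHIFT); /* __fix_to_virt(idx) */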
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a46d52..7cd15143a507 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
+ .domain = DOMAIN_VECTORS,
},
[MT_HIGH_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
+ .domain = DOMAIN_VECTORS,
},
[MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
}
EXPORT_SYMBOL(get_mem_type);
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+ __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+ return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+ return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+ pud_t *pud = pud_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
+
+ return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+ pmd_t *pmd;
+
+ /*
+ * The early fixmap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+ != FIXADDR_TOP >> PMD_SHIFT);
+
+ pmd = fixmap_pmd(FIXADDR_TOP);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+ pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
/*
* To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
* As a result, this can only be called with preemption disabled, as under
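
bm_pte is a statically allocated PTE table: before paging_init() no allocator is available, so the fixmap is backed by __initdata storage, and pte_offset_fixmap is flipped from the early (static-table) walker to the normal pte_offset_kernel() once real page tables exist. This is what allows e.g. earlycon to map its UART before paging is final; a sketch of such an early consumer (FIX_EARLYCON_MEM_BASE and set_fixmap_io() assumed from the generic fixmap API, not part of this diff):

    phys_addr_t uart_phys = 0x10009000;     /* hypothetical UART address */
    void __iomem *base;

    set_fixmap_io(FIX_EARLYCON_MEM_BASE, uart_phys & PAGE_MASK);
    base = (void __iomem *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
                            (uart_phys & ~PAGE_MASK));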
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long vaddr = __fix_to_virt(idx);
- pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
/* Make sure fixmap region does not exceed available allocation. */
BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
- md->virtual >= PAGE_OFFSET &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1219,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)
/*
* Set up the device mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function. This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except the early fixmap, we might remove debug
+ * device mappings. This means earlycon can be used to debug this function.
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
*/
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
@@ -1237,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
early_trap_init(vectors);
- for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+ /*
+ * Clear the page tables, except the top pmd used by the early fixmaps
+ */
+ for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
@@ -1489,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
#endif
+static void __init early_fixmap_shutdown(void)
+{
+ int i;
+ unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+ pte_offset_fixmap = pte_offset_late_fixmap;
+ pmd_clear(fixmap_pmd(va));
+ local_flush_tlb_kernel_page(va);
+
+ for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+ pte_t *pte;
+ struct map_desc map;
+
+ map.virtual = fix_to_virt(i);
+ pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+ /* Only i/o device mappings are supported ATM */
+ if (pte_none(*pte) ||
+ (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+ continue;
+
+ map.pfn = pte_pfn(*pte);
+ map.type = MT_DEVICE;
+ map.length = PAGE_SIZE;
+
+ create_mapping(&map);
+ }
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1502,6 +1575,7 @@ void __init paging_init(const struct machine_desc *mdesc)
map_lowmem();
memblock_set_current_limit(arm_lowmem_limit);
dma_contiguous_remap();
+ early_fixmap_shutdown();
devicemaps_init(mdesc);
kmap_init();
tcm_init();
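
Two details in the devicemaps_init() loop are easy to miss: the old form, "for (addr = VMALLOC_START; addr; addr += PMD_SIZE)", terminated only when addr wrapped past the top of the address space to zero, and the new bound stops one pmd short, so the top pmd (whose device mappings early_fixmap_shutdown() has just replayed via create_mapping()) survives and earlycon keeps working through this function. A quick arithmetic check of that bound, under the usual ARM values:

    #include <assert.h>

    int main(void)
    {
            unsigned long fixaddr_top = 0xffeff000UL;       /* assumed ARM value */
            unsigned long pmd_mask = ~((1UL << 21) - 1);    /* 2 MiB sections    */

            /* the clearing loop stops here, leaving the fixmap pmd intact */
            assert((fixaddr_top & pmd_mask) == 0xffe00000UL);
            return 0;
    }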
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f11dd9f..e683db1b90a3 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
if (!new_pte)
goto no_pte;
+#ifndef CONFIG_ARM_LPAE
+ /*
+ * Modify the PTE pointer to have the correct domain. This
+ * needs to be the vectors domain to avoid the low vectors
+ * being unmapped.
+ */
+ pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+ pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
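
The pgd.c hunk pairs with the mem_types[] change above: the vectors page now lives in its own DOMAIN_VECTORS instead of DOMAIN_USER, so closing the user domain (uaccess_disable in the abort handlers) can no longer make the exception vectors inaccessible. For CPUs with the vectors at address 0, pgd_alloc() sets up a first-level entry for them, and that entry must carry DOMAIN_VECTORS too, hence the fix-up of its domain bits here. Schematically, the resulting DACR layout (domain_val() per asm/domain.h; the exact initial value is not part of this diff):

    unsigned long dacr;

    dacr = domain_val(DOMAIN_KERNEL,  DOMAIN_MANAGER) |
           domain_val(DOMAIN_IO,      DOMAIN_CLIENT)  |
           domain_val(DOMAIN_USER,    DOMAIN_CLIENT)  |  /* uaccess_disable drops
                                                            this to NOACCESS */
           domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT);    /* vectors stay mapped */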