Diffstat (limited to 'include/asm-x86/pgtable-3level.h')
-rw-r--r--  include/asm-x86/pgtable-3level.h | 88
1 file changed, 37 insertions(+), 51 deletions(-)
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 948a33414118..a195c3e757b9 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -15,16 +15,18 @@
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-#define pud_none(pud) 0
-#define pud_bad(pud) 0
-#define pud_present(pud) 1
-/*
- * All present pages with !NX bit are kernel-executable:
- */
-static inline int pte_exec_kernel(pte_t pte)
+static inline int pud_none(pud_t pud)
+{
+ return pud_val(pud) == 0;
+}
+static inline int pud_bad(pud_t pud)
+{
+ return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+static inline int pud_present(pud_t pud)
{
- return !(pte_val(pte) & _PAGE_NX);
+ return pud_val(pud) & _PAGE_PRESENT;
}
/* Rules for using set_pte: the pte being assigned *must* be
@@ -39,11 +41,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
smp_wmb();
ptep->pte_low = pte.pte_low;
}
-static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep , pte_t pte)
-{
- native_set_pte(ptep, pte);
-}
/*
* Since this is only called on user PTEs, and the page fault handler
@@ -71,7 +68,7 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
}
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
- *pudp = pud;
+ set_64bit((unsigned long long *)(pudp),native_pud_val(pud));
}
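On a 32-bit PAE kernel a pud entry is 64 bits wide, so the plain assignment this replaces would compile to two 32-bit stores, and a concurrent hardware page-table walk could observe a half-written entry; set_64bit() performs the update as a single atomic 64-bit store (typically via cmpxchg8b). Below is a minimal userspace analogue of that idea using C11 atomics rather than the kernel's set_64bit(); the names and values are illustrative only, not kernel code.

/* Illustrative userspace analogue, not kernel code: publish a 64-bit
 * "entry" with one atomic store so a reader can never see a mix of
 * the old and new 32-bit halves. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t entry;          /* stands in for a pud/pmd slot */

static void publish_entry(uint64_t val)
{
        /* Single indivisible 64-bit store (the kernel uses set_64bit). */
        atomic_store_explicit(&entry, val, memory_order_release);
}

static uint64_t read_entry(void)
{
        return atomic_load_explicit(&entry, memory_order_acquire);
}

int main(void)
{
        publish_entry(((uint64_t)0x12345678 << 32) | 0x9abcdef0);
        printf("entry = %#llx\n", (unsigned long long)read_entry());
        return 0;
}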
/*
@@ -94,24 +91,29 @@ static inline void native_pmd_clear(pmd_t *pmd)
*(tmp + 1) = 0;
}
-#ifndef CONFIG_PARAVIRT
-#define set_pte(ptep, pte) native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
-#define set_pte_present(mm, addr, ptep, pte) native_set_pte_present(mm, addr, ptep, pte)
-#define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte)
-#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
-#define set_pud(pudp, pud) native_set_pud(pudp, pud)
-#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
-#define pmd_clear(pmd) native_pmd_clear(pmd)
-#endif
-
-/*
- * Pentium-II erratum A13: in PAE mode we explicitly have to flush
- * the TLB via cr3 if the top-level pgd is changed...
- * We do not let the generic code free and clear pgd entries due to
- * this erratum.
- */
-static inline void pud_clear (pud_t * pud) { }
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+
+ /*
+ * In principle we need to do a cr3 reload here to make sure
+ * the processor recognizes the changed pgd. In practice, all
+ * the places where pud_clear() gets called are followed by
+ * full tlb flushes anyway, so we can defer the cost here.
+ *
+ * Specifically:
+ *
+ * mm/memory.c:free_pmd_range() - immediately after the
+ * pud_clear() it does a pmd_free_tlb(). We change the
+ * mmu_gather structure to do a full tlb flush (which has the
+ * effect of reloading cr3) when the pagetable free is
+ * complete.
+ *
+ * arch/x86/mm/hugetlbpage.c:huge_pmd_unshare() - the call to
+ * this is followed by a flush_tlb_range, which on x86 does a
+ * full tlb flush.
+ */
+}
#define pud_page(pud) \
((struct page *) __va(pud_val(pud) & PAGE_MASK))
@@ -155,21 +157,7 @@ static inline int pte_none(pte_t pte)
static inline unsigned long pte_pfn(pte_t pte)
{
- return pte_val(pte) >> PAGE_SHIFT;
-}
-
-extern unsigned long long __supported_pte_mask;
-
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
- return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
- pgprot_val(pgprot)) & __supported_pte_mask);
-}
-
-static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-{
- return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
- pgprot_val(pgprot)) & __supported_pte_mask);
+ return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
}
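Under PAE the NX bit sits in bit 63 of the 64-bit pte, so shifting the raw value right by PAGE_SHIFT would leave NX-derived bits in the high part of the returned pfn; masking _PAGE_NX out first keeps the frame number clean. A small standalone sketch of the arithmetic follows, with PAGE_SHIFT and the bit positions hard-coded as assumptions rather than taken from kernel headers.

/* Standalone sketch of the pte_pfn() bit arithmetic; the constants
 * below are assumed for illustration, not pulled from kernel headers. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define _PAGE_NX        (1ULL << 63)    /* execute-disable bit under PAE */
#define _PAGE_PRESENT   (1ULL << 0)

int main(void)
{
        uint64_t pfn = 0x12345;
        uint64_t pte = (pfn << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_NX;

        /* A naive shift drags the NX bit down into the pfn... */
        printf("raw shift : %#llx\n",
               (unsigned long long)(pte >> PAGE_SHIFT));
        /* ...while masking NX first recovers the original frame number. */
        printf("masked    : %#llx\n",
               (unsigned long long)((pte & ~_PAGE_NX) >> PAGE_SHIFT));
        return 0;
}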
/*
@@ -177,7 +165,7 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
* put the 32 bits of offset into the high part.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS 32
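The pgoff_to_pte() change reflects that a PAE pte_t now wraps its two 32-bit halves in a nested unnamed struct, so the old positional { low, high } initializer no longer matches the type and designated initializers are used instead. A simplified, self-contained stand-in for that round trip is sketched below; the union layout and the _PAGE_FILE value are assumptions for illustration, not the kernel definitions.

/* Simplified stand-in for the PAE pte_t; field names follow the diff,
 * the union layout and _PAGE_FILE value are assumed for illustration. */
#include <stdint.h>
#include <stdio.h>

#define _PAGE_FILE      (1u << 6)       /* assumed bit for the sketch */

typedef union {
        struct { uint32_t pte_low, pte_high; };
        uint64_t pte;
} pte_t;

#define pgoff_to_pte(off) \
        ((pte_t){ { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define pte_to_pgoff(pte)       ((pte).pte_high)

int main(void)
{
        pte_t p = pgoff_to_pte(0xdeadbeefu);

        /* The 32-bit file offset survives the encode/decode round trip. */
        printf("pgoff = %#x, raw pte = %#llx\n",
               pte_to_pgoff(p), (unsigned long long)p.pte);
        return 0;
}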
/* Encode and de-code a swap entry */
@@ -185,8 +173,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
#define __swp_offset(x) ((x).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
-#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
-
-#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
#endif /* _I386_PGTABLE_3LEVEL_H */
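For the swap-entry helpers above: a 5-bit swap type is kept in the low bits, the offset sits above it, and the packed value lives in pte_high. A self-contained sketch of that packing is below; __swp_type() is not part of this hunk, so the mask used for it here is an assumption.

/* Self-contained sketch of the swap-entry packing: 5 bits of type in
 * the low bits, the offset above them, the whole value stored in
 * pte_high.  The __swp_type() mask is an assumption; it is not part
 * of this hunk. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } swp_entry_t;

#define __swp_type(x)           ((x).val & 0x1f)        /* assumed */
#define __swp_offset(x)         ((x).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5 })

int main(void)
{
        swp_entry_t e = __swp_entry(3, 0x12345);

        printf("type=%u offset=%#x raw=%#x\n",
               (unsigned)__swp_type(e), (unsigned)__swp_offset(e),
               (unsigned)e.val);
        return 0;
}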