author      Anshuman Khandual <anshuman.khandual@arm.com>    2021-02-24 21:01:32 +0100
committer   Linus Torvalds <torvalds@linux-foundation.org>  2021-02-24 22:38:27 +0100
commit      bb5c47ced46797409f4791d0380db3116d93134c (patch)
tree        9c31b4c0d27063babfa89aed71e0c9e565d7354b /mm/debug_vm_pgtable.c
parent      mm/debug: improve memcg debugging (diff)
mm/debug_vm_pgtable/basic: add validation for dirtiness after write protect
Patch series "mm/debug_vm_pgtable: Some minor updates", v3.
This series contains some cleanups and new tests suggested by Catalin in an earlier discussion:
https://lore.kernel.org/linux-mm/20201123142237.GF17833@gaia/
This patch (of 2):
This adds validation tests for dirtiness after write protect conversion at
each page table level. Two new, separate test types are involved:
The first test ensures that a given page table entry does not become dirty
after pxx_wrprotect(). This is important for platforms like arm64, which
transfer the hardware dirty bit (!PTE_RDONLY) into the software dirty bit
and drop it while making the entry write protected. The test ensures that
no fresh page table entry is created with the hardware dirty bit set.
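At the PTE level this boils down to the following check, added right after
the entry is created with pfn_pte() in the hunk below (the PMD and PUD
variants are analogous):

	pte_t pte = pfn_pte(pfn, prot);

	/* A freshly created, then write protected entry must not be dirty */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));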
The second test ensures that a given page table entry always preserves its
dirty information across pxx_wrprotect().
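At the PTE level this corresponds to the following pair of checks in the
hunk below, again mirrored for PMD and PUD:

	/* A clean entry must stay clean and a dirty one must stay dirty */
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));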
This also adds two previously missing PUD level basic tests and, while here,
fixes pxx_wrprotect() related typos in the documentation file.
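For reference, the two previously missing PUD level checks mirror their PTE
and PMD counterparts (see the PUD hunk below):

	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));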
Link: https://lkml.kernel.org/r/1611137241-26220-1-git-send-email-anshuman.khandual@arm.com
Link: https://lkml.kernel.org/r/1611137241-26220-2-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Steven Price <steven.price@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/debug_vm_pgtable.c')
-rw-r--r--   mm/debug_vm_pgtable.c   39
1 file changed, 39 insertions, 0 deletions
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index c05d9dcf7891..1842d97522bb 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -63,6 +63,16 @@ static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
 	pte_t pte = pfn_pte(pfn, prot);
 
 	pr_debug("Validating PTE basic\n");
+
+	/*
+	 * This test needs to be executed after the given page table entry
+	 * is created with pfn_pte() to make sure that protection_map[idx]
+	 * does not have the dirty bit enabled from the beginning. This is
+	 * important for platforms like arm64 where (!PTE_RDONLY) indicate
+	 * dirty bit being set.
+	 */
+	WARN_ON(pte_dirty(pte_wrprotect(pte)));
+
 	WARN_ON(!pte_same(pte, pte));
 	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
 	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
@@ -70,6 +80,8 @@ static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
 	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
 	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
 	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
+	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
+	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
 }
 
 static void __init pte_advanced_tests(struct mm_struct *mm,
@@ -137,6 +149,17 @@ static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
 		return;
 
 	pr_debug("Validating PMD basic\n");
+
+	/*
+	 * This test needs to be executed after the given page table entry
+	 * is created with pfn_pmd() to make sure that protection_map[idx]
+	 * does not have the dirty bit enabled from the beginning. This is
+	 * important for platforms like arm64 where (!PTE_RDONLY) indicate
+	 * dirty bit being set.
+	 */
+	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));
+
+
 	WARN_ON(!pmd_same(pmd, pmd));
 	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
 	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
@@ -144,6 +167,8 @@ static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
 	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
 	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
 	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
+	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
+	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
 	/*
 	 * A huge page does not point to next level page table
 	 * entry. Hence this must qualify as pmd_bad().
@@ -257,11 +282,25 @@ static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
 		return;
 
 	pr_debug("Validating PUD basic\n");
+
+	/*
+	 * This test needs to be executed after the given page table entry
+	 * is created with pfn_pud() to make sure that protection_map[idx]
+	 * does not have the dirty bit enabled from the beginning. This is
+	 * important for platforms like arm64 where (!PTE_RDONLY) indicate
+	 * dirty bit being set.
+	 */
+	WARN_ON(pud_dirty(pud_wrprotect(pud)));
+
 	WARN_ON(!pud_same(pud, pud));
 	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
+	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
+	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
 	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
 	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
 	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
+	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
+	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));
 
 	if (mm_pmd_folded(mm))
 		return;
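To exercise the new checks, the kernel needs to be built with
CONFIG_DEBUG_VM_PGTABLE=y, which enables mm/debug_vm_pgtable.c; the suite
then runs once during boot and any failing check above surfaces as a
WARN_ON() backtrace in the kernel log.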