author      Alexandre Ghiti <alex@ghiti.fr>  2021-02-08 20:30:17 +0100
committer   Palmer Dabbelt <palmerdabbelt@google.com>  2021-02-23 03:54:17 +0100
commit      d7fbcf40df86bb67193d9faf52138fc1202decb2 (patch)
tree        52a24c60d68f48718e45e1b43ffccbb715c925df /arch
parent      riscv: Improve kasan population function (diff)
download    linux-d7fbcf40df86bb67193d9faf52138fc1202decb2.tar.xz
            linux-d7fbcf40df86bb67193d9faf52138fc1202decb2.zip
riscv: Improve kasan population by using hugepages when possible
The kasan functions that populate the shadow regions used to allocate them
page by page and did not take advantage of hugepages, so fix this by trying
to allocate 1GB hugepages and falling back to 2MB hugepages or 4K pages when
that fails. This reduces the page table memory consumption and improves TLB
usage, as shown below:

Before this patch:

---[ Kasan shadow start ]---
0xffffffc000000000-0xffffffc400000000    0x00000000818ef000        16G PTE     . A . . . . R V
0xffffffc400000000-0xffffffc447fc0000    0x00000002b7f4f000   1179392K PTE     D A . . . W R V
0xffffffc480000000-0xffffffc800000000    0x00000000818ef000        14G PTE     . A . . . . R V
---[ Kasan shadow end ]---

After this patch:

---[ Kasan shadow start ]---
0xffffffc000000000-0xffffffc400000000    0x00000000818ef000        16G PTE     . A . . . . R V
0xffffffc400000000-0xffffffc440000000    0x0000000240000000         1G PGD     D A . . . W R V
0xffffffc440000000-0xffffffc447e00000    0x00000002b7e00000       126M PMD     D A . . . W R V
0xffffffc447e00000-0xffffffc447fc0000    0x00000002b818f000      1792K PTE     D A . . . W R V
0xffffffc480000000-0xffffffc800000000    0x00000000818ef000        14G PTE     . A . . . . R V
---[ Kasan shadow end ]---

Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
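To make the decision logic concrete, below is a small user-space sketch (not kernel
code) of the granularity choice the patch makes at each step of the shadow walk:
use a 1G mapping when the range is suitably aligned and large enough, otherwise try
a 2M mapping, otherwise fall back to 4K pages. The 1G/2M sizes correspond to Sv39,
which this patch targets; fits() and pick_granularity() are illustrative helper
names, not kernel APIs.

/*
 * User-space sketch of the mapping-granularity decision described above:
 * prefer a 1G mapping, then a 2M mapping, then 4K pages.
 * PGDIR_SIZE/PMD_SIZE values assume Sv39; fits() and pick_granularity()
 * are illustrative helpers, not kernel APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  (1UL << 12)  /* 4K */
#define PMD_SIZE   (1UL << 21)  /* 2M */
#define PGDIR_SIZE (1UL << 30)  /* 1G */

/* A granularity fits when the address is aligned to it and the remaining range is large enough. */
static bool fits(uint64_t vaddr, uint64_t end, uint64_t size)
{
	return (vaddr % size) == 0 && (end - vaddr) >= size;
}

static uint64_t pick_granularity(uint64_t vaddr, uint64_t end)
{
	if (fits(vaddr, end, PGDIR_SIZE))
		return PGDIR_SIZE;
	if (fits(vaddr, end, PMD_SIZE))
		return PMD_SIZE;
	return PAGE_SIZE;
}

int main(void)
{
	/* The populated shadow range from the "before" dump above. */
	uint64_t vaddr = 0xffffffc400000000ULL;
	uint64_t end   = 0xffffffc447fc0000ULL;
	unsigned long n_1g = 0, n_2m = 0, n_4k = 0;

	while (vaddr < end) {
		uint64_t size = pick_granularity(vaddr, end);

		if (size == PGDIR_SIZE)
			n_1g++;
		else if (size == PMD_SIZE)
			n_2m++;
		else
			n_4k++;
		vaddr += size;
	}

	printf("1G mappings: %lu, 2M mappings: %lu, 4K pages: %lu\n", n_1g, n_2m, n_4k);
	return 0;
}

Running this reports one 1G mapping, 63 2M mappings (126M) and 448 4K pages (1792K),
matching the "after" page table dump in the commit message.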
Diffstat (limited to 'arch')
-rw-r--r--  arch/riscv/mm/kasan_init.c  24
1 file changed, 24 insertions, 0 deletions
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index a97df4fdce09..c676a4674342 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -96,6 +96,15 @@ static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long en
do {
next = pmd_addr_end(vaddr, end);
+
+ if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+ phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
+ if (phys_addr) {
+ set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ continue;
+ }
+ }
+
kasan_populate_pte(pmdp, vaddr, next);
} while (pmdp++, vaddr = next, vaddr != end);
@@ -116,6 +125,21 @@ static void kasan_populate_pgd(unsigned long vaddr, unsigned long end)
do {
next = pgd_addr_end(vaddr, end);
+
+ /*
+ * pgdp can't be none since kasan_early_init initialized the whole KASAN
+ * shadow region with kasan_early_shadow_pmd: if this is still the case,
+ * that means we can try to allocate a hugepage as a replacement.
+ */
+ if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
+ IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
+ phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
+ if (phys_addr) {
+ set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ continue;
+ }
+ }
+
kasan_populate_pmd(pgdp, vaddr, next);
} while (pgdp++, vaddr = next, vaddr != end);
}