author     Alexander Gordeev <agordeev@linux.ibm.com>    2021-02-12 07:43:19 +0100
committer  Vasily Gorbik <gor@linux.ibm.com>             2021-02-24 00:31:22 +0100
commit     4c86d2f51a0b2f8f7793129660f1232ec01d562b (patch)
tree       71ae9c6fa501acedfb78b237c226421d86aeeda9 /arch
parent     s390/mm: fix phys vs virt confusion in pgtable allocation routines (diff)
s390/mm: fix phys vs virt confusion in vmem_*() functions family
For historical reasons the vmem_*() family of functions misuses or
ignores the distinction between physical and virtual addresses.
Operate on virtual addresses consistently and convert with __pa()
only at the points where a physical address is actually stored into
a page table entry.
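
Such mixups are latent as long as __pa()/__va() are identity
conversions, as they have historically been on s390; the bug only
bites once the physical and virtual layouts diverge. A minimal
userspace sketch of the distinction (PAGE_OFFSET and the pa()/va()
helpers below are illustrative stand-ins, not the kernel's
definitions):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative direct-map offset: with an identity mapping it is 0,
 * and any phys/virt mixup happens to be harmless. */
#define PAGE_OFFSET 0x0UL

/* Stand-in for the kernel's __pa(): virtual -> physical. */
static uintptr_t pa(const void *vaddr)
{
	return (uintptr_t)vaddr - PAGE_OFFSET;
}

/* Stand-in for the kernel's __va(): physical -> virtual. */
static void *va(uintptr_t paddr)
{
	return (void *)(paddr + PAGE_OFFSET);
}

int main(void)
{
	unsigned long buf[4];

	/* The round trip is always the identity ... */
	assert(va(pa(buf)) == (void *)buf);

	/* ... but pa(buf) may only be stored (e.g. in a page table
	 * entry), never dereferenced, once PAGE_OFFSET is non-zero. */
	printf("virt=%p phys=%#lx\n", (void *)buf, (unsigned long)pa(buf));
	return 0;
}
```

With any non-zero PAGE_OFFSET, dereferencing the result of pa() would
access the wrong address, which is exactly the class of error this
patch removes.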
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Diffstat (limited to 'arch')
-rw-r--r--    arch/s390/mm/vmem.c    30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 01f3a5f58e64..77afa28876a1 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -26,14 +26,14 @@ static void __ref *vmem_alloc_pages(unsigned int order)
 	if (slab_is_available())
 		return (void *)__get_free_pages(GFP_KERNEL, order);
-	return (void *) memblock_phys_alloc(size, size);
+	return memblock_alloc(size, size);
 }
 
 static void vmem_free_pages(unsigned long addr, int order)
 {
 	/* We don't expect boot memory to be removed ever. */
 	if (!slab_is_available() ||
-	    WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
+	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
 		return;
 	free_pages(addr, order);
 }
@@ -56,7 +56,7 @@ pte_t __ref *vmem_pte_alloc(void)
 	if (slab_is_available())
 		pte = (pte_t *) page_table_alloc(&init_mm);
 	else
-		pte = (pte_t *) memblock_phys_alloc(size, size);
+		pte = (pte_t *) memblock_alloc(size, size);
 	if (!pte)
 		return NULL;
 	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
@@ -84,7 +84,7 @@ static void vmemmap_flush_unused_sub_pmd(void)
 {
 	if (!unused_sub_pmd_start)
 		return;
-	memset(__va(unused_sub_pmd_start), PAGE_UNUSED,
+	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
 	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
 	unused_sub_pmd_start = 0;
 }
@@ -97,7 +97,7 @@ static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
 	 * getting removed (just in case the memmap never gets initialized,
 	 * e.g., because the memory block never gets onlined).
 	 */
-	memset(__va(start), 0, sizeof(struct page));
+	memset((void *)start, 0, sizeof(struct page));
 }
 
 static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
@@ -118,7 +118,7 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
 
 static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
-	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
 
 	vmemmap_flush_unused_sub_pmd();
@@ -127,7 +127,7 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 
 	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
 	if (!IS_ALIGNED(start, PMD_SIZE))
-		memset(page, PAGE_UNUSED, start - __pa(page));
+		memset((void *)page, PAGE_UNUSED, start - page);
 
 	/*
 	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
 	 * consecutive sections. Remember for the last added PMD the last
@@ -140,11 +140,11 @@ static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 /* Returns true if the PMD is completely unused and can be freed. */
 static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
 {
-	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
+	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
 
 	vmemmap_flush_unused_sub_pmd();
-	memset(__va(start), PAGE_UNUSED, end - start);
-	return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
+	memset((void *)start, PAGE_UNUSED, end - start);
+	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
 }
 
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
@@ -165,7 +165,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 			if (pte_none(*pte))
 				continue;
 			if (!direct)
-				vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
+				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
 			pte_clear(&init_mm, addr, pte);
 		} else if (pte_none(*pte)) {
 			if (!direct) {
@@ -175,7 +175,7 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 					goto out;
 				pte_val(*pte) = __pa(new_page) | prot;
 			} else {
-				pte_val(*pte) = addr | prot;
+				pte_val(*pte) = __pa(addr) | prot;
 			}
 		} else {
 			continue;
 		}
@@ -200,7 +200,7 @@ static void try_free_pte_table(pmd_t *pmd, unsigned long start)
 		if (!pte_none(*pte))
 			return;
 	}
-	vmem_pte_free(__va(pmd_deref(*pmd)));
+	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
 	pmd_clear(pmd);
 }
@@ -241,7 +241,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 			    IS_ALIGNED(next, PMD_SIZE) &&
 			    MACHINE_HAS_EDAT1 && addr && direct &&
 			    !debug_pagealloc_enabled()) {
-				pmd_val(*pmd) = addr | prot;
+				pmd_val(*pmd) = __pa(addr) | prot;
 				pages++;
 				continue;
 			} else if (!direct && MACHINE_HAS_EDAT1) {
@@ -337,7 +337,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 			    IS_ALIGNED(next, PUD_SIZE) &&
 			    MACHINE_HAS_EDAT2 && addr && direct &&
 			    !debug_pagealloc_enabled()) {
-				pud_val(*pud) = addr | prot;
+				pud_val(*pud) = __pa(addr) | prot;
 				pages++;
 				continue;
 			}
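
The recurring pattern in the hunks above: page table entries encode
physical addresses, while the table-walking code handles virtual
ones, hence `addr | prot` becoming `__pa(addr) | prot` at the PTE,
PMD and PUD levels. A hedged sketch of that convention (the pte_t
layout, bit values and the set_pte_identity() helper are simplified
illustrations, not the s390 definitions):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET 0x0UL  /* identity mapping: phys == virt */

typedef struct { uint64_t val; } pte_t;

/* Illustrative stand-in for the kernel's __pa(). */
static uint64_t to_phys(const void *vaddr)
{
	return (uint64_t)(uintptr_t)vaddr - PAGE_OFFSET;
}

/*
 * After the patch, 'addr' stays virtual for the whole table walk and
 * is converted exactly once, when it is encoded into the entry.
 * Storing the virtual address directly (the old 'addr | prot') only
 * worked because the conversion was effectively a no-op.
 */
static void set_pte_identity(pte_t *pte, const void *addr, uint64_t prot)
{
	pte->val = to_phys(addr) | prot;
}

int main(void)
{
	static uint64_t frame[512];  /* pretend this is the mapped region */
	pte_t pte;

	set_pte_identity(&pte, frame, 0x3 /* illustrative protection bits */);
	printf("pte value: %#llx\n", (unsigned long long)pte.val);
	return 0;
}
```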