From c0d5b0c721b67d13796914031cc8b9bbf2c7f453 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Sat, 27 Jun 2020 11:16:53 +0300
Subject: sparc32: srmmu: improve type safety of __nocache_fix()

The __nocache_fix(VADDR) macro is used to add an offset for pointers and
its "return type" is 'void *'.

We can do better and keep the type information simply by casting the
return value to (__typeof__(VADDR)).

This will ".. show when those pgd/p4d/pud pointers get mis-used because
they don't end up dropping the type info.."

The addition of the cast to __nocache_fix() also allows removing the
explicit casts at its call sites.

Suggested-by: Linus Torvalds
Signed-off-by: Mike Rapoport
Link: https://lkml.kernel.org/r/CAHk-=wisORTa7QVPnFqNw9pFs62UiwgsD4C4d=MtYy1o4JPyGQ@mail.gmail.com
Signed-off-by: David S. Miller
---
 arch/sparc/mm/srmmu.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch/sparc/mm/srmmu.c')

diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index f5fa07958f34..da4e10b4cb37 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -689,7 +689,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
 		pgdp = pgd_offset_k(start);
 		p4dp = p4d_offset(pgdp, start);
 		pudp = pud_offset(p4dp, start);
-		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
+		if (pud_none(*__nocache_fix(pudp))) {
 			pmdp = __srmmu_get_nocache(
 			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
@@ -698,7 +698,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
 			pud_set(__nocache_fix(pudp), pmdp);
 		}
 		pmdp = pmd_offset(__nocache_fix(pudp), start);
-		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+		if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
 			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
@@ -810,11 +810,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
 		p4dp = p4d_offset(pgdp, start);
 		pudp = pud_offset(p4dp, start);
 		if (what == 2) {
-			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
+			*__nocache_fix(pgdp) = __pgd(probed);
 			start += PGDIR_SIZE;
 			continue;
 		}
-		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
+		if (pud_none(*__nocache_fix(pudp))) {
 			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
 						   SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
@@ -828,7 +828,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
 			start += PMD_SIZE;
 			continue;
 		}
-		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+		if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
 			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
@@ -836,7 +836,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
 			pmd_set(__nocache_fix(pmdp), ptep);
 		}
 		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
-		*(pte_t *)__nocache_fix(ptep) = __pte(probed);
+		*__nocache_fix(ptep) = __pte(probed);
 		start += PAGE_SIZE;
 	}
 }
@@ -850,7 +850,7 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base
 	unsigned long big_pte;
 
 	big_pte = KERNEL_PTE(phys_base >> 4);
-	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
+	*__nocache_fix(pgdp) = __pgd(big_pte);
 }
 
 /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
@@ -940,7 +940,7 @@ void __init srmmu_paging_init(void)
 	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
 
 	for (i = 0; i < num_contexts; i++)
-		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
+		srmmu_ctxd_set(__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
 
 	flush_cache_all();
 	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
--
cgit v1.2.3
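For readers unfamiliar with the __typeof__ trick, the sketch below is a minimal, stand-alone illustration of the pattern the patch relies on; it is not the kernel's actual macro (the real __nocache_fix() definition sits outside the diff shown above). The names fix_untyped(), fix_typed(), FIX_OFFSET and the stripped-down pgd_t/pmd_t types are hypothetical. The point is only that a macro returning 'void *' lets the compiler silently accept assignment to the wrong pointer type, while casting the result to (__typeof__(VADDR)) preserves the argument's type and turns the mix-up into a warning.

/*
 * Stand-alone sketch of an offset-adjusting macro, contrasting a
 * 'void *' result with a __typeof__-preserving result.
 */
#include <stdio.h>

typedef struct { unsigned long pgd; } pgd_t;	/* simplified stand-in */
typedef struct { unsigned long pmd; } pmd_t;	/* simplified stand-in */

#define FIX_OFFSET 0x1000UL	/* hypothetical fixed offset */

/* Old style: the adjusted pointer comes back as 'void *'. */
#define fix_untyped(VADDR) \
	((void *)((unsigned long)(VADDR) + FIX_OFFSET))

/* New style: cast the result back to the argument's own type. */
#define fix_typed(VADDR) \
	((__typeof__(VADDR))((unsigned long)(VADDR) + FIX_OFFSET))

int main(void)
{
	pgd_t pgd_table[4];
	pgd_t *pgdp = pgd_table;

	/* Compiles silently: 'void *' converts to any object pointer type. */
	pmd_t *oops = fix_untyped(pgdp);

	/*
	 * fix_typed(pgdp) is still a 'pgd_t *'; assigning it to a
	 * 'pmd_t *' would draw an incompatible-pointer-types warning:
	 *
	 *	pmd_t *caught = fix_typed(pgdp);
	 */
	pgd_t *ok = fix_typed(pgdp);

	/* Only printing the adjusted addresses, never dereferencing them. */
	printf("untyped: %p, typed: %p\n", (void *)oops, (void *)ok);
	return 0;
}

With gcc -Wall, uncommenting the pmd_t assignment under fix_typed() triggers the incompatible-pointer-types warning, while the fix_untyped() version compiles without complaint, which is the misuse the commit message says the __typeof__ cast is meant to surface.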