Diffstat (limited to 'arch/parisc')
-rw-r--r-- | arch/parisc/Kconfig                     |  1
-rw-r--r-- | arch/parisc/include/asm/assembly.h      |  4
-rw-r--r-- | arch/parisc/include/asm/atomic.h        | 27
-rw-r--r-- | arch/parisc/include/asm/bugs.h          | 20
-rw-r--r-- | arch/parisc/include/asm/pgtable.h       |  3
-rw-r--r-- | arch/parisc/kernel/cache.c              | 26
-rw-r--r-- | arch/parisc/kernel/pci-dma.c            |  2
-rw-r--r-- | arch/parisc/kernel/process.c            |  4
-rw-r--r-- | arch/parisc/kernel/smp.c                |  7
-rw-r--r-- | arch/parisc/kernel/syscalls/syscall.tbl |  1
-rw-r--r-- | arch/parisc/mm/hugetlbpage.c            |  4
11 files changed, 49 insertions, 50 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 967bde65dd0e..c0b4b1c253d1 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -57,6 +57,7 @@ config PARISC
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_REGS_AND_STACK_ACCESS_API
+	select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_IRQ_MIGRATION if SMP
 	select HAVE_UNSTABLE_SCHED_CLOCK if SMP
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 0f0d4a496fef..75677b526b2b 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -90,10 +90,6 @@
 #include <asm/asmregs.h>
 #include <asm/psw.h>
 
-	sp = 30
-	gp = 27
-	ipsw = 22
-
 /*
  * We provide two versions of each macro to convert from physical
  * to virtual and vice versa. The "_r1" versions take one argument
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index dd5a299ada69..d4f023887ff8 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -73,10 +73,6 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
 	return READ_ONCE((v)->counter);
 }
 
-/* exported interface */
-#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #define ATOMIC_OP(op, c_op) \
 static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
 { \
@@ -122,6 +118,11 @@ static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
 
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op) \
 	ATOMIC_OP(op, c_op) \
@@ -131,6 +132,10 @@ ATOMIC_OPS(and, &=)
 ATOMIC_OPS(or, |=)
 ATOMIC_OPS(xor, ^=)
 
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
@@ -185,6 +190,11 @@ static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
 
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+
 #undef ATOMIC64_OPS
 #define ATOMIC64_OPS(op, c_op) \
 	ATOMIC64_OP(op, c_op) \
@@ -194,6 +204,10 @@ ATOMIC64_OPS(and, &=)
 ATOMIC64_OPS(or, |=)
 ATOMIC64_OPS(xor, ^=)
 
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
@@ -218,11 +232,6 @@ arch_atomic64_read(const atomic64_t *v)
 	return READ_ONCE((v)->counter);
 }
 
-/* exported interface */
-#define arch_atomic64_cmpxchg(v, o, n) \
-	((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #endif /* !CONFIG_64BIT */
diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h
deleted file mode 100644
index 0a7f9db6bd1c..000000000000
--- a/arch/parisc/include/asm/bugs.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/asm-parisc/bugs.h
- *
- * Copyright (C) 1999 Mike Shaver
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- *	void check_bugs(void);
- */
-
-#include <asm/processor.h>
-
-static inline void check_bugs(void)
-{
-//	identify_cpu(&boot_cpu_data);
-}
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index e715df5385d6..5656395c95ee 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -472,9 +472,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 #define pte_same(A,B) (pte_val(A) == pte_val(B))
 
-struct seq_file;
-extern void arch_report_meminfo(struct seq_file *m);
-
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ca4a302d4365..501160250bb7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -426,10 +426,15 @@ void flush_dcache_page(struct page *page)
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
 		if (parisc_requires_coherency()) {
+			bool needs_flush = false;
 			pte_t *ptep;
 
 			ptep = get_ptep(mpnt->vm_mm, addr);
-			if (ptep && pte_needs_flush(*ptep))
+			if (ptep) {
+				needs_flush = pte_needs_flush(*ptep);
+				pte_unmap(ptep);
+			}
+			if (needs_flush)
 				flush_user_cache_page(mpnt, addr);
 		} else {
 			/*
@@ -561,14 +566,20 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 static void flush_cache_page_if_present(struct vm_area_struct *vma,
 	unsigned long vmaddr, unsigned long pfn)
 {
-	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
+	bool needs_flush = false;
+	pte_t *ptep;
 
 	/*
 	 * The pte check is racy and sometimes the flush will trigger
 	 * a non-access TLB miss. Hopefully, the page has already been
 	 * flushed.
 	 */
-	if (ptep && pte_needs_flush(*ptep))
+	ptep = get_ptep(vma->vm_mm, vmaddr);
+	if (ptep) {
+		needs_flush = pte_needs_flush(*ptep);
+		pte_unmap(ptep);
+	}
+	if (needs_flush)
 		flush_cache_page(vma, vmaddr, pfn);
 }
@@ -635,17 +646,22 @@ static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, u
 	pte_t *ptep;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		bool needs_flush = false;
 		/*
 		 * The vma can contain pages that aren't present. Although
 		 * the pte search is expensive, we need the pte to find the
 		 * page pfn and to check whether the page should be flushed.
 		 */
 		ptep = get_ptep(vma->vm_mm, addr);
-		if (ptep && pte_needs_flush(*ptep)) {
+		if (ptep) {
+			needs_flush = pte_needs_flush(*ptep);
+			pfn = pte_pfn(*ptep);
+			pte_unmap(ptep);
+		}
+		if (needs_flush) {
 			if (parisc_requires_coherency()) {
 				flush_user_cache_page(vma, addr);
 			} else {
-				pfn = pte_pfn(*ptep);
 				if (WARN_ON(!pfn_valid(pfn)))
 					return;
 				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 71ed5391f29d..415f12d5bab3 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -164,7 +164,7 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
 		pmd_clear(pmd);
 		return;
 	}
-	pte = pte_offset_map(pmd, vaddr);
+	pte = pte_offset_kernel(pmd, vaddr);
 	vaddr &= ~PMD_MASK;
 	end = vaddr + size;
 	if (end > PMD_SIZE)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 24411ab79c30..abdbf038d643 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -171,8 +171,8 @@ void __noreturn arch_cpu_idle_dead(void)
 
 	local_irq_disable();
 
-	/* Tell __cpu_die() that this CPU is now safe to dispose of. */
-	(void)cpu_report_death();
+	/* Tell the core that this CPU is now safe to dispose of. */
+	cpuhp_ap_report_dead();
 
 	/* Ensure that the cache lines are written out. */
 	flush_cache_all_local();
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 83348125b524..d0eb1bd19a13 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -499,11 +499,10 @@ int __cpu_disable(void)
 void __cpu_die(unsigned int cpu)
 {
 	pdc_cpu_rendezvous_lock();
+}
 
-	if (!cpu_wait_death(cpu, 5)) {
-		pr_crit("CPU%u: cpu didn't die\n", cpu);
-		return;
-	}
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
+{
 	pr_info("CPU%u: is shutting down\n", cpu);
 
 	/* set task's state to interruptible sleep */
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 0e42fceb2d5e..3c71fad78318 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -448,3 +448,4 @@
 448	common	process_mrelease	sys_process_mrelease
 449	common	futex_waitv	sys_futex_waitv
 450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node
+451	common	cachestat	sys_cachestat
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d1d3990b83f6..a8a1a7c1e16e 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -66,7 +66,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, addr);
+			pte = pte_alloc_huge(mm, pmd, addr);
 	}
 	return pte;
 }
@@ -90,7 +90,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		if (!pud_none(*pud)) {
 			pmd = pmd_offset(pud, addr);
 			if (!pmd_none(*pmd))
-				pte = pte_offset_map(pmd, addr);
+				pte = pte_offset_huge(pmd, addr);
 		}
 	}
 }