From c3fa64c99c61d99631a8e06a6cf991c35c98ec7d Mon Sep 17 00:00:00 2001
From: Fabiano Rosas
Date: Wed, 25 May 2022 10:05:52 -0300
Subject: KVM: PPC: Book3S HV: Decouple the debug timing from the P8 entry path

We are currently doing the timing for debug purposes of the P9 entry
path using the accumulators and terminology defined by the old entry
path for P8 machines. Not only are the "real-mode" and "napping"
mentions out of place for the P9 Radix entry path, but we also cannot
change them, because the timing code is coupled to the structures
defined in struct kvm_vcpu_arch.

Add a new CONFIG_KVM_BOOK3S_HV_P9_TIMING to enable the timing code for
the P9 entry path. For now, just add the new CONFIG and duplicate the
structures. A subsequent patch will add the P9 changes.

Signed-off-by: Fabiano Rosas
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220525130554.2614394-4-farosas@linux.ibm.com
---
 arch/powerpc/kvm/book3s_hv.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/kvm/book3s_hv.c')

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e08fb3124dca..108ee563f72a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2660,11 +2660,19 @@ static struct debugfs_timings_element {
 	const char *name;
 	size_t offset;
 } timings[] = {
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
 	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
 	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
 	{"rm_exit",	offsetof(struct kvm_vcpu, arch.rm_exit)},
 	{"guest",	offsetof(struct kvm_vcpu, arch.guest_time)},
 	{"cede",	offsetof(struct kvm_vcpu, arch.cede_time)},
+#else
+	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
+	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
+	{"rm_exit",	offsetof(struct kvm_vcpu, arch.rm_exit)},
+	{"guest",	offsetof(struct kvm_vcpu, arch.guest_time)},
+	{"cede",	offsetof(struct kvm_vcpu, arch.cede_time)},
+#endif
 };

 #define N_TIMINGS	(ARRAY_SIZE(timings))

@@ -2783,8 +2791,9 @@ static const struct file_operations debugfs_timings_ops = {
 /* Create a debugfs directory for the vcpu */
 static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu,
					      struct dentry *debugfs_dentry)
 {
-	debugfs_create_file("timings", 0444, debugfs_dentry, vcpu,
-			    &debugfs_timings_ops);
+	if (cpu_has_feature(CPU_FTR_ARCH_300) == IS_ENABLED(CONFIG_KVM_BOOK3S_HV_P9_TIMING))
+		debugfs_create_file("timings", 0444, debugfs_dentry, vcpu,
+				    &debugfs_timings_ops);
 	return 0;
 }
--
cgit v1.2.3

From b44bb1b7cbbae66ec73868f5fbd57c54f0612d1c Mon Sep 17 00:00:00 2001
From: Fabiano Rosas
Date: Wed, 25 May 2022 10:05:54 -0300
Subject: KVM: PPC: Book3S HV: Provide more detailed timings for P9 entry path

Alter the data collection points for the debug timing code in the P9
path to be more in line with what the code does.

The points where we accumulate time are now the following:

vcpu_entry: From vcpu_run_hv entry until the start of the inner loop;

guest_entry: From the start of the inner loop until the guest entry in
asm;

in_guest: From the guest entry in asm until the return to KVM C code;

guest_exit: From the return into KVM C code until the corresponding
hypercall/page fault handling or re-entry into the guest;

hypercall: Time spent handling hcalls in the kernel (hcalls can go to
QEMU, not accounted here);

page_fault: Time spent handling page faults;

vcpu_exit: vcpu_run_hv exit (almost no code here currently).

Like before, these are exposed in debugfs in a file called "timings".
There are four values:

- number of occurrences of the accumulation point;
- total time the vcpu spent in the phase in ns;
- shortest time the vcpu spent in the phase in ns;
- longest time the vcpu spent in the phase in ns.

===
Before:

rm_entry: 53132 16793518 256 4060
rm_intr: 53132 2125914 22 340
rm_exit: 53132 24108344 374 2180
guest: 53132 40980507996 404 9997650
cede: 0 0 0 0

After:

vcpu_entry: 34637 7716108 178 4416
guest_entry: 52414 49365608 324 747542
in_guest: 52411 40828715840 258 9997480
guest_exit: 52410 19681717182 826 102496674
vcpu_exit: 34636 1744462 38 182
hypercall: 45712 22878288 38 1307962
page_fault: 992 111104034 568 168688

With just one instruction (hcall):

vcpu_entry: 1 942 942 942
guest_entry: 1 4044 4044 4044
in_guest: 1 1540 1540 1540
guest_exit: 1 3542 3542 3542
vcpu_exit: 1 80 80 80
hypercall: 0 0 0 0
page_fault: 0 0 0 0
===

Signed-off-by: Fabiano Rosas
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220525130554.2614394-6-farosas@linux.ibm.com
---
 arch/powerpc/include/asm/kvm_host.h   | 12 +++++++-----
 arch/powerpc/kvm/Kconfig              |  9 +++++----
 arch/powerpc/kvm/book3s_hv.c          | 23 ++++++++++++++++++-----
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 14 ++++----------
 4 files changed, 34 insertions(+), 24 deletions(-)

(limited to 'arch/powerpc/kvm/book3s_hv.c')

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index a0f44762a069..eeba679802cf 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -831,11 +831,13 @@ struct kvm_vcpu_arch {
 	struct kvmhv_tb_accumulator *cur_activity;	/* What we're timing */
 	u64	cur_tb_start;			/* when it started */
 #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
-	struct kvmhv_tb_accumulator rm_entry;	/* real-mode entry code */
-	struct kvmhv_tb_accumulator rm_intr;	/* real-mode intr handling */
-	struct kvmhv_tb_accumulator rm_exit;	/* real-mode exit code */
-	struct kvmhv_tb_accumulator guest_time;	/* guest execution */
-	struct kvmhv_tb_accumulator cede_time;	/* time napping inside guest */
+	struct kvmhv_tb_accumulator vcpu_entry;
+	struct kvmhv_tb_accumulator vcpu_exit;
+	struct kvmhv_tb_accumulator in_guest;
+	struct kvmhv_tb_accumulator hcall;
+	struct kvmhv_tb_accumulator pg_fault;
+	struct kvmhv_tb_accumulator guest_entry;
+	struct kvmhv_tb_accumulator guest_exit;
 #else
 	struct kvmhv_tb_accumulator rm_entry;	/* real-mode entry code */
 	struct kvmhv_tb_accumulator rm_intr;	/* real-mode intr handling */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 191347f44731..cedf1e0f50e1 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -135,10 +135,11 @@ config KVM_BOOK3S_HV_P9_TIMING
 	select KVM_BOOK3S_HV_EXIT_TIMING
 	depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS
 	help
-	  Calculate time taken for each vcpu in various parts of the
-	  code. The total, minimum and maximum times in nanoseconds
-	  together with the number of executions are reported in debugfs in
-	  kvm/vm#/vcpu#/timings.
+	  Calculate time taken for each vcpu during vcpu entry and
+	  exit, time spent inside the guest and time spent handling
+	  hypercalls and page faults. The total, minimum and maximum
+	  times in nanoseconds together with the number of executions
+	  are reported in debugfs in kvm/vm#/vcpu#/timings.

 	  If unsure, say N.
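The "timings" entries are backed by one kvmhv_tb_accumulator per
accumulation point (the cur_activity/cur_tb_start fields and the
accumulator type are visible in the kvm_host.h hunk above). The sketch
below is an illustrative reconstruction of that bookkeeping, not the
exact kernel implementation: the real helper also applies the vcore
timebase offset, uses memory barriers around the seqcount updates, and
is compiled out when the timing config is disabled.

    struct kvmhv_tb_accumulator {
    	u64	seqcount;	/* used to synchronize access, also count * 2 */
    	u64	tb_total;	/* total time in timebase ticks */
    	u64	tb_min;		/* min time */
    	u64	tb_max;		/* max time */
    };

    static void accumulate_time(struct kvm_vcpu *vcpu,
    			    struct kvmhv_tb_accumulator *next)
    {
    	struct kvmhv_tb_accumulator *curr = vcpu->arch.cur_activity;
    	u64 tb = mftb();
    	u64 delta = tb - vcpu->arch.cur_tb_start;
    	u64 seq;

    	/* Switch over to timing the next phase first... */
    	vcpu->arch.cur_activity = next;
    	vcpu->arch.cur_tb_start = tb;

    	if (!curr)
    		return;

    	/* ...then fold the elapsed ticks into the phase that just ended. */
    	seq = curr->seqcount;
    	curr->seqcount = seq + 1;	/* odd: update in progress */
    	curr->tb_total += delta;
    	if (seq == 0 || delta < curr->tb_min)
    		curr->tb_min = delta;
    	if (delta > curr->tb_max)
    		curr->tb_max = delta;
    	curr->seqcount = seq + 2;	/* even: update complete */
    }

The start_timing() and end_timing() calls used in the hunks below are
thin wrappers around this helper: start_timing() opens the first phase
with no previous accumulator to close, and end_timing() closes the
current phase and clears cur_activity. The debugfs read side then
reports, per line, the occurrence count (seqcount / 2) and the
total/min/max times converted from timebase ticks to nanoseconds, which
is the four-value format described above.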
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 108ee563f72a..c68883170a82 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2661,11 +2661,13 @@ static struct debugfs_timings_element {
 	size_t offset;
 } timings[] = {
 #ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
-	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
-	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
-	{"rm_exit",	offsetof(struct kvm_vcpu, arch.rm_exit)},
-	{"guest",	offsetof(struct kvm_vcpu, arch.guest_time)},
-	{"cede",	offsetof(struct kvm_vcpu, arch.cede_time)},
+	{"vcpu_entry",	offsetof(struct kvm_vcpu, arch.vcpu_entry)},
+	{"guest_entry",	offsetof(struct kvm_vcpu, arch.guest_entry)},
+	{"in_guest",	offsetof(struct kvm_vcpu, arch.in_guest)},
+	{"guest_exit",	offsetof(struct kvm_vcpu, arch.guest_exit)},
+	{"vcpu_exit",	offsetof(struct kvm_vcpu, arch.vcpu_exit)},
+	{"hypercall",	offsetof(struct kvm_vcpu, arch.hcall)},
+	{"page_fault",	offsetof(struct kvm_vcpu, arch.pg_fault)},
 #else
 	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
 	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
@@ -4014,8 +4016,10 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
 	switch_pmu_to_guest(vcpu, &host_os_sprs);
+	accumulate_time(vcpu, &vcpu->arch.in_guest);
 	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
 				  __pa(&vcpu->arch.regs));
+	accumulate_time(vcpu, &vcpu->arch.guest_exit);
 	kvmhv_restore_hv_return_state(vcpu, &hvregs);
 	switch_pmu_to_host(vcpu, &host_os_sprs);
 	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
@@ -4703,6 +4707,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	struct kvm *kvm;
 	unsigned long msr;

+	start_timing(vcpu, &vcpu->arch.vcpu_entry);
+
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
@@ -4768,6 +4774,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

 	do {
+		accumulate_time(vcpu, &vcpu->arch.guest_entry);
 		if (cpu_has_feature(CPU_FTR_ARCH_300))
 			r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
 						  vcpu->arch.vcore->lpcr);
@@ -4775,6 +4782,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 			r = kvmppc_run_vcpu(vcpu);

 		if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+			accumulate_time(vcpu, &vcpu->arch.hcall);
+
 			if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
 				/*
 				 * These should have been caught reflected
@@ -4790,6 +4799,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 			trace_kvm_hcall_exit(vcpu, r);
 			kvmppc_core_prepare_to_enter(vcpu);
 		} else if (r == RESUME_PAGE_FAULT) {
+			accumulate_time(vcpu, &vcpu->arch.pg_fault);
 			srcu_idx = srcu_read_lock(&kvm->srcu);
 			r = kvmppc_book3s_hv_page_fault(vcpu,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
@@ -4801,12 +4811,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 			r = kvmppc_xics_rm_complete(vcpu, 0);
 		}
 	} while (is_kvmppc_resume_guest(r));
+	accumulate_time(vcpu, &vcpu->arch.vcpu_exit);

 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&kvm->arch.vcpus_running);

 	srr_regs_clobbered();

+	end_timing(vcpu);
+
 	return r;
 }
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 32e078db8da2..e740eca45862 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -779,8 +779,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
 	WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));

-	start_timing(vcpu, &vcpu->arch.rm_entry);
-
 	vcpu->arch.ceded = 0;

 	/* Save MSR for restore, with EE clear. */
@@ -941,13 +939,13 @@ tm_return_to_guest:
 	mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
 	mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);

-	accumulate_time(vcpu, &vcpu->arch.guest_time);
-
 	switch_pmu_to_guest(vcpu, &host_os_sprs);
+	accumulate_time(vcpu, &vcpu->arch.in_guest);
+
 	kvmppc_p9_enter_guest(vcpu);
-	switch_pmu_to_host(vcpu, &host_os_sprs);

-	accumulate_time(vcpu, &vcpu->arch.rm_intr);
+	accumulate_time(vcpu, &vcpu->arch.guest_exit);
+	switch_pmu_to_host(vcpu, &host_os_sprs);

 	/* XXX: Could get these from r11/12 and paca exsave instead */
 	vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
@@ -1042,8 +1040,6 @@ tm_return_to_guest:
 #endif
 	}

-	accumulate_time(vcpu, &vcpu->arch.rm_exit);
-
 	/* Advance host PURR/SPURR by the amount used by guest */
 	purr = mfspr(SPRN_PURR);
 	spurr = mfspr(SPRN_SPURR);
@@ -1150,8 +1146,6 @@ tm_return_to_guest:
 		asm volatile(PPC_CP_ABORT);

 out:
-	end_timing(vcpu);
-
 	return trap;
 }
 EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
--
cgit v1.2.3

From 2b461880c20777d317b4ad24ef040918860133ca Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Mon, 18 Jul 2022 19:51:58 +1000
Subject: powerpc: Fix all occurences of duplicate words

Since commit 87c78b612f4f ("powerpc: Fix all occurences of "the the"")
fixed "the the", there's now a steady stream of patches fixing other
duplicate words.

Just fix them all at once, to save the overhead of dealing with
individual patches for each case.

This leaves a few cases of "that that", which in some contexts is
correct.

Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220718095158.326606-1-mpe@ellerman.id.au
---
 arch/powerpc/crypto/aes-spe-glue.c           | 2 +-
 arch/powerpc/kernel/btext.c                  | 2 +-
 arch/powerpc/kernel/eeh_driver.c             | 2 +-
 arch/powerpc/kernel/exceptions-64s.S         | 2 +-
 arch/powerpc/kernel/pci-common.c             | 2 +-
 arch/powerpc/kernel/pci_dn.c                 | 2 +-
 arch/powerpc/kernel/ptrace/ptrace-vsx.c      | 2 +-
 arch/powerpc/kernel/trace/ftrace.c           | 6 +++---
 arch/powerpc/kernel/watchdog.c               | 2 +-
 arch/powerpc/kvm/book3s_hv.c                 | 2 +-
 arch/powerpc/mm/book3s64/hash_utils.c        | 2 +-
 arch/powerpc/platforms/4xx/cpm.c             | 2 +-
 arch/powerpc/platforms/powernv/vas-fault.c   | 2 +-
 arch/powerpc/platforms/pseries/eeh_pseries.c | 2 +-
 arch/powerpc/platforms/pseries/papr_scm.c    | 2 +-
 15 files changed, 17 insertions(+), 17 deletions(-)

(limited to 'arch/powerpc/kvm/book3s_hv.c')

diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index e8dfe9fb0266..efab78a3a8f6 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -28,7 +28,7 @@
  * instructions per clock cycle using one 32/64 bit unit (SU1) and one 32
  * bit unit (SU2). One of these can be a memory access that is executed via
  * a single load and store unit (LSU). XTS-AES-256 takes ~780 operations per
- * 16 byte block block or 25 cycles per byte. Thus 768 bytes of input data
+ * 16 byte block or 25 cycles per byte. Thus 768 bytes of input data
  * will need an estimated maximum of 20,000 cycles. Headroom for cache misses
  * included. Even with the low end model clocked at 667 MHz this equals to a
 * critical time window of less than 30us. The value has been chosen to
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 8f69bb07e500..2769889219bf 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -73,7 +73,7 @@ static inline void rmci_maybe_off(void)
  * the display during identify_machine() and MMU_Init()
  *
  * The display is mapped to virtual address 0xD0000000, rather
- * than 1:1, because some some CHRP machines put the frame buffer
+ * than 1:1, because some CHRP machines put the frame buffer
  * in the region starting at 0xC0000000 (PAGE_OFFSET).
  * This mapping is temporary and will disappear as soon as the
  * setup done by MMU_Init() is applied.
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 260273e56431..f279295179bd 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -750,7 +750,7 @@ static void eeh_pe_cleanup(struct eeh_pe *pe)
  * @pdev: pci_dev to check
  *
  * This function may return a false positive if we can't determine the slot's
- * presence state. This might happen for for PCIe slots if the PE containing
+ * presence state. This might happen for PCIe slots if the PE containing
  * the upstream bridge is also frozen, or the bridge is part of the same PE
  * as the device.
  *
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index b66dd6f775a4..3d0dc133a9ae 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -2779,7 +2779,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
 /*
  * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
- * - If it was a decrementer interrupt, we bump the dec to max and and return.
+ * - If it was a decrementer interrupt, we bump the dec to max and return.
  * - If it was a doorbell we return immediately since doorbells are edge
  *   triggered and won't automatically refire.
  * - If it was a HMI we return immediately since we handled it in realmode
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index c87999289752..cbb912ee92fa 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1088,7 +1088,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 	 */
 	pci_read_bridge_bases(bus);

-	/* Now fixup the bus bus */
+	/* Now fixup the bus */
 	pcibios_setup_bus_self(bus);
 }
 EXPORT_SYMBOL(pcibios_fixup_bus);
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 938ab8838ab5..7a35fc25a304 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -259,7 +259,7 @@ void remove_sriov_vf_pdns(struct pci_dev *pdev)
 		if (edev) {
 			/*
 			 * We allocate pci_dn's for the totalvfs count,
-			 * but only only the vfs that were activated
+			 * but only the vfs that were activated
 			 * have a configured PE.
 			 */
 			if (edev->pe)
diff --git a/arch/powerpc/kernel/ptrace/ptrace-vsx.c b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
index 1da4303128ef..7df08004c47d 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-vsx.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
@@ -71,7 +71,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
 }

 /*
- * Currently to set and and get all the vsx state, you need to call
+ * Currently to set and get all the vsx state, you need to call
  * the fp and VMX calls as well. This only get/sets the lower 32
  * 128bit VSX registers.
  */
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index cab67b5120b9..cb158c32b50b 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -125,7 +125,7 @@ __ftrace_make_nop(struct module *mod,
 		return -EFAULT;
 	}

-	/* Make sure that that this is still a 24bit jump */
+	/* Make sure that this is still a 24bit jump */
 	if (!is_bl_op(op)) {
 		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
 		return -EINVAL;
@@ -311,7 +311,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
 		return -EFAULT;
 	}

-	/* Make sure that that this is still a 24bit jump */
+	/* Make sure that this is still a 24bit jump */
 	if (!is_bl_op(op)) {
 		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
 		return -EINVAL;
@@ -564,7 +564,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 		return -EFAULT;
 	}

-	/* Make sure that that this is still a 24bit jump */
+	/* Make sure that this is still a 24bit jump */
 	if (!is_bl_op(op)) {
 		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
 		return -EINVAL;
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 7d28b9553654..dd882d00845d 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -353,7 +353,7 @@ static void watchdog_timer_interrupt(int cpu)
 	if (__wd_nmi_output && xchg(&__wd_nmi_output, 0)) {
 		/*
 		 * Something has called printk from NMI context. It might be
-		 * stuck, so this this triggers a flush that will get that
+		 * stuck, so this triggers a flush that will get that
 		 * printk output to the console.
 		 *
 		 * See wd_lockup_ipi.
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c68883170a82..529b6c6895bf 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -5665,7 +5665,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 	else
 		kvmppc_xics_clr_mapped(kvm, guest_gsi,
				       pimap->mapped[i].r_hwirq);

-	/* invalidate the entry (what do do on error from the above ?) */
+	/* invalidate the entry (what to do on error from the above ?) */
 	pimap->mapped[i].r_hwirq = 0;

 	/*
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index fc92613dc2bf..363a9447d63d 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -408,7 +408,7 @@ repeat:
					       ssize);
 		if (ret == -1) {
 			/*
-			 * Try to to keep bolted entries in primary.
+			 * Try to keep bolted entries in primary.
 			 * Remove non bolted entries and try insert again
 			 */
 			ret = mmu_hash_ops.hpte_remove(hpteg);
diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c
index 1d3bc35ee1a7..182e12855c27 100644
--- a/arch/powerpc/platforms/4xx/cpm.c
+++ b/arch/powerpc/platforms/4xx/cpm.c
@@ -63,7 +63,7 @@ static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
 	 * known as class 1, 2 and 3. For class 1 units, they are
 	 * unconditionally put to sleep when the corresponding CPM bit is
 	 * set. For class 2 and 3 units this is not case; if they can be
-	 * put to to sleep, they will. Here we do not verify, we just
+	 * put to sleep, they will. Here we do not verify, we just
 	 * set them and expect them to eventually go off when they can.
 	 */
 	value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index c1bfad56447d..2b47d5a86328 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -77,7 +77,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
 	/*
 	 * VAS can interrupt with multiple page faults. So process all
 	 * valid CRBs within fault FIFO until reaches invalid CRB.
-	 * We use CCW[0] and pswid to validate validate CRBs:
+	 * We use CCW[0] and pswid to validate CRBs:
 	 *
 	 * CCW[0]	Reserved bit. When NX pastes CRB, CCW[0]=0
 	 *		OS sets this bit to 1 after reading CRB.
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 1b0c901a6f3b..8e40ccac0f44 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -71,7 +71,7 @@ static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
 	if (pdev->is_virtfn) {
 		/*
 		 * FIXME: This really should be handled by choosing the right
-		 *        parent PE in in pseries_eeh_init_edev().
+		 *        parent PE in pseries_eeh_init_edev().
 		 */
 		struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe;
 		struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 69f21d30394b..20f6ed813bff 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -29,7 +29,7 @@
 	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
 	 (1ul << ND_CMD_CALL))

-/* DIMM health bitmap bitmap indicators */
+/* DIMM health bitmap indicators */
 /* SCM device is unable to persist memory contents */
 #define PAPR_PMEM_UNARMED	(1ULL << (63 - 0))
 /* SCM device failed to persist memory contents */
--
cgit v1.2.3