author     Vijay Kumar <vijay.ac.kumar@oracle.com>  2017-07-21 18:23:57 +0200
committer  David S. Miller <davem@davemloft.net>    2017-08-10 07:15:10 +0200
commit     8536e02e912a46aa1c100bb1f5ccdca42e4e1ad2 (patch)
tree       1a27965ab0da09dd07cfa6a7515aa21deee0984e /arch/sparc/kernel/smp_64.c
parent     sparc64: Add a new hypercall CPU_POKE (diff)
sparc64: Use CPU_POKE to resume idle cpu
Use CPU_POKE hypervisor call to resume idle cpu if supported.

Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Reviewed-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
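The following is a minimal, standalone sketch (not kernel code) of the handshake this patch introduces in the diff below: the sender sets a per-cpu "poke" flag and issues the CPU_POKE hypercall; the woken CPU consumes the flag in scheduler_poke() and raises the reschedule softint. Per-cpu storage, the hypervisor call and the softint are replaced with plain variables and prints so the control flow can be compiled and run in user space; fake_sun4v_cpu_poke(), the cpu argument to scheduler_poke() and the main() driver are hypothetical stand-ins, not part of the patch.

/* Hedged user-space sketch of the CPU_POKE handshake added by this patch. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4
#define HV_EOK  0UL

static bool poke[NR_CPUS];      /* stand-in for DEFINE_PER_CPU(bool, poke) */
static bool cpu_poke = true;    /* set by smp_init_cpu_poke() when HV API >= 1.6 */

/* Pretend hypervisor call: always succeeds in this sketch. */
static unsigned long fake_sun4v_cpu_poke(int cpu)
{
	(void)cpu;
	return HV_EOK;
}

/* Mirrors send_cpu_poke(): mark the target, then ask the hypervisor to poke it. */
static unsigned long send_cpu_poke(int cpu)
{
	unsigned long hv_err;

	poke[cpu] = true;
	hv_err = fake_sun4v_cpu_poke(cpu);
	if (hv_err != HV_EOK) {
		poke[cpu] = false;  /* poke failed; caller falls back to an IPI */
		fprintf(stderr, "sun4v_cpu_poke() fails err=%lu\n", hv_err);
	}
	return hv_err;
}

/* Mirrors scheduler_poke(): run by the woken CPU when it leaves idle. */
static void scheduler_poke(int cpu)
{
	if (!cpu_poke || !poke[cpu])
		return;

	poke[cpu] = false;
	printf("cpu%d: pending poke seen, raising reschedule softint\n", cpu);
}

int main(void)
{
	if (send_cpu_poke(2) == HV_EOK)  /* sender side: smp_send_reschedule(2) */
		scheduler_poke(2);       /* target side: idle exit path */
	return 0;
}

In the real patch the fallback path matters: if the target is not idle, CPU_POKE is unsupported, or the hypercall fails, smp_send_reschedule() still delivers a conventional cross-call IPI, as shown in the diff below.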
Diffstat (limited to 'arch/sparc/kernel/smp_64.c')
-rw-r--r--   arch/sparc/kernel/smp_64.c   80
1 file changed, 77 insertions(+), 3 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 3218bc43302e..4898329970c5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -74,6 +74,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map);
static cpumask_t smp_commenced_mask;
+static DEFINE_PER_CPU(bool, poke);
+static bool cpu_poke;
+
void smp_info(struct seq_file *m)
{
int i;
@@ -1439,15 +1442,86 @@ void __init smp_cpus_done(unsigned int max_cpus)
{
}
+static void send_cpu_ipi(int cpu)
+{
+ xcall_deliver((u64) &xcall_receive_signal,
+ 0, 0, cpumask_of(cpu));
+}
+
+void scheduler_poke(void)
+{
+ if (!cpu_poke)
+ return;
+
+ if (!__this_cpu_read(poke))
+ return;
+
+ __this_cpu_write(poke, false);
+ set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
+}
+
+static unsigned long send_cpu_poke(int cpu)
+{
+ unsigned long hv_err;
+
+ per_cpu(poke, cpu) = true;
+ hv_err = sun4v_cpu_poke(cpu);
+ if (hv_err != HV_EOK) {
+ per_cpu(poke, cpu) = false;
+ pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
+ __func__, hv_err);
+ }
+
+ return hv_err;
+}
+
void smp_send_reschedule(int cpu)
{
if (cpu == smp_processor_id()) {
WARN_ON_ONCE(preemptible());
set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
- } else {
- xcall_deliver((u64) &xcall_receive_signal,
- 0, 0, cpumask_of(cpu));
+ return;
+ }
+
+ /* Use cpu poke to resume idle cpu if supported. */
+ if (cpu_poke && idle_cpu(cpu)) {
+ unsigned long ret;
+
+ ret = send_cpu_poke(cpu);
+ if (ret == HV_EOK)
+ return;
}
+
+ /* Use IPI in following cases:
+ * - cpu poke not supported
+ * - cpu not idle
+ * - send_cpu_poke() returns with error
+ */
+ send_cpu_ipi(cpu);
+}
+
+void smp_init_cpu_poke(void)
+{
+ unsigned long major;
+ unsigned long minor;
+ int ret;
+
+ if (tlb_type != hypervisor)
+ return;
+
+ ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
+ if (ret) {
+ pr_debug("HV_GRP_CORE is not registered\n");
+ return;
+ }
+
+ if (major == 1 && minor >= 6) {
+ /* CPU POKE is registered. */
+ cpu_poke = true;
+ return;
+ }
+
+ pr_debug("CPU_POKE not supported\n");
}
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)