author		Shreyas B. Prabhu <shreyas@linux.vnet.ibm.com>	2016-07-01 16:24:14 +0200
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2016-07-04 14:17:34 +0200
commit		dbd1b8ea43b17e2ed4acda72f83ea17f69408682 (patch)
tree		ee37f87df9d788548910546c9be84d5581803514
parent		Linux 4.7-rc5 (diff)
cpuidle: Fix last_residency division
Snooze is a poll idle state on the powernv and pseries platforms. Snooze has a timeout so that if a CPU stays in snooze for longer than the target residency of the next available idle state, it exits, giving the cpuidle governor a chance to re-evaluate and promote the CPU to a deeper idle state. Therefore, whenever snooze exits due to this timeout, its last_residency will be the target_residency of the next deeper state.

Commit e93e59ce5b85 ("cpuidle: Replace ktime_get() with local_clock()") changed the math around the last_residency calculation. Specifically, when converting the last_residency value from nano- to microseconds, it carries out a right shift by 10, which divides by 1024 rather than 1000. Because of that, in snooze timeout exit scenarios the calculated last_residency is roughly 2.3% less than the target_residency of the next available state.

This pattern is picked up by get_typical_interval() in the menu governor, so expected_interval in menu_select() is frequently less than the target_residency of any state other than snooze. As a result we enter snooze at a higher rate, which hurts single-thread performance.

Fix this by using more precise division via ktime_us_delta().

Fixes: e93e59ce5b85 ("cpuidle: Replace ktime_get() with local_clock()")
Reported-by: Anton Blanchard <anton@samba.org>
Bisected-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
Signed-off-by: Shreyas B. Prabhu <shreyas@linux.vnet.ibm.com>
Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
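[Editorial note: to make the ~2.3% figure concrete, here is a minimal user-space sketch, not kernel code, comparing the two conversions; the 100 us target residency is a hypothetical value chosen only for illustration.]

/*
 * Minimal user-space sketch (not kernel code) of the rounding error
 * described above. Assumes a hypothetical next-state target residency
 * of 100 us (100000 ns) purely for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t residency_ns = 100000;			/* 100 us in nanoseconds */

	/* Old conversion: right shift by 10 divides by 1024, not 1000. */
	int64_t shifted_us = residency_ns >> 10;	/* 97 us, ~2.3% short */

	/* Precise conversion, as ktime_us_delta() performs: divide by 1000. */
	int64_t divided_us = residency_ns / 1000;	/* 100 us */

	printf("shift-by-10: %lld us, divide-by-1000: %lld us\n",
	       (long long)shifted_us, (long long)divided_us);
	return 0;
}

Run against a 100 us residency this prints 97 us versus 100 us, i.e. the shifted value falls just below the target_residency that the menu governor compares against.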
-rw-r--r--	drivers/cpuidle/cpuidle.c	12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index a4d0059e232c..c73207abb5a4 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	struct cpuidle_state *target_state = &drv->states[index];
 	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-	u64 time_start, time_end;
+	ktime_t time_start, time_end;
 	s64 diff;
 
 	/*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	sched_idle_set_state(target_state);
 
 	trace_cpu_idle_rcuidle(index, dev->cpu);
-	time_start = local_clock();
+	time_start = ns_to_ktime(local_clock());
 
 	stop_critical_timings();
 	entered_state = target_state->enter(dev, drv, index);
 	start_critical_timings();
 
-	time_end = local_clock();
+	time_end = ns_to_ktime(local_clock());
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
@@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	if (!cpuidle_state_is_coupled(drv, index))
 		local_irq_enable();
 
-	/*
-	 * local_clock() returns the time in nanosecond, let's shift
-	 * by 10 (divide by 1024) to have microsecond based time.
-	 */
-	diff = (time_end - time_start) >> 10;
+	diff = ktime_us_delta(time_end, time_start);
 
 	if (diff > INT_MAX)
 		diff = INT_MAX;
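
[Editorial note: for reference, a rough user-space analogue of the patched measurement path; clock_gettime(CLOCK_MONOTONIC) is only a stand-in for the kernel's local_clock(), not its implementation.]

/*
 * Rough user-space analogue of the patched path: measure in nanoseconds,
 * convert with a true division by 1000 (as ktime_us_delta() does), and
 * clamp to INT_MAX before storing as an int.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	int64_t time_start = now_ns();
	/* ... the idle period would elapse here ... */
	int64_t time_end = now_ns();

	int64_t diff = (time_end - time_start) / 1000;	/* ns -> us, no shift */

	if (diff > INT_MAX)
		diff = INT_MAX;

	printf("last_residency: %d us\n", (int)diff);
	return 0;
}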