author | Kristen Carlson Accardi <kristen@linux.intel.com> | 2014-12-10 21:39:38 +0100 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2014-12-11 00:24:42 +0100 |
commit | e0d4c8f80804000eadc106bc5167b96fc6231d98 (patch) | |
tree | 7ee4ae6b5a591ff27c69db620712825a2354063a | |
parent | intel_pstate: add kernel parameter to force loading (diff) | |
intel_pstate: Add a few comments
Add a few comments to the code that calculates busyness, to clarify
parts of the algorithm.
Signed-off-by: Kristen Carlson Accardi <kristen@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 32 |
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0e841eecb743..742eefba12c2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -199,7 +199,14 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
 
 	pid->integral += fp_error;
 
-	/* limit the integral term */
+	/*
+	 * We limit the integral here so that it will never
+	 * get higher than 30. This prevents it from becoming
+	 * too large an input over long periods of time and allows
+	 * it to get factored out sooner.
+	 *
+	 * The value of 30 was chosen through experimentation.
+	 */
 	integral_limit = int_tofp(30);
 	if (pid->integral > integral_limit)
 		pid->integral = integral_limit;
@@ -616,6 +623,11 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	if (limits.no_turbo || limits.turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
+	/*
+	 * performance can be limited by user through sysfs, by cpufreq
+	 * policy, or by cpu specific default values determined through
+	 * experimentation.
+	 */
 	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
 	*max = clamp_t(int, max_perf_adj,
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
@@ -717,11 +729,29 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	u32 duration_us;
 	u32 sample_time;
 
+	/*
+	 * core_busy is the ratio of actual performance to max
+	 * max_pstate is the max non turbo pstate available
+	 * current_pstate was the pstate that was requested during
+	 *	the last sample period.
+	 *
+	 * We normalize core_busy, which was our actual percent
+	 * performance to what we requested during the last sample
+	 * period. The result will be a percentage of busy at a
+	 * specified pstate.
+	 */
 	core_busy = cpu->sample.core_pct_busy;
 	max_pstate = int_tofp(cpu->pstate.max_pstate);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
+	/*
+	 * Since we have a deferred timer, it will not fire unless
+	 * we are in C0. So, determine if the actual elapsed time
+	 * is significantly greater (3x) than our sample interval. If it
+	 * is, then we were idle for a long enough period of time
+	 * to adjust our busyness.
+	 */
 	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
 	duration_us = (u32) ktime_us_delta(cpu->sample.time,
 					   cpu->last_sample_time);
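To make the logic the new comments describe more concrete, here is a minimal userspace sketch, not the driver itself: it shows clamping the PID integral term and rescaling the measured busy percentage by max_pstate/current_pstate, plus a decay when the elapsed time is much longer than the sample interval. The FRAC_BITS value, the helper reimplementations, the lower clamp bound, the sample_time/duration decay step, and the example numbers are assumptions for illustration, modeled on the driver's int_tofp()/fp_toint()/mul_fp()/div_fp() style rather than copied from it.

```c
/*
 * Illustrative userspace sketch (assumptions, not kernel code) of the
 * two ideas commented in the patch: clamping the PID integral and
 * normalizing busyness to the requested pstate.
 */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8			/* assumed fixed-point precision */
#define int_tofp(x) ((int64_t)(x) << FRAC_BITS)
#define fp_toint(x) ((x) >> FRAC_BITS)

static int64_t mul_fp(int64_t x, int64_t y)
{
	return (x * y) >> FRAC_BITS;
}

static int64_t div_fp(int64_t x, int64_t y)
{
	return (x << FRAC_BITS) / y;
}

/* Keep the integral within +/-30 so stale error is factored out sooner. */
static int64_t clamp_integral(int64_t integral)
{
	int64_t limit = int_tofp(30);	/* 30 chosen by experimentation */

	if (integral > limit)
		return limit;
	if (integral < -limit)
		return -limit;
	return integral;
}

/*
 * Normalize the busy percentage measured at current_pstate to the max
 * non-turbo pstate, then (per the deferred-timer comment) decay it by
 * sample_time/duration when the CPU slept for more than three sample
 * intervals.  The decay step is an assumption based on that comment.
 */
static int64_t scaled_busy(int64_t core_pct_busy_fp, int max_pstate,
			   int current_pstate, uint32_t duration_us,
			   uint32_t sample_time_us)
{
	int64_t busy = mul_fp(core_pct_busy_fp,
			      div_fp(int_tofp(max_pstate),
				     int_tofp(current_pstate)));

	if (duration_us > sample_time_us * 3)
		busy = mul_fp(busy, div_fp(int_tofp(sample_time_us),
					   int_tofp(duration_us)));
	return busy;
}

int main(void)
{
	/*
	 * Example: 50% busy at pstate 16 with max non-turbo pstate 24,
	 * and 40 ms elapsed since the last 10 ms sample:
	 * 50 * 24/16 = 75%, then * 10/40 = ~18%.
	 */
	int64_t busy = scaled_busy(int_tofp(50), 24, 16, 40000, 10000);

	printf("scaled busy: %lld%%\n", (long long)fp_toint(busy));
	printf("clamped integral: %lld\n",
	       (long long)fp_toint(clamp_integral(int_tofp(100))));
	return 0;
}
```

Built with a plain `gcc` invocation, the sketch prints a scaled busy of 18% and a clamped integral of 30, matching the normalization and the experimentally chosen limit described in the added comments.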