author      Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2016-04-09 01:25:58 +0200
committer   Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2016-04-09 01:25:58 +0200
commit      22590efb98ae0c84f798a9938c0b6d97bc89adf5 (patch)
tree        0ee9817ec59901350d6deef093d613e427284619 /drivers
parent      cpufreq: exynos: Use generic platdev driver (diff)
intel_pstate: Avoid pointless FRAC_BITS shifts under div_fp()
There are multiple places in intel_pstate where int_tofp() is applied to both arguments of div_fp(), but this is pointless, because int_tofp() simply shifts its argument to the left by FRAC_BITS, which mathematically is equivalent to multiplication by 2^FRAC_BITS. If this is done to both arguments of a division, the extra factors cancel each other during that operation anyway.

Drop the pointless int_tofp() applied to div_fp() arguments throughout the driver.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
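To see why the two conversions cancel, consider a minimal user-space sketch of the driver's fixed-point scheme. The FRAC_BITS, int_tofp() and div_fp() names follow intel_pstate.c, but the helper body below is a simplified stand-in (the kernel helper divides with div64_s64() rather than plain '/'), so treat it as an illustration rather than the actual driver code:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

/* Simplified stand-in for the kernel's div_fp(); the real helper
 * uses div64_s64() for the 64-bit division. */
static int64_t div_fp(int64_t x, int64_t y)
{
	return (x << FRAC_BITS) / y;
}

int main(void)
{
	int percent = 20;

	/* Old form: both arguments pre-scaled by 2^FRAC_BITS. */
	int64_t old_gain = div_fp(int_tofp(percent), int_tofp(100));

	/* New form: the two 2^FRAC_BITS factors cancel inside the
	 * division, so plain integers yield the same fixed-point value. */
	int64_t new_gain = div_fp(percent, 100);

	/* Both print 51, i.e. roughly 0.2 in 8.8 fixed point (51/256). */
	printf("old = %lld, new = %lld\n",
	       (long long)old_gain, (long long)new_gain);
	return 0;
}

In symbols, (a * 2^FRAC_BITS * 2^FRAC_BITS) / (b * 2^FRAC_BITS) equals (a * 2^FRAC_BITS) / b exactly, so the pre-scaled and plain-integer calls produce identical results.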
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/cpufreq/intel_pstate.c   31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6c7cff13f0ed..8a368d2ee25c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -341,17 +341,17 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
- pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->p_gain = div_fp(percent, 100);
}
static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
- pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->i_gain = div_fp(percent, 100);
}
static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
- pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->d_gain = div_fp(percent, 100);
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
@@ -529,7 +529,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
- turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+ turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
return sprintf(buf, "%u\n", turbo_pct);
}
@@ -600,8 +600,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf_pct = max(limits->min_perf_pct,
limits->max_perf_pct);
- limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
- int_tofp(100));
+ limits->max_perf = div_fp(limits->max_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
@@ -625,8 +624,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf_pct = min(limits->max_perf_pct,
limits->min_perf_pct);
- limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
- int_tofp(100));
+ limits->min_perf = div_fp(limits->min_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
@@ -1011,8 +1009,8 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
struct sample *sample = &cpu->sample;
int64_t core_pct;
- core_pct = int_tofp(sample->aperf) * int_tofp(100);
- core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
+ core_pct = sample->aperf * int_tofp(100);
+ core_pct = div64_u64(core_pct, sample->mperf);
sample->core_pct_busy = (int32_t)core_pct;
}
@@ -1115,8 +1113,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
* specified pstate.
*/
core_busy = cpu->sample.core_pct_busy;
- max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
- current_pstate = int_tofp(cpu->pstate.current_pstate);
+ max_pstate = cpu->pstate.max_pstate_physical;
+ current_pstate = cpu->pstate.current_pstate;
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
/*
@@ -1127,8 +1125,7 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
*/
duration_ns = cpu->sample.time - cpu->last_sample_time;
if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
- sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
- int_tofp(duration_ns));
+ sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
core_busy = mul_fp(core_busy, sample_ratio);
}
@@ -1328,10 +1325,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
- limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
- int_tofp(100));
- limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
- int_tofp(100));
+ limits->min_perf = div_fp(limits->min_perf_pct, 100);
+ limits->max_perf = div_fp(limits->max_perf_pct, 100);
out:
intel_pstate_set_update_util_hook(policy->cpu);