Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq.c               |  23
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c  |   5
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c      |   5
-rw-r--r--   drivers/cpufreq/intel_pstate.c          | 138
4 files changed, 116 insertions(+), 55 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e338d2f010fe..b8d95536ee22 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -924,7 +924,7 @@ cpufreq_freq_attr_rw(scaling_max_freq);
 cpufreq_freq_attr_rw(scaling_governor);
 cpufreq_freq_attr_rw(scaling_setspeed);
 
-static struct attribute *default_attrs[] = {
+static struct attribute *cpufreq_attrs[] = {
         &cpuinfo_min_freq.attr,
         &cpuinfo_max_freq.attr,
         &cpuinfo_transition_latency.attr,
@@ -938,6 +938,7 @@ static struct attribute *default_attrs[] = {
         &scaling_setspeed.attr,
         NULL
 };
+ATTRIBUTE_GROUPS(cpufreq);
 
 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 #define to_attr(a) container_of(a, struct freq_attr, attr)
@@ -1000,14 +1001,13 @@ static const struct sysfs_ops sysfs_ops = {
 
 static struct kobj_type ktype_cpufreq = {
         .sysfs_ops      = &sysfs_ops,
-        .default_attrs  = default_attrs,
+        .default_groups = cpufreq_groups,
         .release        = cpufreq_sysfs_release,
 };
 
-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+                                struct device *dev)
 {
-        struct device *dev = get_cpu_device(cpu);
-
         if (unlikely(!dev))
                 return;
 
@@ -1296,8 +1296,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 
         if (policy->max_freq_req) {
                 /*
-                 * CPUFREQ_CREATE_POLICY notification is sent only after
-                 * successfully adding max_freq_req request.
+                 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
+                 * notification, since CPUFREQ_CREATE_POLICY notification was
+                 * sent after adding max_freq_req earlier.
                  */
                 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                              CPUFREQ_REMOVE_POLICY, policy);
@@ -1391,7 +1392,7 @@ static int cpufreq_online(unsigned int cpu)
         if (new_policy) {
                 for_each_cpu(j, policy->related_cpus) {
                         per_cpu(cpufreq_cpu_data, j) = policy;
-                        add_cpu_dev_symlink(policy, j);
+                        add_cpu_dev_symlink(policy, j, get_cpu_device(j));
                 }
 
                 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1403,7 +1404,7 @@ static int cpufreq_online(unsigned int cpu)
 
                 ret = freq_qos_add_request(&policy->constraints,
                                            policy->min_freq_req, FREQ_QOS_MIN,
-                                           policy->min);
+                                           FREQ_QOS_MIN_DEFAULT_VALUE);
                 if (ret < 0) {
                         /*
                          * So we don't call freq_qos_remove_request() for an
@@ -1423,7 +1424,7 @@ static int cpufreq_online(unsigned int cpu)
 
                 ret = freq_qos_add_request(&policy->constraints,
                                            policy->max_freq_req, FREQ_QOS_MAX,
-                                           policy->max);
+                                           FREQ_QOS_MAX_DEFAULT_VALUE);
                 if (ret < 0) {
                         policy->max_freq_req = NULL;
                         goto out_destroy_policy;
@@ -1565,7 +1566,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         /* Create sysfs link on CPU registration */
         policy = per_cpu(cpufreq_cpu_data, cpu);
         if (policy)
-                add_cpu_dev_symlink(policy, cpu);
+                add_cpu_dev_symlink(policy, cpu, dev);
 
         return 0;
 }
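The sysfs attribute changes in cpufreq.c above, and the matching governor changes below, move the driver from the kobj_type.default_attrs field to default_groups. As a rough sketch of why the array is renamed to cpufreq_attrs (ATTRIBUTE_GROUPS() derives the group symbol names from its argument; simplified from include/linux/sysfs.h, not part of this commit), ATTRIBUTE_GROUPS(cpufreq) expands to approximately:

static const struct attribute_group cpufreq_group = {
        .attrs = cpufreq_attrs,        /* the renamed attribute array */
};

static const struct attribute_group *cpufreq_groups[] = {
        &cpufreq_group,
        NULL,
};

This is the cpufreq_groups pointer array that ktype_cpufreq.default_groups now references.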
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0879ec3c170c..08515f7e515f 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -257,7 +257,7 @@ gov_attr_rw(ignore_nice_load);
 gov_attr_rw(down_threshold);
 gov_attr_rw(freq_step);
 
-static struct attribute *cs_attributes[] = {
+static struct attribute *cs_attrs[] = {
         &sampling_rate.attr,
         &sampling_down_factor.attr,
         &up_threshold.attr,
@@ -266,6 +266,7 @@ static struct attribute *cs_attributes[] = {
         &freq_step.attr,
         NULL
 };
+ATTRIBUTE_GROUPS(cs);
 
 /************************** sysfs end ************************/
 
@@ -315,7 +316,7 @@ static void cs_start(struct cpufreq_policy *policy)
 
 static struct dbs_governor cs_governor = {
         .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
-        .kobj_type = { .default_attrs = cs_attributes },
+        .kobj_type = { .default_groups = cs_groups },
         .gov_dbs_update = cs_dbs_update,
         .alloc = cs_alloc,
         .free = cs_free,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3b8f924771b4..6a41ea4729b8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -328,7 +328,7 @@ gov_attr_rw(sampling_down_factor);
 gov_attr_rw(ignore_nice_load);
 gov_attr_rw(powersave_bias);
 
-static struct attribute *od_attributes[] = {
+static struct attribute *od_attrs[] = {
         &sampling_rate.attr,
         &up_threshold.attr,
         &sampling_down_factor.attr,
@@ -337,6 +337,7 @@ static struct attribute *od_attributes[] = {
         &io_is_busy.attr,
         NULL
 };
+ATTRIBUTE_GROUPS(od);
 
 /************************** sysfs end ************************/
 
@@ -401,7 +402,7 @@ static struct od_ops od_ops = {
 
 static struct dbs_governor od_dbs_gov = {
         .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
-        .kobj_type = { .default_attrs = od_attributes },
+        .kobj_type = { .default_groups = od_groups },
         .gov_dbs_update = od_dbs_update,
         .alloc = od_alloc,
         .free = od_free,
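The intel_pstate.c changes below begin with an ITMT priority fix: when firmware reports the CPPC highest_perf value saturated at 0xff (seen on some systems with overclocking enabled), the driver falls back to the highest-performance field cached from MSR_HWP_CAPABILITIES. A minimal standalone sketch of that fallback decision (a hypothetical helper for illustration, not code from the patch):

/* If CPPC highest_perf is pegged at U8_MAX, use the cached HWP capability instead. */
static unsigned int effective_highest_perf(unsigned int cppc_highest_perf,
                                           unsigned long long hwp_cap_cached)
{
        if (cppc_highest_perf == 0xff)          /* CPPC_MAX_PERF */
                return hwp_cap_cached & 0xff;   /* HWP_HIGHEST_PERF(): lowest byte */

        return cppc_highest_perf;
}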
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 815df3daae9d..bc7f7e6759bd 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
 
 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
 
+#define CPPC_MAX_PERF   U8_MAX
+
 static void intel_pstate_set_itmt_prio(int cpu)
 {
         struct cppc_perf_caps cppc_perf;
@@ -349,6 +351,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
                 return;
 
         /*
+         * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+         * In this case we can't use CPPC.highest_perf to enable ITMT.
+         * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+         */
+        if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+                cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
+        /*
          * The priorities can be set regardless of whether or not
          * sched_set_itmt_support(true) has been called and it is valid to
          * update them at any time after it has been called.
@@ -654,19 +664,29 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
  *      3               balance_power
  *      4               power
  */
+
+enum energy_perf_value_index {
+        EPP_INDEX_DEFAULT = 0,
+        EPP_INDEX_PERFORMANCE,
+        EPP_INDEX_BALANCE_PERFORMANCE,
+        EPP_INDEX_BALANCE_POWERSAVE,
+        EPP_INDEX_POWERSAVE,
+};
+
 static const char * const energy_perf_strings[] = {
-        "default",
-        "performance",
-        "balance_performance",
-        "balance_power",
-        "power",
+        [EPP_INDEX_DEFAULT] = "default",
+        [EPP_INDEX_PERFORMANCE] = "performance",
+        [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+        [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+        [EPP_INDEX_POWERSAVE] = "power",
         NULL
 };
-static const unsigned int epp_values[] = {
-        HWP_EPP_PERFORMANCE,
-        HWP_EPP_BALANCE_PERFORMANCE,
-        HWP_EPP_BALANCE_POWERSAVE,
-        HWP_EPP_POWERSAVE
+static unsigned int epp_values[] = {
+        [EPP_INDEX_DEFAULT] = 0, /* Unused index */
+        [EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
+        [EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
+        [EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
+        [EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
 };
 
 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
@@ -680,14 +700,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
                 return epp;
 
         if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
-                if (epp == HWP_EPP_PERFORMANCE)
-                        return 1;
-                if (epp == HWP_EPP_BALANCE_PERFORMANCE)
-                        return 2;
-                if (epp == HWP_EPP_BALANCE_POWERSAVE)
-                        return 3;
-                if (epp == HWP_EPP_POWERSAVE)
-                        return 4;
+                if (epp == epp_values[EPP_INDEX_PERFORMANCE])
+                        return EPP_INDEX_PERFORMANCE;
+                if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
+                        return EPP_INDEX_BALANCE_PERFORMANCE;
+                if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
+                        return EPP_INDEX_BALANCE_POWERSAVE;
+                if (epp == epp_values[EPP_INDEX_POWERSAVE])
+                        return EPP_INDEX_POWERSAVE;
                 *raw_epp = epp;
                 return 0;
         } else if (boot_cpu_has(X86_FEATURE_EPB)) {
@@ -747,7 +767,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
         if (use_raw)
                 epp = raw_epp;
         else if (epp == -EINVAL)
-                epp = epp_values[pref_index - 1];
+                epp = epp_values[pref_index];
 
         /*
          * To avoid confusion, refuse to set EPP to any values different
@@ -833,7 +853,7 @@ static ssize_t store_energy_performance_preference(
          * upfront.
          */
         if (!raw)
-                epp = ret ? epp_values[ret - 1] : cpu->epp_default;
+                epp = ret ? epp_values[ret] : cpu->epp_default;
 
         if (cpu->epp_cached != epp) {
                 int err;
@@ -1006,6 +1026,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
                  */
                 value &= ~GENMASK_ULL(31, 24);
                 value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+                /*
+                 * However, make sure that EPP will be set to "performance" when
+                 * the CPU is brought back online again and the "performance"
+                 * scaling algorithm is still in effect.
+                 */
+                cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
         }
 
         /*
@@ -1108,19 +1134,22 @@ static void intel_pstate_update_policies(void)
                 cpufreq_update_policy(cpu);
 }
 
+static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
+                                           struct cpufreq_policy *policy)
+{
+        policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+                        cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+        refresh_frequency_limits(policy);
+}
+
 static void intel_pstate_update_max_freq(unsigned int cpu)
 {
         struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-        struct cpudata *cpudata;
 
         if (!policy)
                 return;
 
-        cpudata = all_cpu_data[cpu];
-        policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
-                        cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
-
-        refresh_frequency_limits(policy);
+        __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
 
         cpufreq_cpu_release(policy);
 }
@@ -1568,8 +1597,15 @@ static void intel_pstate_notify_work(struct work_struct *work)
 {
         struct cpudata *cpudata =
                 container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
+
+        if (policy) {
+                intel_pstate_get_hwp_cap(cpudata);
+                __intel_pstate_update_max_freq(cpudata, policy);
+
+                cpufreq_cpu_release(policy);
+        }
 
-        cpufreq_update_policy(cpudata->cpu);
         wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 }
@@ -1663,10 +1699,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
         wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
         wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
-        if (cpudata->epp_default == -EINVAL)
-                cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 
         intel_pstate_enable_hwp_interrupt(cpudata);
+
+        if (cpudata->epp_default >= 0)
+                return;
+
+        if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) {
+                cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+        } else {
+                cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+                intel_pstate_set_epp(cpudata, cpudata->epp_default);
+        }
 }
 
 static int atom_get_min_pstate(void)
@@ -2353,6 +2397,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
         X86_MATCH(BROADWELL_D,          core_funcs),
         X86_MATCH(BROADWELL_X,          core_funcs),
         X86_MATCH(SKYLAKE_X,            core_funcs),
+        X86_MATCH(ICELAKE_X,            core_funcs),
         {}
 };
 
@@ -2469,18 +2514,14 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
          * HWP needs some special consideration, because HWP_REQUEST uses
          * abstract values to represent performance rather than pure ratios.
          */
-        if (hwp_active) {
-                intel_pstate_get_hwp_cap(cpu);
+        if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
+                int scaling = cpu->pstate.scaling;
+                int freq;
 
-                if (cpu->pstate.scaling != perf_ctl_scaling) {
-                        int scaling = cpu->pstate.scaling;
-                        int freq;
-
-                        freq = max_policy_perf * perf_ctl_scaling;
-                        max_policy_perf = DIV_ROUND_UP(freq, scaling);
-                        freq = min_policy_perf * perf_ctl_scaling;
-                        min_policy_perf = DIV_ROUND_UP(freq, scaling);
-                }
+                freq = max_policy_perf * perf_ctl_scaling;
+                max_policy_perf = DIV_ROUND_UP(freq, scaling);
+                freq = min_policy_perf * perf_ctl_scaling;
+                min_policy_perf = DIV_ROUND_UP(freq, scaling);
         }
 
         pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -3332,6 +3373,16 @@ static bool intel_pstate_hwp_is_enabled(void)
         return !!(value & 0x1);
 }
 
+static const struct x86_cpu_id intel_epp_balance_perf[] = {
+        /*
+         * Set EPP value as 102, this is the max suggested EPP
+         * which can result in one core turbo frequency for
+         * AlderLake Mobile CPUs.
+         */
+        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
+        {}
+};
+
 static int __init intel_pstate_init(void)
 {
         static struct cpudata **_all_cpu_data;
@@ -3421,6 +3472,13 @@ hwp_cpu_matched:
 
         intel_pstate_sysfs_expose_params();
 
+        if (hwp_active) {
+                const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+
+                if (id)
+                        epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+        }
+
         mutex_lock(&intel_pstate_driver_lock);
         rc = intel_pstate_register_driver(default_driver);
         mutex_unlock(&intel_pstate_driver_lock);
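With the enum-indexed tables introduced above, the sysfs preference index and the raw EPP table now share one index space, which is why the "- 1" offsets disappear and why intel_pstate_init() can simply overwrite epp_values[EPP_INDEX_BALANCE_PERFORMANCE] (102 on Alder Lake Mobile). A small illustrative lookup in the same spirit; the constants mirror the HWP_EPP_* values, but this standalone program is not part of the patch:

#include <stdio.h>
#include <string.h>

static const char * const energy_perf_strings[] = {
        "default", "performance", "balance_performance", "balance_power", "power", NULL
};

/* Index 0 is unused; 0x80 would become 102 after the Alder Lake Mobile override. */
static unsigned int epp_values[] = { 0, 0x00, 0x80, 0xC0, 0xFF };

int main(void)
{
        const char *pref = "balance_performance";

        for (int i = 1; energy_perf_strings[i]; i++) {
                if (!strcmp(pref, energy_perf_strings[i])) {
                        /* The same index selects the raw EPP value, no "- 1" offset. */
                        printf("%s -> EPP 0x%02x\n", pref, epp_values[i]);
                        break;
                }
        }
        return 0;
}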