Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                   |   2
-rw-r--r--  drivers/cpufreq/Kconfig.arm               |  50
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c            |  12
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c           |  54
-rw-r--r--  drivers/cpufreq/amd-pstate.c              | 219
-rw-r--r--  drivers/cpufreq/amd-pstate.h              |  14
-rw-r--r--  drivers/cpufreq/apple-soc-cpufreq.c       |   2
-rw-r--r--  drivers/cpufreq/armada-8k-cpufreq.c       |   2
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c      |   1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c              |  11
-rw-r--r--  drivers/cpufreq/cpufreq.c                 |  27
-rw-r--r--  drivers/cpufreq/intel_pstate.c            | 241
-rw-r--r--  drivers/cpufreq/loongson3_cpufreq.c       |   2
-rw-r--r--  drivers/cpufreq/maple-cpufreq.c           |   1
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c        |   2
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c            |   3
-rw-r--r--  drivers/cpufreq/pasemi-cpufreq.c          |   1
-rw-r--r--  drivers/cpufreq/pmac64-cpufreq.c          |   3
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c         |   3
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c         |   1
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c         |   1
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c      |   2
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c           |  18
-rw-r--r--  drivers/cpufreq/sti-cpufreq.c             |   2
-rw-r--r--  drivers/cpufreq/sun50i-cpufreq-nvmem.c    |   2
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c              |  31
26 files changed, 484 insertions(+), 223 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 10cda6f2fe1d..2561b215432a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -231,9 +231,7 @@ if X86
source "drivers/cpufreq/Kconfig.x86"
endif
-if ARM || ARM64
source "drivers/cpufreq/Kconfig.arm"
-endif
if PPC32 || PPC64
source "drivers/cpufreq/Kconfig.powerpc"
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 96b404ce829f..5f7e13e60c80 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,7 +5,7 @@
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
depends on NVMEM_SUNXI_SID
select PM_OPP
help
@@ -26,15 +26,17 @@ config ARM_APPLE_SOC_CPUFREQ
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
- depends on ARCH_MVEBU && CPUFREQ_DT
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on CPUFREQ_DT
help
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.
config ARM_ARMADA_8K_CPUFREQ
tristate "Armada 8K CPUFreq driver"
- depends on ARCH_MVEBU && CPUFREQ_DT
- select ARMADA_AP_CPU_CLK
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on CPUFREQ_DT
+ select ARMADA_AP_CPU_CLK if COMMON_CLK
help
This enables the CPUFreq driver support for Marvell
Armada8k SOCs.
@@ -56,7 +58,7 @@ config ARM_SCPI_CPUFREQ
config ARM_VEXPRESS_SPC_CPUFREQ
tristate "Versatile Express SPC based CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && HAVE_CLK
- depends on ARCH_VEXPRESS_SPC
+ depends on ARCH_VEXPRESS_SPC || COMPILE_TEST
select PM_OPP
help
This adds the CPUfreq driver support for Versatile Express
@@ -75,7 +77,8 @@ config ARM_BRCMSTB_AVS_CPUFREQ
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
- depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
+ depends on ARCH_HIGHBANK || COMPILE_TEST
+ depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
default m
help
This adds the CPUFreq driver for Calxeda Highbank SoC
@@ -96,7 +99,8 @@ config ARM_IMX6Q_CPUFREQ
config ARM_IMX_CPUFREQ_DT
tristate "Freescale i.MX8M cpufreq support"
- depends on ARCH_MXC && CPUFREQ_DT
+ depends on CPUFREQ_DT
+ depends on ARCH_MXC || COMPILE_TEST
help
This adds cpufreq driver support for Freescale i.MX7/i.MX8M
series SoCs, based on cpufreq-dt.
@@ -111,7 +115,8 @@ config ARM_KIRKWOOD_CPUFREQ
config ARM_MEDIATEK_CPUFREQ
tristate "CPU Frequency scaling support for MediaTek SoCs"
- depends on ARCH_MEDIATEK && REGULATOR
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on REGULATOR
select PM_OPP
help
This adds the CPUFreq driver support for MediaTek SoCs.
@@ -130,12 +135,12 @@ config ARM_MEDIATEK_CPUFREQ_HW
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
default ARCH_OMAP2PLUS
config ARM_QCOM_CPUFREQ_NVMEM
tristate "Qualcomm nvmem based CPUFreq"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
depends on NVMEM_QCOM_QFPROM
depends on QCOM_SMEM
select PM_OPP
@@ -166,7 +171,7 @@ config ARM_RASPBERRYPI_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
- depends on CPU_S3C6410
+ depends on CPU_S3C6410 || COMPILE_TEST
default y
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -175,7 +180,7 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
- depends on CPU_S5PV210
+ depends on CPU_S5PV210 || COMPILE_TEST
default y
help
This adds the CPUFreq driver for Samsung S5PV210 and
@@ -199,14 +204,15 @@ config ARM_SCMI_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
- depends on PLAT_SPEAR
+ depends on PLAT_SPEAR || COMPILE_TEST
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
config ARM_STI_CPUFREQ
tristate "STi CPUFreq support"
- depends on CPUFREQ_DT && SOC_STIH407
+ depends on CPUFREQ_DT
+ depends on SOC_STIH407 || COMPILE_TEST
help
This driver uses the generic OPP framework to match the running
platform with a predefined set of suitable values. If not provided
@@ -216,34 +222,38 @@ config ARM_STI_CPUFREQ
config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on CPUFREQ_DT
default y
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on CPUFREQ_DT
default y
help
This adds the CPUFreq driver support for Tegra124 SOCs.
config ARM_TEGRA186_CPUFREQ
tristate "Tegra186 CPUFreq support"
- depends on ARCH_TEGRA && TEGRA_BPMP
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on TEGRA_BPMP
help
This adds the CPUFreq driver support for Tegra186 SOCs.
config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
- depends on ARCH_TEGRA_194_SOC && TEGRA_BPMP
+ depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST)
+ depends on TEGRA_BPMP
default y
help
This adds CPU frequency driver support for Tegra194 SOCs.
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
- depends on ARCH_OMAP2PLUS || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
default y
help
This driver enables valid OPPs on the running platform based on
@@ -255,7 +265,7 @@ config ARM_TI_CPUFREQ
config ARM_PXA2xx_CPUFREQ
tristate "Intel PXA2xx CPUfreq driver"
- depends on PXA27x || PXA25x
+ depends on PXA27x || PXA25x || COMPILE_TEST
help
This adds the CPUFreq driver support for Intel PXA2xx SOCs.
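The recurring pattern in this Kconfig.arm rework is to split "depends on ARCH_FOO && BAR" into an "ARCH_FOO || COMPILE_TEST" line plus a separate line for the hard functional dependencies, so every driver here can at least be build-tested on other architectures. One knock-on effect is that OF match tables can end up unreferenced in such builds, which is why several drivers later in this series gain __maybe_unused. A minimal sketch with a hypothetical driver (not part of this series):

	/* Without CONFIG_OF, of_match_ptr() expands to NULL and the table is
	 * otherwise unreferenced; __maybe_unused silences the resulting
	 * -Wunused-const-variable warning in COMPILE_TEST builds. */
	static const struct of_device_id demo_cpufreq_of_match[] __maybe_unused = {
		{ .compatible = "vendor,demo-soc" },
		{ /* sentinel */ }
	};

	static struct platform_driver demo_cpufreq_driver = {
		.driver = {
			.name		= "demo-cpufreq",
			.of_match_table	= of_match_ptr(demo_cpufreq_of_match),
		},
	};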
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a8ca625a98b8..0f04feb6cafa 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -642,10 +642,16 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return 0;
}
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- highest_perf = amd_get_highest_perf();
- else
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ ret = amd_get_boost_ratio_numerator(cpu, &highest_perf);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get boost ratio numerator (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+ } else {
highest_perf = perf_caps.highest_perf;
+ }
nominal_perf = perf_caps.nominal_perf;
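The numerator returned by amd_get_boost_ratio_numerator() is then divided by nominal_perf to obtain the boost ratio; the same fixed-point math appears in amd_pstate_init_freq() later in this series. A worked example, not the driver's literal code:

	/* Ratio in SCHED_CAPACITY_SHIFT (10-bit) fixed point, e.g.
	 * numerator = 166, nominal_perf = 120 -> 169984 / 120 = 1416,
	 * i.e. a boost ratio of roughly 1416/1024 = 1.38. */
	u64 boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
	u32 max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;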
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index 66b73c308ce6..f66701514d90 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -54,12 +54,14 @@ static void amd_pstate_ut_acpi_cpc_valid(u32 index);
static void amd_pstate_ut_check_enabled(u32 index);
static void amd_pstate_ut_check_perf(u32 index);
static void amd_pstate_ut_check_freq(u32 index);
+static void amd_pstate_ut_check_driver(u32 index);
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
{"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
{"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
- {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }
+ {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq },
+ {"amd_pstate_ut_check_driver", amd_pstate_ut_check_driver }
};
static bool get_shared_mem(void)
@@ -160,14 +162,17 @@ static void amd_pstate_ut_check_perf(u32 index)
lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
}
- if ((highest_perf != READ_ONCE(cpudata->highest_perf)) ||
- (nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
+ if (highest_perf != READ_ONCE(cpudata->highest_perf) && !cpudata->hw_prefcore) {
+ pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
+ __func__, cpu, highest_perf, cpudata->highest_perf);
+ goto skip_test;
+ }
+ if ((nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
(lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
(lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s cpu%d highest=%d %d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
- __func__, cpu, highest_perf, cpudata->highest_perf,
- nominal_perf, cpudata->nominal_perf,
+ pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
+ __func__, cpu, nominal_perf, cpudata->nominal_perf,
lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
lowest_perf, cpudata->lowest_perf);
goto skip_test;
@@ -254,6 +259,43 @@ skip_test:
cpufreq_cpu_put(policy);
}
+static int amd_pstate_set_mode(enum amd_pstate_mode mode)
+{
+ const char *mode_str = amd_pstate_get_mode_string(mode);
+
+ pr_debug("->setting mode to %s\n", mode_str);
+
+ return amd_pstate_update_status(mode_str, strlen(mode_str));
+}
+
+static void amd_pstate_ut_check_driver(u32 index)
+{
+ enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
+ int ret;
+
+ for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
+ ret = amd_pstate_set_mode(mode1);
+ if (ret)
+ goto out;
+ for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
+ if (mode1 == mode2)
+ continue;
+ ret = amd_pstate_set_mode(mode2);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ if (ret)
+ pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2), ret);
+
+ amd_pstate_ut_cases[index].result = ret ?
+ AMD_PSTATE_UT_RESULT_FAIL :
+ AMD_PSTATE_UT_RESULT_PASS;
+}
+
static int __init amd_pstate_ut_init(void)
{
u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
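The new case exercises every ordered pair of the four switchable modes (disable, passive, active, guided), i.e. 4 * 3 = 12 transitions on top of the outer-loop entries, so a regression in any specific mode switch is caught by name. The harness stays table-driven; a sketch of the dispatch loop this init routine runs, assuming the field names implied by the initializers above:

	for (i = 0; i < arr_size; i++) {
		/* each case writes PASS/FAIL into its own table slot */
		amd_pstate_ut_cases[i].func(i);
		if (amd_pstate_ut_cases[i].result == AMD_PSTATE_UT_RESULT_FAIL)
			pr_err("%s test failed!\n", amd_pstate_ut_cases[i].name);
	}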
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 68c616b572f2..15e201d5e911 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -52,26 +52,12 @@
#define AMD_PSTATE_TRANSITION_LATENCY 20000
#define AMD_PSTATE_TRANSITION_DELAY 1000
#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY 600
-#define CPPC_HIGHEST_PERF_PERFORMANCE 196
-#define CPPC_HIGHEST_PERF_DEFAULT 166
#define AMD_CPPC_EPP_PERFORMANCE 0x00
#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
#define AMD_CPPC_EPP_POWERSAVE 0xFF
-/*
- * enum amd_pstate_mode - driver working mode of amd pstate
- */
-enum amd_pstate_mode {
- AMD_PSTATE_UNDEFINED = 0,
- AMD_PSTATE_DISABLE,
- AMD_PSTATE_PASSIVE,
- AMD_PSTATE_ACTIVE,
- AMD_PSTATE_GUIDED,
- AMD_PSTATE_MAX,
-};
-
static const char * const amd_pstate_mode_string[] = {
[AMD_PSTATE_UNDEFINED] = "undefined",
[AMD_PSTATE_DISABLE] = "disable",
@@ -81,6 +67,14 @@ static const char * const amd_pstate_mode_string[] = {
NULL,
};
+const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
+{
+ if (mode < 0 || mode >= AMD_PSTATE_MAX)
+ return NULL;
+ return amd_pstate_mode_string[mode];
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
+
struct quirk_entry {
u32 nominal_freq;
u32 lowest_freq;
@@ -321,7 +315,7 @@ static inline int pstate_enable(bool enable)
return 0;
for_each_present_cpu(cpu) {
- unsigned long logical_id = topology_logical_die_id(cpu);
+ unsigned long logical_id = topology_logical_package_id(cpu);
if (test_bit(logical_id, &logical_proc_id_mask))
continue;
@@ -372,43 +366,17 @@ static inline int amd_pstate_enable(bool enable)
return static_call(amd_pstate_enable)(enable);
}
-static u32 amd_pstate_highest_perf_set(struct amd_cpudata *cpudata)
-{
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- /*
- * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
- * the highest performance level is set to 196.
- * https://bugzilla.kernel.org/show_bug.cgi?id=218759
- */
- if (c->x86 == 0x19 && (c->x86_model >= 0x70 && c->x86_model <= 0x7f))
- return CPPC_HIGHEST_PERF_PERFORMANCE;
-
- return CPPC_HIGHEST_PERF_DEFAULT;
-}
-
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
- u32 highest_perf;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
- /* For platforms that do not support the preferred core feature, the
- * highest_pef may be configured with 166 or 255, to avoid max frequency
- * calculated wrongly. we take the AMD_CPPC_HIGHEST_PERF(cap1) value as
- * the default max perf.
- */
- if (cpudata->hw_prefcore)
- highest_perf = amd_pstate_highest_perf_set(cpudata);
- else
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
-
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+ WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
@@ -420,19 +388,13 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
static int cppc_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
- u32 highest_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- if (cpudata->hw_prefcore)
- highest_perf = amd_pstate_highest_perf_set(cpudata);
- else
- highest_perf = cppc_perf.highest_perf;
-
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
+ WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
@@ -554,12 +516,15 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
}
if (value == prev)
- return;
+ goto cpufreq_policy_put;
WRITE_ONCE(cpudata->cppc_req_cached, value);
amd_pstate_update_perf(cpudata, min_perf, des_perf,
max_perf, fast_switch);
+
+cpufreq_policy_put:
+ cpufreq_cpu_put(policy);
}
static int amd_pstate_verify(struct cpufreq_policy_data *policy)
@@ -656,7 +621,12 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long max_perf, min_perf, des_perf,
cap_perf, lowest_nonlinear_perf;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+
+ if (!policy)
+ return;
+
+ cpudata = policy->driver_data;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
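The NULL check added here and the cpufreq_cpu_put() added to amd_pstate_update() above fix the same class of bug: cpufreq_cpu_get() can return NULL (for instance while a policy is being torn down), and every successful get must be balanced by a put. The canonical pattern, as a minimal sketch:

	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;			/* no policy bound to this CPU */

	/* ... use policy and policy->driver_data ... */

	cpufreq_cpu_put(policy);	/* drop the reference from _get() */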
@@ -692,7 +662,7 @@ static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
u32 highest_perf, nominal_perf, nominal_freq, max_freq;
- int ret;
+ int ret = 0;
highest_perf = READ_ONCE(cpudata->highest_perf);
nominal_perf = READ_ONCE(cpudata->nominal_perf);
@@ -803,66 +773,22 @@ static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
}
static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
-/*
- * Get the highest performance register value.
- * @cpu: CPU from which to get highest performance.
- * @highest_perf: Return address.
- *
- * Return: 0 for success, -EIO otherwise.
- */
-static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
-{
- int ret;
-
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- u64 cap1;
-
- ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
- if (ret)
- return ret;
- WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
- } else {
- u64 cppc_highest_perf;
-
- ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
- if (ret)
- return ret;
- WRITE_ONCE(*highest_perf, cppc_highest_perf);
- }
-
- return (ret);
-}
-
#define CPPC_MAX_PERF U8_MAX
static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
{
- int ret, prio;
- u32 highest_perf;
-
- ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
- if (ret)
+ /* user disabled or not detected */
+ if (!amd_pstate_prefcore)
return;
cpudata->hw_prefcore = true;
- /* check if CPPC preferred core feature is enabled*/
- if (highest_perf < CPPC_MAX_PERF)
- prio = (int)highest_perf;
- else {
- pr_debug("AMD CPPC preferred core is unsupported!\n");
- cpudata->hw_prefcore = false;
- return;
- }
-
- if (!amd_pstate_prefcore)
- return;
/*
* The priorities can be set regardless of whether or not
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
*/
- sched_set_itmt_core_prio(prio, cpudata->cpu);
+ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
schedule_work(&sched_prefcore_work);
}
@@ -870,22 +796,27 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
int ret;
bool highest_perf_changed = false;
- mutex_lock(&amd_pstate_driver_lock);
- if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore))
- goto free_cpufreq_put;
+ if (!policy)
+ return;
+
+ cpudata = policy->driver_data;
+
+ if (!amd_pstate_prefcore)
+ return;
- ret = amd_pstate_get_highest_perf(cpu, &cur_high);
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_get_highest_perf(cpu, &cur_high);
if (ret)
goto free_cpufreq_put;
prev_high = READ_ONCE(cpudata->prefcore_ranking);
- if (prev_high != cur_high) {
- highest_perf_changed = true;
+ highest_perf_changed = (prev_high != cur_high);
+ if (highest_perf_changed) {
WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
if (cur_high < CPPC_MAX_PERF)
@@ -949,8 +880,8 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu)
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
int ret;
- u32 min_freq;
- u32 highest_perf, max_freq;
+ u32 min_freq, max_freq;
+ u64 numerator;
u32 nominal_perf, nominal_freq;
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
u32 boost_ratio, lowest_nonlinear_ratio;
@@ -972,8 +903,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
nominal_perf = READ_ONCE(cpudata->nominal_perf);
- highest_perf = READ_ONCE(cpudata->highest_perf);
- boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+ boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
@@ -1028,12 +961,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
- amd_pstate_init_prefcore(cpudata);
-
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_freq(cpudata);
if (ret)
goto free_cpudata1;
@@ -1349,7 +1282,7 @@ static ssize_t amd_pstate_show_status(char *buf)
return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}
-static int amd_pstate_update_status(const char *buf, size_t size)
+int amd_pstate_update_status(const char *buf, size_t size)
{
int mode_idx;
@@ -1366,6 +1299,7 @@ static int amd_pstate_update_status(const char *buf, size_t size)
return 0;
}
+EXPORT_SYMBOL_GPL(amd_pstate_update_status);
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1483,12 +1417,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
cpudata->epp_policy = 0;
- amd_pstate_init_prefcore(cpudata);
-
ret = amd_pstate_init_perf(cpudata);
if (ret)
goto free_cpudata1;
+ amd_pstate_init_prefcore(cpudata);
+
ret = amd_pstate_init_freq(cpudata);
if (ret)
goto free_cpudata1;
@@ -1555,7 +1489,7 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
pr_debug("CPU %d exiting\n", policy->cpu);
}
-static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
@@ -1605,7 +1539,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
* This return value can only be negative for shared_memory
* systems where EPP register read/write not supported.
*/
- return;
+ return epp;
}
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1618,12 +1552,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
}
WRITE_ONCE(cpudata->cppc_req_cached, value);
- amd_pstate_set_epp(cpudata, epp);
+ return amd_pstate_set_epp(cpudata, epp);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
if (!policy->cpuinfo.max_freq)
return -ENODEV;
@@ -1633,7 +1568,9 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
- amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy);
+ if (ret)
+ return ret;
/*
* policy->cur is never updated with the amd_pstate_epp driver, but it
@@ -1834,20 +1771,34 @@ static bool amd_cppc_supported(void)
}
/*
- * If the CPPC feature is disabled in the BIOS for processors that support MSR-based CPPC,
- * the AMD Pstate driver may not function correctly.
- * Check the CPPC flag and display a warning message if the platform supports CPPC.
- * Note: below checking code will not abort the driver registeration process because of
- * the code is added for debugging purposes.
+ * If the CPPC feature is disabled in the BIOS for processors
+ * that support MSR-based CPPC, the AMD Pstate driver may not
+ * function correctly.
+ *
+ * For such processors, check the CPPC flag and display a
+ * warning message if the platform supports CPPC.
+ *
+ * Note: The check below will not abort the driver
+ * registration process because the code is added for
+ * debugging purposes. Besides, it may still be possible for
+ * the driver to work using the shared-memory mechanism.
*/
if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
- if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) {
- if (c->x86_model > 0x60 && c->x86_model < 0xaf)
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ switch (c->x86_model) {
+ case 0x60 ... 0x6F:
+ case 0x80 ... 0xAF:
warn = true;
- } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) {
- if ((c->x86_model > 0x10 && c->x86_model < 0x1F) ||
- (c->x86_model > 0x40 && c->x86_model < 0xaf))
+ break;
+ }
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
+ cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+ switch (c->x86_model) {
+ case 0x10 ... 0x1F:
+ case 0x40 ... 0xAF:
warn = true;
+ break;
+ }
} else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
warn = true;
}
@@ -1933,6 +1884,12 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_update_perf, cppc_update_perf);
}
+ if (amd_pstate_prefcore) {
+ ret = amd_detect_prefcore(&amd_pstate_prefcore);
+ if (ret)
+ return ret;
+ }
+
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index cc8bb2bc325a..cd573bc6b6db 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -103,4 +103,18 @@ struct amd_cpudata {
bool boost_state;
};
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+ AMD_PSTATE_UNDEFINED = 0,
+ AMD_PSTATE_DISABLE,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_GUIDED,
+ AMD_PSTATE_MAX,
+};
+const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode);
+int amd_pstate_update_status(const char *buf, size_t size);
+
#endif /* _LINUX_AMD_PSTATE_H */
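Moving the mode enum into the header and exporting amd_pstate_get_mode_string() and amd_pstate_update_status() is what lets amd-pstate-ut drive mode switches by name. A hedged usage sketch (demo_enter_guided() is hypothetical):

	#include <linux/string.h>
	#include "amd-pstate.h"

	static int demo_enter_guided(void)
	{
		const char *mode = amd_pstate_get_mode_string(AMD_PSTATE_GUIDED);

		/* NULL means the mode index was out of range */
		return mode ? amd_pstate_update_status(mode, strlen(mode)) : -EINVAL;
	}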
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index af34c22fa273..4dcacab9b4bf 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -85,7 +85,7 @@ static const struct apple_soc_cpufreq_info soc_default_info = {
.cur_pstate_mask = 0, /* fallback */
};
-static const struct of_device_id apple_soc_cpufreq_of_match[] = {
+static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
{
.compatible = "apple,t8103-cluster-cpufreq",
.data = &soc_t8103_info,
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index ce5a5641b6dd..7a979db81f09 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
int ret = 0, opps_index = 0, cpu, nb_cpus;
struct freq_table *freq_tables;
struct device_node *node;
- struct cpumask cpus;
+ static struct cpumask cpus;
node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
NULL);
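With large NR_CPUS a struct cpumask can run to a kilobyte (NR_CPUS=8192), so keeping one on the stack of an __init function invites a stack-frame warning; making it static moves it into .bss, which is harmless for a run-once init path. The dynamic alternative, where a static will not do, is cpumask_var_t:

	cpumask_var_t cpus;

	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
		return -ENOMEM;
	/* ... cpumask_set_cpu(), cpumask_weight(), ... */
	free_cpumask_var(cpus);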
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index cac379ba006d..18942bfe9c95 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -166,6 +166,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm6375", },
{ .compatible = "qcom,sm7225", },
+ { .compatible = "qcom,sm7325", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
{ .compatible = "qcom,sm8350", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 6532c4d71338..983443396f8f 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -69,7 +69,6 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
static const char *find_supply_name(struct device *dev)
{
struct device_node *np __free(device_node) = of_node_get(dev->of_node);
- struct property *pp;
int cpu = dev->id;
/* This must be valid for sure */
@@ -77,14 +76,10 @@ static const char *find_supply_name(struct device *dev)
return NULL;
/* Try "cpu0" for older DTs */
- if (!cpu) {
- pp = of_find_property(np, "cpu0-supply", NULL);
- if (pp)
- return "cpu0";
- }
+ if (!cpu && of_property_present(np, "cpu0-supply"))
+ return "cpu0";
- pp = of_find_property(np, "cpu-supply", NULL);
- if (pp)
+ if (of_property_present(np, "cpu-supply"))
return "cpu";
dev_dbg(dev, "no regulator for cpu%d\n", cpu);
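of_property_present() is the bool-returning helper this series substitutes wherever an of_find_property()/of_get_property() result was only ever NULL-checked (pmac64, sti and sun50i below get the same treatment). Before and after, side by side:

	/* before: the struct property pointer is fetched just to test it */
	if (of_find_property(np, "cpu-supply", NULL))
		return "cpu";

	/* after: the intent is explicit and no pointer leaks out */
	if (of_property_present(np, "cpu-supply"))
		return "cpu";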
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 04fc786dd2c0..f98c9438760c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -575,30 +575,11 @@ unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
return policy->transition_delay_us;
latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (latency) {
- unsigned int max_delay_us = 2 * MSEC_PER_SEC;
+ if (latency)
+ /* Give a 50% breathing room between updates */
+ return latency + (latency >> 1);
- /*
- * If the platform already has high transition_latency, use it
- * as-is.
- */
- if (latency > max_delay_us)
- return latency;
-
- /*
- * For platforms that can change the frequency very fast (< 2
- * us), the above formula gives a decent transition delay. But
- * for platforms where transition_latency is in milliseconds, it
- * ends up giving unrealistic values.
- *
- * Cap the default transition delay to 2 ms, which seems to be
- * a reasonable amount of time after which we should reevaluate
- * the frequency.
- */
- return min(latency * LATENCY_MULTIPLIER, max_delay_us);
- }
-
- return LATENCY_MULTIPLIER;
+ return USEC_PER_MSEC;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
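The replacement heuristic is deliberately simple: a platform that reports a transition latency gets that latency plus 50% as the delay between frequency updates, and one that reports none gets USEC_PER_MSEC (numerically the same 1000 us the old LATENCY_MULTIPLIER fallback produced, now spelled with the right unit macro). A worked example:

	/* latency = 300 us  ->  delay = 300 + (300 >> 1) = 450 us */
	unsigned int latency = 300;
	unsigned int delay = latency + (latency >> 1);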
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 392a8000b238..aaea9a39eced 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -16,6 +16,7 @@
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched/cpufreq.h>
+#include <linux/sched/smt.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
@@ -215,6 +216,7 @@ struct global_params {
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
+ * @capacity_perf: Highest perf used for scale invariance
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
@@ -253,6 +255,7 @@ struct cpudata {
u64 hwp_req_cached;
u64 hwp_cap_cached;
u64 last_io_update;
+ unsigned int capacity_perf;
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
@@ -295,6 +298,7 @@ static int hwp_mode_bdw __ro_after_init;
static bool per_cpu_limits __ro_after_init;
static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;
+static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -934,6 +938,139 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
+static struct cpudata *hybrid_max_perf_cpu __read_mostly;
+/*
+ * Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
+ * and the x86 arch scale-invariance information from concurrent updates.
+ */
+static DEFINE_MUTEX(hybrid_capacity_lock);
+
+static void hybrid_set_cpu_capacity(struct cpudata *cpu)
+{
+ arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
+ hybrid_max_perf_cpu->capacity_perf,
+ cpu->capacity_perf,
+ cpu->pstate.max_pstate_physical);
+
+ pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
+ cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
+ cpu->pstate.max_pstate_physical);
+}
+
+static void hybrid_clear_cpu_capacity(unsigned int cpunum)
+{
+ arch_set_cpu_capacity(cpunum, 1, 1, 1, 1);
+}
+
+static void hybrid_get_capacity_perf(struct cpudata *cpu)
+{
+ if (READ_ONCE(global.no_turbo)) {
+ cpu->capacity_perf = cpu->pstate.max_pstate_physical;
+ return;
+ }
+
+ cpu->capacity_perf = HWP_HIGHEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
+}
+
+static void hybrid_set_capacity_of_cpus(void)
+{
+ int cpunum;
+
+ for_each_online_cpu(cpunum) {
+ struct cpudata *cpu = all_cpu_data[cpunum];
+
+ if (cpu)
+ hybrid_set_cpu_capacity(cpu);
+ }
+}
+
+static void hybrid_update_cpu_capacity_scaling(void)
+{
+ struct cpudata *max_perf_cpu = NULL;
+ unsigned int max_cap_perf = 0;
+ int cpunum;
+
+ for_each_online_cpu(cpunum) {
+ struct cpudata *cpu = all_cpu_data[cpunum];
+
+ if (!cpu)
+ continue;
+
+ /*
+ * During initialization, CPU performance at full capacity needs
+ * to be determined.
+ */
+ if (!hybrid_max_perf_cpu)
+ hybrid_get_capacity_perf(cpu);
+
+ /*
+ * If hybrid_max_perf_cpu is not NULL at this point, it is
+ * being replaced, so don't take it into account when looking
+ * for the new one.
+ */
+ if (cpu == hybrid_max_perf_cpu)
+ continue;
+
+ if (cpu->capacity_perf > max_cap_perf) {
+ max_cap_perf = cpu->capacity_perf;
+ max_perf_cpu = cpu;
+ }
+ }
+
+ if (max_perf_cpu) {
+ hybrid_max_perf_cpu = max_perf_cpu;
+ hybrid_set_capacity_of_cpus();
+ } else {
+ pr_info("Found no CPUs with nonzero maximum performance\n");
+ /* Revert to the flat CPU capacity structure. */
+ for_each_online_cpu(cpunum)
+ hybrid_clear_cpu_capacity(cpunum);
+ }
+}
+
+static void __hybrid_init_cpu_capacity_scaling(void)
+{
+ hybrid_max_perf_cpu = NULL;
+ hybrid_update_cpu_capacity_scaling();
+}
+
+static void hybrid_init_cpu_capacity_scaling(void)
+{
+ bool disable_itmt = false;
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ /*
+ * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
+ * scaling has been enabled already and the driver is just changing the
+ * operation mode.
+ */
+ if (hybrid_max_perf_cpu) {
+ __hybrid_init_cpu_capacity_scaling();
+ goto unlock;
+ }
+
+ /*
+ * On hybrid systems, use asym capacity instead of ITMT, but because
+ * the capacity of SMT threads is not deterministic even approximately,
+ * do not do that when SMT is in use.
+ */
+ if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
+ __hybrid_init_cpu_capacity_scaling();
+ disable_itmt = true;
+ }
+
+unlock:
+ mutex_unlock(&hybrid_capacity_lock);
+
+ /*
+ * Disabling ITMT causes sched domains to be rebuilt to disable asym
+ * packing and enable asym capacity.
+ */
+ if (disable_itmt)
+ sched_clear_itmt_support();
+}
+
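The capacity each CPU ends up reporting to the scheduler is its capacity_perf normalized against the highest-performing core, so the biggest core reads SCHED_CAPACITY_SCALE (1024). A sketch of the normalization implied by the arch_set_cpu_capacity() arguments above; the actual scaling lives on the arch side and is not part of this diff:

	/* e.g. an E-core with perf 280 against a P-core maximum of 380:
	 * (280 << 10) / 380 = 754, i.e. about 74% of full capacity */
	unsigned long cap = div_u64((u64)cpu->capacity_perf << SCHED_CAPACITY_SHIFT,
				    hybrid_max_perf_cpu->capacity_perf);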
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
@@ -962,6 +1099,43 @@ static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
}
}
+static void hybrid_update_capacity(struct cpudata *cpu)
+{
+ unsigned int max_cap_perf;
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (!hybrid_max_perf_cpu)
+ goto unlock;
+
+ /*
+ * The maximum performance of the CPU may have changed, but assume
+ * that the performance of the other CPUs has not changed.
+ */
+ max_cap_perf = hybrid_max_perf_cpu->capacity_perf;
+
+ intel_pstate_get_hwp_cap(cpu);
+
+ hybrid_get_capacity_perf(cpu);
+ /* Should hybrid_max_perf_cpu be replaced by this CPU? */
+ if (cpu->capacity_perf > max_cap_perf) {
+ hybrid_max_perf_cpu = cpu;
+ hybrid_set_capacity_of_cpus();
+ goto unlock;
+ }
+
+ /* If this CPU is hybrid_max_perf_cpu, should it be replaced? */
+ if (cpu == hybrid_max_perf_cpu && cpu->capacity_perf < max_cap_perf) {
+ hybrid_update_cpu_capacity_scaling();
+ goto unlock;
+ }
+
+ hybrid_set_cpu_capacity(cpu);
+
+unlock:
+ mutex_unlock(&hybrid_capacity_lock);
+}
+
static void intel_pstate_hwp_set(unsigned int cpu)
{
struct cpudata *cpu_data = all_cpu_data[cpu];
@@ -1070,6 +1244,22 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (!hybrid_max_perf_cpu) {
+ mutex_unlock(&hybrid_capacity_lock);
+
+ return;
+ }
+
+ if (hybrid_max_perf_cpu == cpu)
+ hybrid_update_cpu_capacity_scaling();
+
+ mutex_unlock(&hybrid_capacity_lock);
+
+ /* Reset the capacity of the CPU going offline to the initial value. */
+ hybrid_clear_cpu_capacity(cpu->cpu);
}
#define POWER_CTL_EE_ENABLE 1
@@ -1165,21 +1355,46 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
static void intel_pstate_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpudata *cpudata;
if (!policy)
return;
- __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
+ cpudata = all_cpu_data[cpu];
+
+ __intel_pstate_update_max_freq(cpudata, policy);
+
+ /* Prevent the driver from being unregistered now. */
+ mutex_lock(&intel_pstate_driver_lock);
cpufreq_cpu_release(policy);
+
+ hybrid_update_capacity(cpudata);
+
+ mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_update_limits_for_all(void)
{
int cpu;
- for_each_possible_cpu(cpu)
- intel_pstate_update_limits(cpu);
+ for_each_possible_cpu(cpu) {
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+
+ if (!policy)
+ continue;
+
+ __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
+
+ cpufreq_cpu_release(policy);
+ }
+
+ mutex_lock(&hybrid_capacity_lock);
+
+ if (hybrid_max_perf_cpu)
+ __hybrid_init_cpu_capacity_scaling();
+
+ mutex_unlock(&hybrid_capacity_lock);
}
/************************** sysfs begin ************************/
@@ -1618,6 +1833,13 @@ static void intel_pstate_notify_work(struct work_struct *work)
__intel_pstate_update_max_freq(cpudata, policy);
cpufreq_cpu_release(policy);
+
+ /*
+ * The driver will not be unregistered while this function is
+ * running, so update the capacity without acquiring the driver
+ * lock.
+ */
+ hybrid_update_capacity(cpudata);
}
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
@@ -2034,8 +2256,10 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_cpu_scaling) {
cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
- if (cpu->pstate.scaling != perf_ctl_scaling)
+ if (cpu->pstate.scaling != perf_ctl_scaling) {
intel_pstate_hybrid_hwp_adjust(cpu);
+ hwp_is_hybrid = true;
+ }
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
@@ -2425,6 +2649,10 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_ICELAKE_X, core_funcs),
X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
+ X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
{}
};
#endif
@@ -2703,6 +2931,8 @@ static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
*/
intel_pstate_hwp_reenable(cpu);
cpu->suspended = false;
+
+ hybrid_update_capacity(cpu);
}
return 0;
@@ -3143,6 +3373,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
+ hybrid_init_cpu_capacity_scaling();
+
return 0;
}
@@ -3405,6 +3637,7 @@ static const struct x86_cpu_id intel_epp_default[] = {
*/
X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
179, 64, 16)),
X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index 5f79b6de127c..6b5e6798d9a2 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -176,7 +176,7 @@ static DEFINE_PER_CPU(struct loongson3_freq_data *, freq_data);
static inline int do_service_request(u32 id, u32 info, u32 cmd, u32 val, u32 extra)
{
int retries;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = raw_smp_processor_id();
unsigned int package = cpu_data[cpu].package;
union smc_message msg, last;
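smp_processor_id() fires a debug warning when called from preemptible context, while raw_smp_processor_id() is the variant for callers that can tolerate the task migrating after the read; here the id is only used to look up a package for the service request. The distinction in brief:

	cpu = smp_processor_id();	/* caller must have preemption disabled */
	cpu = raw_smp_processor_id();	/* allowed anywhere; value may go stale */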
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index f9306410a07f..690da85c4865 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -238,4 +238,5 @@ bail_noprops:
module_init(maple_cpufreq_init);
+MODULE_DESCRIPTION("cpufreq driver for Maple 970FX/970MP boards");
MODULE_LICENSE("GPL");
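The MODULE_DESCRIPTION() additions in this and the following PowerPC drivers quiet the "missing MODULE_DESCRIPTION()" warning that modpost emits on W=1 builds; the string also becomes visible in modinfo output:

	MODULE_DESCRIPTION("cpufreq driver for Maple 970FX/970MP boards");
	MODULE_LICENSE("GPL");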
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 3a1aadaa723c..663f61565cf7 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -738,7 +738,7 @@ static const struct mtk_cpufreq_platform_data mt8516_platform_data = {
};
/* List of machines supported by this driver */
-static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+static const struct of_device_id mtk_cpufreq_machines[] __initconst __maybe_unused = {
{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 3458d5cc9b7f..de8be0a8932d 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -28,9 +28,6 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <asm/smp_plat.h>
-#include <asm/cpu.h>
-
/* OPP tolerance in percentage */
#define OPP_TOLERANCE 4
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index ee925b53b6b9..5fc9cb480516 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -269,5 +269,6 @@ static void __exit pas_cpufreq_exit(void)
module_init(pas_cpufreq_init);
module_exit(pas_cpufreq_exit);
+MODULE_DESCRIPTION("cpufreq driver for PA Semi PWRficient");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 2cd2b06849a2..74ff6c47df29 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -505,7 +505,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
continue;
if (strcmp(loc, "CPU CLOCK"))
continue;
- if (!of_get_property(hwclock, "platform-get-frequency", NULL))
+ if (!of_property_present(hwclock, "platform-get-frequency"))
continue;
break;
}
@@ -671,4 +671,5 @@ static int __init g5_cpufreq_init(void)
module_init(g5_cpufreq_init);
+MODULE_DESCRIPTION("cpufreq driver for SMU & 970FX based G5 Macs");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 50c62929f7ca..8de759247771 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -692,7 +692,7 @@ static void gpstate_timer_handler(struct timer_list *t)
}
/*
- * If PMCR was last updated was using fast_swtich then
+ * If PMCR was last updated using fast_switch then
* We may have wrong in gpstate->last_lpstate_idx
* value. Hence, read from PMCR to get correct data.
*/
@@ -1160,5 +1160,6 @@ static void __exit powernv_cpufreq_exit(void)
}
module_exit(powernv_cpufreq_exit);
+MODULE_DESCRIPTION("cpufreq driver for IBM/OpenPOWER powernv systems");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 5ee4c7bfdcc5..98595b3ea13f 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -168,5 +168,6 @@ static void __exit cbe_cpufreq_exit(void)
module_init(cbe_cpufreq_init);
module_exit(cbe_cpufreq_exit);
+MODULE_DESCRIPTION("cpufreq driver for Cell BE processors");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 370fe6a0104b..900d6844c43d 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 939702dfa73f..703308fb891a 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -611,7 +611,7 @@ static struct platform_driver qcom_cpufreq_driver = {
},
};
-static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
+static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_unused = {
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
{ .compatible = "qcom,msm8909", .data = &match_data_msm8909 },
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 78b875db6b66..d8ab5b01d46d 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -171,10 +171,9 @@ static struct cpufreq_driver spear_cpufreq_driver = {
static int spear_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
- const struct property *prop;
struct cpufreq_frequency_table *freq_tbl;
- const __be32 *val;
- int cnt, i, ret;
+ u32 val;
+ int cnt, ret, i = 0;
np = of_cpu_device_node_get(0);
if (!np) {
@@ -186,26 +185,23 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
&spear_cpufreq.transition_latency))
spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
- prop = of_find_property(np, "cpufreq_tbl", NULL);
- if (!prop || !prop->value) {
+ cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
+ if (cnt <= 0) {
pr_err("Invalid cpufreq_tbl\n");
ret = -ENODEV;
goto out_put_node;
}
- cnt = prop->length / sizeof(u32);
- val = prop->value;
-
freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
if (!freq_tbl) {
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < cnt; i++)
- freq_tbl[i].frequency = be32_to_cpup(val++);
+ of_property_for_each_u32(np, "cpufreq_tbl", val)
+ freq_tbl[i++].frequency = val;
- freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+ freq_tbl[cnt].frequency = CPUFREQ_TABLE_END;
spear_cpufreq.freq_tbl = freq_tbl;
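of_property_for_each_u32(), in its current three-argument form, replaces the hand-rolled property-length arithmetic and __be32 cursor walk, doing the endianness conversion internally. A minimal sketch of the iterator:

	u32 v;
	int n = 0;

	of_property_for_each_u32(np, "cpufreq_tbl", v)
		pr_debug("entry %d: %u kHz\n", n++, v);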
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index 8e2e703c3865..b15b3142b5fe 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -267,7 +267,7 @@ static int __init sti_cpufreq_init(void)
goto skip_voltage_scaling;
}
- if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) {
+ if (!of_property_present(ddata.cpu->of_node, "operating-points-v2")) {
dev_err(ddata.cpu, "OPP-v2 not supported\n");
goto skip_voltage_scaling;
}
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 95ac8d46c156..293921acec93 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -146,7 +146,7 @@ static bool dt_has_supported_hw(void)
return false;
for_each_child_of_node_scoped(np, opp) {
- if (of_find_property(opp, "opp-supported-hw", NULL)) {
+ if (of_property_present(opp, "opp-supported-hw")) {
has_opp_supported_hw = true;
break;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 4d3f27958fbd..ba621ce1cdda 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -16,6 +16,7 @@
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#define REVISION_MASK 0xF
#define REVISION_SHIFT 28
@@ -90,6 +91,9 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_shift;
unsigned long rev_offset;
bool multi_regulator;
+/* Backward compatibility hack: Might have missing syscon */
+#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1
+ u8 quirks;
};
struct ti_cpufreq_data {
@@ -254,6 +258,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = {
.efuse_mask = BIT(3),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@@ -281,6 +286,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = {
.efuse_mask = BIT(9),
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = true,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
};
/*
@@ -295,6 +301,14 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
.efuse_mask = 0,
.rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
.multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING,
+};
+
+static const struct soc_device_attribute k3_cpufreq_soc[] = {
+ { .family = "AM62X", .revision = "SR1.0" },
+ { .family = "AM62AX", .revision = "SR1.0" },
+ { .family = "AM62PX", .revision = "SR1.0" },
+ { /* sentinel */ }
};
static struct ti_cpufreq_soc_data am625_soc_data = {
@@ -340,7 +354,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
- if (ret == -EIO) {
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->efuse_offset, 4);
@@ -378,10 +392,20 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
struct device *dev = opp_data->cpu_dev;
u32 revision;
int ret;
+ if (soc_device_match(k3_cpufreq_soc)) {
+ /*
+ * Since the SR is 1.0, hard-code revision_value as 0x1
+ * here. This avoids reusing the same register that already
+ * provides the required information via socinfo anyway.
+ */
+ *revision_value = 0x1;
+ goto done;
+ }
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
&revision);
- if (ret == -EIO) {
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
opp_data->soc_data->rev_offset, 4);
@@ -400,6 +424,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
*revision_value = BIT((revision >> REVISION_SHIFT) & REVISION_MASK);
+done:
return 0;
}
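soc_device_match() returns the first matching table entry or NULL, so k3_cpufreq_soc[] works as an allow-list of SR1.0 parts that can skip the syscon read entirely; and with quirks as a bitmask, further per-SoC workarounds compose without growing new bool members. A condensed sketch of the two gates, using the names from this patch:

	if (soc_device_match(k3_cpufreq_soc)) {		/* SR1.0 K3 part */
		*revision_value = 0x1;
		goto done;
	}

	ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset, &revision);
	if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO)
		/* legacy OMAP3: not a syscon register, use a raw ioremap() */;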
@@ -419,7 +444,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
return 0;
}
-static const struct of_device_id ti_cpufreq_of_match[] = {
+static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,am33xx", .data = &am3x_soc_data, },
{ .compatible = "ti,am3517", .data = &am3517_soc_data, },
{ .compatible = "ti,am43", .data = &am4x_soc_data, },