author	Mark Rutland <mark.rutland@arm.com>	2023-10-16 12:24:35 +0200
committer	Catalin Marinas <catalin.marinas@arm.com>	2023-10-16 15:17:03 +0200
commit	14567ba42c5747f50584eb463ac8029f2a30e0d1 (patch)
tree	cef4d460117c1391d0b588ddc828955ba9f54a29
parent	arm64: Use build-time assertions for cpucap ordering (diff)
arm64: Rename SVE/SME cpu_enable functions
The arm64_cpu_capabilities::cpu_enable() callbacks for SVE, SME, SME2, and FA64 are named with an unusual "${feature}_kernel_enable" pattern rather than the much more common "cpu_enable_${feature}". Now that we only use these as cpu_enable() callbacks, it would be nice to have them match the usual scheme.

This patch renames the cpu_enable() callbacks to match this scheme. At the same time, the comment above cpu_enable_sve() is removed for consistency with the other cpu_enable() callbacks.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
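For readers skimming the diff, the shape the patch converges on looks roughly like this for SVE (a condensed sketch assembled from the hunks below, with unrelated fields and surrounding code elided; the SME, SME2, and FA64 callbacks follow the same pattern):

/* arch/arm64/kernel/fpsimd.c: callback renamed to the cpu_enable_${feature} scheme */
void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
	/* Enable SVE access for EL1 and synchronize the write */
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}

/* arch/arm64/kernel/cpufeature.c: the capability entry wires it up via .cpu_enable */
{
	.desc = "Scalable Vector Extension",
	.type = ARM64_CPUCAP_SYSTEM_FEATURE,
	.capability = ARM64_SVE,
	.cpu_enable = cpu_enable_sve,
	.matches = has_cpuid_feature,
	ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP)
},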
-rw-r--r--	arch/arm64/include/asm/fpsimd.h	8
-rw-r--r--	arch/arm64/kernel/cpufeature.c	8
-rw-r--r--	arch/arm64/kernel/fpsimd.c	12
3 files changed, 12 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index bd7bea92dae0..0cbaa06b394a 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -149,10 +149,10 @@ extern void sme_save_state(void *state, int zt);
extern void sme_load_state(void const *state, int zt);
struct arm64_cpu_capabilities;
-extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
extern u64 read_zcr_features(void);
extern u64 read_smcr_features(void);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e3e0d3d3bd6b..fb828f8c49e3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2425,7 +2425,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Scalable Vector Extension",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SVE,
- .cpu_enable = sve_kernel_enable,
+ .cpu_enable = cpu_enable_sve,
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP)
},
@@ -2678,7 +2678,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME,
.matches = has_cpuid_feature,
- .cpu_enable = sme_kernel_enable,
+ .cpu_enable = cpu_enable_sme,
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP)
},
/* FA64 should be sorted after the base SME capability */
@@ -2687,7 +2687,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME_FA64,
.matches = has_cpuid_feature,
- .cpu_enable = fa64_kernel_enable,
+ .cpu_enable = cpu_enable_fa64,
ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP)
},
{
@@ -2695,7 +2695,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME2,
.matches = has_cpuid_feature,
- .cpu_enable = sme2_kernel_enable,
+ .cpu_enable = cpu_enable_sme2,
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2)
},
#endif /* CONFIG_ARM64_SME */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 48338cf8f90b..45ea9cabbaa4 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1160,11 +1160,7 @@ fail:
panic("Cannot allocate percpu memory for EFI SVE save/restore");
}
-/*
- * Enable SVE for EL1.
- * Intended for use by the cpufeatures code during CPU boot.
- */
-void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
isb();
@@ -1295,7 +1291,7 @@ static void sme_free(struct task_struct *task)
task->thread.sme_state = NULL;
}
-void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
{
/* Set priority for all PEs to architecturally defined minimum */
write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
@@ -1310,7 +1306,7 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
isb();
}
-void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
{
/* This must be enabled after SME */
BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);
@@ -1320,7 +1316,7 @@ void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
SYS_SMCR_EL1);
}
-void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
{
/* This must be enabled after SME */
BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);