author    Thomas Gleixner <tglx@linutronix.de>    2021-06-23 14:02:04 +0200
committer Borislav Petkov <bp@suse.de>            2021-06-23 18:46:20 +0200
commit    a75c52896b6d42d6600db4d4dd9f7e4bde9218db (patch)
tree      f5d969fc7a9b0799e401fcd67f85e81b150807c1 /arch/x86/events
parent    x86/fpu: Rename "dynamic" XSTATEs to "independent" (diff)
x86/fpu/xstate: Sanitize handling of independent features
The copy functions for the independent features are horribly named, and the supervisor and independent part is just overengineered. The point is that the supplied mask either has to be a subset of the independent features or a subset of the task->fpu.xstate managed features.

Rewrite it so it checks for invalid overlaps of these areas in the caller-supplied feature mask. Rename it so it follows the new naming convention for these operations. Mop up the function documentation.

This allows using that function for other purposes as well.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lkml.kernel.org/r/20210623121455.004880675@linutronix.de
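The rule described above -- a caller-supplied mask must lie entirely within the independent features or entirely within the task->fpu.xstate managed features, never straddle both -- can be modelled in isolation. The standalone C sketch below only illustrates that check; it is not the kernel's actual code. The helper name mask_is_valid() and the way the two feature areas are split are assumptions made for the example.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative XSAVE feature bits; positions chosen for this example. */
#define XFEATURE_MASK_FP   (1ULL << 0)
#define XFEATURE_MASK_SSE  (1ULL << 1)
#define XFEATURE_MASK_LBR  (1ULL << 15)

/* Features managed outside task->fpu.xstate (e.g. arch LBR). */
static const uint64_t independent_features = XFEATURE_MASK_LBR;
/* Features managed as part of task->fpu.xstate. */
static const uint64_t fpstate_features = XFEATURE_MASK_FP | XFEATURE_MASK_SSE;

/*
 * A mask is acceptable when it is non-empty and is a subset of exactly
 * one of the two areas, i.e. it must not overlap both.
 */
static bool mask_is_valid(uint64_t mask)
{
	if (!mask)
		return false;
	if ((mask & ~independent_features) == 0)
		return true;	/* purely independent features */
	if ((mask & ~fpstate_features) == 0)
		return true;	/* purely fpstate-managed features */
	return false;		/* invalid overlap of both areas */
}

int main(void)
{
	assert(mask_is_valid(XFEATURE_MASK_LBR));
	assert(mask_is_valid(XFEATURE_MASK_FP | XFEATURE_MASK_SSE));
	assert(!mask_is_valid(XFEATURE_MASK_LBR | XFEATURE_MASK_SSE));
	assert(!mask_is_valid(0));
	printf("mask checks behave as described in the commit message\n");
	return 0;
}

Since LBR is an independent feature, a mask of XFEATURE_MASK_LBR passes such a check, which is why the perf LBR code in the diff below can call xsaves()/xrstors() directly with that mask.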
Diffstat (limited to 'arch/x86/events')
-rw-r--r--  arch/x86/events/intel/lbr.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 2fa2df3d4e9b..f338645071c8 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -491,7 +491,7 @@ static void intel_pmu_arch_lbr_xrstors(void *ctx)
 {
 	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
 
-	copy_kernel_to_independent_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
+	xrstors(&task_ctx->xsave, XFEATURE_MASK_LBR);
 }
 
 static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
@@ -576,7 +576,7 @@ static void intel_pmu_arch_lbr_xsaves(void *ctx)
 {
 	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
 
-	copy_independent_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
+	xsaves(&task_ctx->xsave, XFEATURE_MASK_LBR);
 }
 
 static void __intel_pmu_lbr_save(void *ctx)
@@ -992,7 +992,7 @@ static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
 		intel_pmu_store_lbr(cpuc, NULL);
 		return;
 	}
-	copy_independent_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);
+	xsaves(&xsave->xsave, XFEATURE_MASK_LBR);
 
 	intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
 }