author     Andrew Murray <andrew.murray@arm.com>    2019-04-09 21:22:13 +0200
committer  Marc Zyngier <marc.zyngier@arm.com>      2019-04-24 16:35:58 +0200
commit     d1947bc4bc63e56014bb4d812e0db89944ed4a0f
tree       61fe62bd264ccd2f02c460fd7929a9b11dd82ef1 /arch/arm64/kernel/perf_event.c
parent     arm64: KVM: Add accessors to track guest/host only counters
download   linux-d1947bc4bc63e56014bb4d812e0db89944ed4a0f.tar.xz
           linux-d1947bc4bc63e56014bb4d812e0db89944ed4a0f.zip
arm64: arm_pmu: Add !VHE support for exclude_host/exclude_guest attributes
Add support for the :G and :H attributes in perf by handling the
exclude_host/exclude_guest event attributes.

We notify KVM of the counters that we wish to be enabled or disabled on
guest entry/exit, and thus defer starting or stopping those events based
on their event attributes.

With !VHE we switch the counters between host/guest at EL2. When using
:G (exclude_host), we can eliminate counters counting host events on the
boundaries of guest entry/exit by also filtering out EL2. When using
!exclude_hv, there is a small blackout window at guest entry/exit where
host events are not captured.

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
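For context, the kvm_set_pmu_events()/kvm_clr_pmu_events() accessors used in
the diff below come from the parent commit ("arm64: KVM: Add accessors to
track guest/host only counters"). A minimal sketch of the tracking scheme
they implement, not the exact kernel code, assuming a per-cpu pair of counter
bitmasks:

#include <linux/percpu.h>
#include <linux/perf_event.h>

/* Sketch only: the real definitions live in the parent commit. */
struct kvm_pmu_events {
	u32 events_host;	/* counters enabled only while the host runs */
	u32 events_guest;	/* counters enabled only while the guest runs */
};

static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

/* Record which side(s) of the guest entry/exit boundary want these counters */
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_pmu_events *pmu = this_cpu_ptr(&kvm_pmu_events);

	if (!attr->exclude_host)
		pmu->events_host |= set;
	if (!attr->exclude_guest)
		pmu->events_guest |= set;
}

static inline void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_pmu_events *pmu = this_cpu_ptr(&kvm_pmu_events);

	pmu->events_host &= ~clr;
	pmu->events_guest &= ~clr;
}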
Diffstat (limited to 'arch/arm64/kernel/perf_event.c')
-rw-r--r--  arch/arm64/kernel/perf_event.c | 43
1 file changed, 36 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cccf4fc86d67..6bb28aaf5aea 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -26,6 +26,7 @@
 
 #include <linux/acpi.h>
 #include <linux/clocksource.h>
+#include <linux/kvm_host.h>
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
@@ -528,11 +529,21 @@ static inline int armv8pmu_enable_counter(int idx)
 
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
+	struct perf_event_attr *attr = &event->attr;
 	int idx = event->hw.idx;
+	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
-	armv8pmu_enable_counter(idx);
 	if (armv8pmu_event_is_chained(event))
-		armv8pmu_enable_counter(idx - 1);
+		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+	kvm_set_pmu_events(counter_bits, attr);
+
+	/* We rely on the hypervisor switch code to enable guest counters */
+	if (!kvm_pmu_counter_deferred(attr)) {
+		armv8pmu_enable_counter(idx);
+		if (armv8pmu_event_is_chained(event))
+			armv8pmu_enable_counter(idx - 1);
+	}
 }
 
 static inline int armv8pmu_disable_counter(int idx)
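The kvm_pmu_counter_deferred() predicate used above is introduced alongside
this change in asm/kvm_host.h. Approximately (treat this as a sketch rather
than the verbatim kernel source), it defers a counter only on a !VHE host
when the event excludes the host:

/*
 * Sketch of the deferral predicate: on a !VHE host, an event that
 * excludes the host cannot be enabled here without also counting the
 * host, so starting/stopping it is left to the EL2 switch code.
 */
#define kvm_pmu_counter_deferred(attr)			\
	({						\
		!has_vhe() && (attr)->exclude_host;	\
	})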
@@ -545,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx)
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct perf_event_attr *attr = &event->attr;
 	int idx = hwc->idx;
+	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
 	if (armv8pmu_event_is_chained(event))
-		armv8pmu_disable_counter(idx - 1);
-	armv8pmu_disable_counter(idx);
+		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+	kvm_clr_pmu_events(counter_bits);
+
+	/* We rely on the hypervisor switch code to disable guest counters */
+	if (!kvm_pmu_counter_deferred(attr)) {
+		if (armv8pmu_event_is_chained(event))
+			armv8pmu_disable_counter(idx - 1);
+		armv8pmu_disable_counter(idx);
+	}
 }
 
 static inline int armv8pmu_enable_intens(int idx)
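For reference, BIT(ARMV8_IDX_TO_COUNTER(idx)) turns a perf counter index into
the matching PMCNTEN{SET,CLR}_EL0 enable bit, using helpers already defined
in this file. Perf index 0 is the cycle counter and event counters start at
index 1; the masked subtraction wraps index 0 around to bit 31, the
architected cycle counter enable bit:

#define ARMV8_IDX_CYCLE_COUNTER	0
#define ARMV8_IDX_COUNTER0	1

/* idx 1..N map to counter bits 0..N-1; idx 0 wraps to bit 31 (cycle counter) */
#define ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)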
@@ -829,11 +850,16 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 		if (!attr->exclude_kernel)
 			config_base |= ARMV8_PMU_INCLUDE_EL2;
 	} else {
-		if (attr->exclude_kernel)
-			config_base |= ARMV8_PMU_EXCLUDE_EL1;
-		if (!attr->exclude_hv)
+		if (!attr->exclude_hv && !attr->exclude_host)
 			config_base |= ARMV8_PMU_INCLUDE_EL2;
 	}
+
+	/*
+	 * Filter out !VHE kernels and guest kernels
+	 */
+	if (attr->exclude_kernel)
+		config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
 
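As a worked example of the filter above, consider what perf's :G modifier
amounts to. A hypothetical userspace attribute for a guest-only cycle counter
sets exclude_host, so the filter leaves ARMV8_PMU_INCLUDE_EL2 clear, and on a
!VHE host the counter is deferred to the hypervisor switch code instead of
being enabled immediately:

#include <linux/perf_event.h>

/* Hypothetical userspace sketch: roughly what 'perf stat -e cycles:G' requests */
struct perf_event_attr attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.exclude_host	= 1,	/* :G, count only while the guest runs */
	/* .exclude_guest = 1 would be :H, count only while the host runs */
};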
@@ -863,6 +889,9 @@ static void armv8pmu_reset(void *info)
 		armv8pmu_disable_intens(idx);
 	}
 
+	/* Clear the counters we flip at guest entry/exit */
+	kvm_clr_pmu_events(U32_MAX);
+
 	/*
 	 * Initialize & Reset PMNC. Request overflow interrupt for
 	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
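The other half of the mechanism, applying these bitmasks at EL2, lands in a
later patch of this series. A minimal sketch of the guest-entry side
(function and field names are approximate), which stops host-only counters
and starts guest-only ones by writing the tracked masks to the PMU enable
registers:

/* Sketch (names approximate) of the !VHE guest-entry switch. */
static bool __hyp_text __pmu_switch_to_guest_state(struct kvm_cpu_context *host_ctxt)
{
	u32 clr = host_ctxt->pmu_events.events_host;
	u32 set = host_ctxt->pmu_events.events_guest;

	if (clr)
		write_sysreg(clr, pmcntenclr_el0);	/* stop host-only counters */
	if (set)
		write_sysreg(set, pmcntenset_el0);	/* start guest-only counters */

	/* callers synchronize (isb) only if anything changed */
	return (clr || set);
}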