| author    | Olof Johansson <olof@lixom.net> | 2012-09-05 07:12:41 +0200 |
|-----------|---------------------------------|---------------------------|
| committer | Olof Johansson <olof@lixom.net> | 2012-09-05 07:12:41 +0200 |
| commit    | cd754736097fa6ce03b3543ec111f7a13a0dd2f2 (patch) | |
| tree      | 0e20b4f55f588dac96d311bc0c514b8d07846d90 /arch/arm/include/asm | |
| parent    | Merge branch 'marco-prepare' of git://gitorious.org/sirfprima2-kernel/sirfpri... (diff) | |
| parent    | ARM: perf: move irq registration into pmu implementation (diff) | |
Merge tag 'arm-perf-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into next/cleanup
From Will Deacon:
A bunch of perf updates for the ARM backend that pave the way for
big.LITTLE support in the future. The separation of CPU and PMU code
is also the start of being able to move some of this stuff under
drivers/.
* tag 'arm-perf-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux:
ARM: perf: move irq registration into pmu implementation
ARM: perf: move CPU-specific PMU handling code into separate file
ARM: perf: prepare for moving CPU PMU code into separate file
ARM: perf: probe devicetree in preference to current CPU
ARM: perf: remove mysterious compiler barrier
ARM: pmu: remove arm_pmu_type enumeration
ARM: pmu: remove unused reservation mechanism
ARM: perf: add devicetree bindings for 11MPcore, A5, A7 and A15 PMUs
ARM: PMU: Add runtime PM Support
Diffstat (limited to 'arch/arm/include/asm')
| mode       | file                              | lines changed |
|------------|-----------------------------------|---------------|
| -rw-r--r-- | arch/arm/include/asm/perf_event.h |  9 |
| -rw-r--r-- | arch/arm/include/asm/pmu.h        | 77 |
2 files changed, 32 insertions, 54 deletions
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index e074948d8143..625cd621a436 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,6 +12,13 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/* Nothing to see here... */
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS      32
+
+#define HW_OP_UNSUPPORTED        0xFFFF
+#define C(_x)                    PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED     0xFFFF
 
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 4432305f4a2a..a26170dce02e 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -16,69 +16,30 @@
 #include <linux/perf_event.h>
 
 /*
- * Types of PMUs that can be accessed directly and require mutual
- * exclusion between profiling tools.
- */
-enum arm_pmu_type {
-	ARM_PMU_DEVICE_CPU	= 0,
-	ARM_NUM_PMU_DEVICES,
-};
-
-/*
  * struct arm_pmu_platdata - ARM PMU platform data
  *
  * @handle_irq: an optional handler which will be called from the
  *	interrupt and passed the address of the low level handler,
  *	and can be used to implement any platform specific handling
  *	before or after calling it.
- * @enable_irq: an optional handler which will be called after
- *	request_irq and be used to handle some platform specific
- *	irq enablement
- * @disable_irq: an optional handler which will be called before
- *	free_irq and be used to handle some platform specific
- *	irq disablement
+ * @runtime_resume: an optional handler which will be called by the
+ *	runtime PM framework following a call to pm_runtime_get().
+ *	Note that if pm_runtime_get() is called more than once in
+ *	succession this handler will only be called once.
+ * @runtime_suspend: an optional handler which will be called by the
+ *	runtime PM framework following a call to pm_runtime_put().
+ *	Note that if pm_runtime_get() is called more than once in
+ *	succession this handler will only be called following the
+ *	final call to pm_runtime_put() that actually disables the
+ *	hardware.
  */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev,
 				  irq_handler_t pmu_handler);
-	void (*enable_irq)(int irq);
-	void (*disable_irq)(int irq);
+	int (*runtime_resume)(struct device *dev);
+	int (*runtime_suspend)(struct device *dev);
 };
 
-#ifdef CONFIG_CPU_HAS_PMU
-
-/**
- * reserve_pmu() - reserve the hardware performance counters
- *
- * Reserve the hardware performance counters in the system for exclusive use.
- * Returns 0 on success or -EBUSY if the lock is already held.
- */
-extern int
-reserve_pmu(enum arm_pmu_type type);
-
-/**
- * release_pmu() - Relinquish control of the performance counters
- *
- * Release the performance counters and allow someone else to use them.
- */
-extern void
-release_pmu(enum arm_pmu_type type);
-
-#else /* CONFIG_CPU_HAS_PMU */
-
-#include <linux/err.h>
-
-static inline int
-reserve_pmu(enum arm_pmu_type type)
-{
-	return -ENODEV;
-}
-
-static inline void
-release_pmu(enum arm_pmu_type type) { }
-
-#endif /* CONFIG_CPU_HAS_PMU */
-
 #ifdef CONFIG_HW_PERF_EVENTS
 
 /* The events for a given PMU register set. */
@@ -103,7 +64,6 @@ struct pmu_hw_events {
 
 struct arm_pmu {
 	struct pmu	pmu;
-	enum arm_pmu_type type;
 	cpumask_t	active_irqs;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
@@ -118,6 +78,8 @@ struct arm_pmu {
 	void		(*start)(void);
 	void		(*stop)(void);
 	void		(*reset)(void *);
+	int		(*request_irq)(irq_handler_t handler);
+	void		(*free_irq)(void);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
@@ -129,7 +91,9 @@ struct arm_pmu {
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+extern const struct dev_pm_ops armpmu_dev_pm_ops;
+
+int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
 
 u64 armpmu_event_update(struct perf_event *event,
 			struct hw_perf_event *hwc,
@@ -139,6 +103,13 @@ int armpmu_event_set_period(struct perf_event *event,
 			    struct hw_perf_event *hwc,
 			    int idx);
 
+int armpmu_map_event(struct perf_event *event,
+		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+						[PERF_COUNT_HW_CACHE_OP_MAX]
+						[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		     u32 raw_event_mask);
+
 #endif /* CONFIG_HW_PERF_EVENTS */
 
 #endif /* __ARM_PMU_H__ */
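The pmu.h hunks above drop the old reserve_pmu()/release_pmu() reservation scheme and the enable_irq/disable_irq hooks in favour of runtime PM callbacks in struct arm_pmu_platdata. As a rough illustration of how platform code might fill in the new layout, here is a minimal sketch of a hypothetical board file; the example_* names, the clock handling and the handler bodies are assumptions for illustration only and are not part of this merge.

```c
/*
 * Hypothetical platform glue for the new arm_pmu_platdata layout.
 * Only the member names (handle_irq, runtime_resume, runtime_suspend)
 * come from the diff above; the clock and handler bodies are assumed.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <asm/pmu.h>

/* Assumed per-SoC PMU clock, obtained elsewhere (e.g. via clk_get() at init). */
static struct clk *pmu_clk;

/* Called by the runtime PM core after the first pm_runtime_get(): power up the PMU. */
static int example_pmu_runtime_resume(struct device *dev)
{
	return clk_prepare_enable(pmu_clk);
}

/* Called by the runtime PM core after the final pm_runtime_put(): power down the PMU. */
static int example_pmu_runtime_suspend(struct device *dev)
{
	clk_disable_unprepare(pmu_clk);
	return 0;
}

/* Optional wrapper around the low-level PMU interrupt handler. */
static irqreturn_t example_pmu_handle_irq(int irq, void *dev,
					  irq_handler_t pmu_handler)
{
	/* Platform-specific acking could go here before the real handler runs. */
	return pmu_handler(irq, dev);
}

static struct arm_pmu_platdata example_pmu_platdata = {
	.handle_irq	 = example_pmu_handle_irq,
	.runtime_resume	 = example_pmu_runtime_resume,
	.runtime_suspend = example_pmu_runtime_suspend,
};
```

In this scheme the structure would be attached as platform data to the CPU PMU device, and armpmu_dev_pm_ops (exported by the diff above) is the dev_pm_ops instance through which the runtime PM core would reach these hooks.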