| author    | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-07-15 16:36:50 +0200 |
|-----------|----------------------------------------|---------------------------|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-07-15 16:36:50 +0200 |
| commit    | ca6e4405779ed56ebac941570615abd667c72c02 | |
| tree      | 1b4fef494bc8d64ed889d52c7aabb4e09e4ffef3 /arch/x86/kvm/pmu.h | |
| parent    | drm/i915: always disable irqs in intel_pipe_update_start | |
| parent    | drm/i915: Do not call intel_crtc_disable if the crtc is already disabled. | |
Merge tag 'drm-intel-fixes-2015-07-15' into drm-intel-next-queued
Backmerge the fixes since it's getting out of hand again with the massive
split between -next and 4.2-rc due to the atomic work. All the bugfixes in
4.2-rc are already addressed (by converting more code towards atomic
instead of minimal duct-tape), so just always pick the version from -next
for the conflicts in the modeset code.
All the other conflicts are just adjacent lines changed.
Conflicts:
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_ringbuffer.h
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'arch/x86/kvm/pmu.h')
-rw-r--r-- | arch/x86/kvm/pmu.h | 118
1 file changed, 118 insertions(+), 0 deletions(-)
```diff
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
new file mode 100644
index 000000000000..f96e1f962587
--- /dev/null
+++ b/arch/x86/kvm/pmu.h
@@ -0,0 +1,118 @@
+#ifndef __KVM_X86_PMU_H
+#define __KVM_X86_PMU_H
+
+#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
+#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
+#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
+
+/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
+#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
+
+struct kvm_event_hw_type_mapping {
+	u8 eventsel;
+	u8 unit_mask;
+	unsigned event_type;
+};
+
+struct kvm_pmu_ops {
+	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
+				    u8 unit_mask);
+	unsigned (*find_fixed_event)(int idx);
+	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
+	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
+	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
+	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
+	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
+	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+	void (*refresh)(struct kvm_vcpu *vcpu);
+	void (*init)(struct kvm_vcpu *vcpu);
+	void (*reset)(struct kvm_vcpu *vcpu);
+};
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+	return pmu->counter_bitmask[pmc->type];
+}
+
+static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
+{
+	u64 counter, enabled, running;
+
+	counter = pmc->counter;
+	if (pmc->perf_event)
+		counter += perf_event_read_value(pmc->perf_event,
+						 &enabled, &running);
+	/* FIXME: Scaling needed? */
+	return counter & pmc_bitmask(pmc);
+}
+
+static inline void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event) {
+		pmc->counter = pmc_read_counter(pmc);
+		perf_event_release_kernel(pmc->perf_event);
+		pmc->perf_event = NULL;
+	}
+}
+
+static inline bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+	return pmc->type == KVM_PMC_GP;
+}
+
+static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
+{
+	return pmc->type == KVM_PMC_FIXED;
+}
+
+static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
+{
+	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
+}
+
+/* returns general purpose PMC with the specified MSR. Note that it can be
+ * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
+ * parameter to tell them apart.
+ */
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+					 u32 base)
+{
+	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+		return &pmu->gp_counters[msr - base];
+
+	return NULL;
+}
+
+/* returns fixed PMC with the specified MSR */
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+	int base = MSR_CORE_PERF_FIXED_CTR0;
+
+	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+		return &pmu->fixed_counters[msr - base];
+
+	return NULL;
+}
+
+void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
+void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
+
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+
+extern struct kvm_pmu_ops intel_pmu_ops;
+extern struct kvm_pmu_ops amd_pmu_ops;
+#endif /* __KVM_X86_PMU_H */
```
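The two ideas in this header that callers lean on most are the 4-bit EN/PMI field extraction in fixed_ctrl_field() and the base-relative MSR-to-counter lookup in get_gp_pmc(). The sketch below is a hypothetical user-space mock-up, not KVM code: struct toy_pmu, toy_get_gp_pmc() and the fixed-size array are stand-ins invented for this example; only the MSR base values and the bit arithmetic mirror the patch.

```c
/*
 * Stand-alone illustration of two helpers from the new pmu.h:
 * the base-relative MSR-to-counter lookup (get_gp_pmc) and the
 * 4-bit EN/PMI field extraction (fixed_ctrl_field).
 *
 * struct toy_pmu and toy_get_gp_pmc() are simplified stand-ins for
 * this example only; they are NOT KVM's real types or functions.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_P6_PERFCTR0  0xc1   /* architectural GP counter base   */
#define MSR_P6_EVNTSEL0  0x186  /* architectural event-select base */

/* same extraction as fixed_ctrl_field() in the patch */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx) * 4)) & 0xf)

struct toy_pmu {
	unsigned nr_arch_gp_counters;
	uint64_t gp_counters[8];
};

/*
 * Mirrors get_gp_pmc(): the same counter can be reached through either
 * the PERFCTRn or the EVNTSELn MSR range, so the caller passes the base
 * of the range it is decoding and the index is simply msr - base.
 */
static uint64_t *toy_get_gp_pmc(struct toy_pmu *pmu, uint32_t msr, uint32_t base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];
	return NULL;
}

int main(void)
{
	struct toy_pmu pmu = { .nr_arch_gp_counters = 4 };

	/* MSR 0x187 is EVNTSEL1, so the lookup resolves to GP counter 1 */
	uint64_t *pmc = toy_get_gp_pmc(&pmu, MSR_P6_EVNTSEL0 + 1, MSR_P6_EVNTSEL0);
	printf("EVNTSEL1 -> gp counter index %ld\n",
	       pmc ? (long)(pmc - pmu.gp_counters) : -1L);

	/* IA32_FIXED_CTR_CTRL = 0x0b0: fixed counter 1 has EN=0b11, PMI=1 */
	uint64_t fixed_ctrl = 0x0b0;
	printf("fixed counter 1 control field: 0x%x\n",
	       (unsigned)fixed_ctrl_field(fixed_ctrl, 1));
	return 0;
}
```

Passing the base into the lookup is what lets a single helper serve both the PERFCTRn and EVNTSELn MSR ranges without duplicating the range check, which is the design choice the comment above get_gp_pmc() calls out.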