Diffstat (limited to 'drivers/gpu/drm/i915/i915_perf.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c  582
1 file changed, 466 insertions(+), 116 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0defbb43ceea..0dd597a7a11f 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -204,10 +204,12 @@
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
+#include "gt/uc/intel_guc_slpc.h"
#include "i915_drv.h"
#include "i915_file_private.h"
@@ -286,6 +288,7 @@ static u32 i915_perf_stream_paranoid = true;
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_CLK_RATIO (1<<5)
+#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
*
@@ -320,6 +323,8 @@ static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A12] = { 0, 64 },
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
+ [I915_OAR_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
+ [I915_OA_FORMAT_A24u40_A14u32_B8_C8] = { 5, 256 },
};
#define SAMPLE_OA_REPORT (1<<0)
@@ -462,7 +467,7 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
- int report_size = stream->oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format->size;
unsigned long flags;
bool pollin;
u32 hw_tail;
@@ -599,7 +604,7 @@ static int append_oa_sample(struct i915_perf_stream *stream,
size_t *offset,
const u8 *report)
{
- int report_size = stream->oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format->size;
struct drm_i915_perf_record_header header;
header.type = DRM_I915_PERF_RECORD_SAMPLE;
@@ -649,14 +654,13 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
size_t *offset)
{
struct intel_uncore *uncore = stream->uncore;
- int report_size = stream->oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format->size;
u8 *oa_buf_base = stream->oa_buffer.vaddr;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
u32 head, tail;
- u32 taken;
int ret = 0;
if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
@@ -692,7 +696,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
for (/* none */;
- (taken = OA_TAKEN(tail, head));
+ OA_TAKEN(tail, head);
head = (head + report_size) & mask) {
u8 *report = oa_buf_base + head;
u32 *report32 = (void *)report;
@@ -774,7 +778,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* switches since it's not uncommon for periodic samples to
* identify a switch before any 'context switch' report.
*/
- if (!stream->perf->exclusive_stream->ctx ||
+ if (!stream->ctx ||
stream->specific_ctx_id == ctx_id ||
stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
reason & OAREPORT_REASON_CTX_SWITCH) {
@@ -783,7 +787,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* While filtering for a single context we avoid
* leaking the IDs of other contexts.
*/
- if (stream->perf->exclusive_stream->ctx &&
+ if (stream->ctx &&
stream->specific_ctx_id != ctx_id) {
report32[2] = INVALID_CTX_ID;
}
@@ -943,14 +947,13 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
size_t *offset)
{
struct intel_uncore *uncore = stream->uncore;
- int report_size = stream->oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format->size;
u8 *oa_buf_base = stream->oa_buffer.vaddr;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
u32 head, tail;
- u32 taken;
int ret = 0;
if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
@@ -984,7 +987,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
for (/* none */;
- (taken = OA_TAKEN(tail, head));
+ OA_TAKEN(tail, head);
head = (head + report_size) & mask) {
u8 *report = oa_buf_base + head;
u32 *report32 = (void *)report;
@@ -1233,6 +1236,196 @@ retry:
return stream->pinned_ctx;
}
+static int
+__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
+{
+ u32 *cs, cmd;
+
+ cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
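+ /* On gen8+, MI_SRM takes a 64-bit address: one extra dword, so bump the length field. */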
+ if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ cmd++;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(reg);
+ *cs++ = ggtt_offset;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+
+ err = __store_reg_to_mem(rq, reg, ggtt_offset);
+
+ i915_request_add(rq);
+ if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int
+gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
+{
+ struct i915_vma *scratch;
+ u32 *val;
+ int err;
+
+ scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto err_scratch;
+
+ err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
+ i915_ggtt_offset(scratch));
+ if (err)
+ goto err_scratch;
+
+ val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(val)) {
+ err = PTR_ERR(val);
+ goto err_scratch;
+ }
+
+ *ctx_id = *val;
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+/*
+ * For execlist mode of submission, pick an unused context id:
+ * 0 to (NUM_CONTEXT_TAG - 1) are used by other contexts,
+ * XXX_MAX_CONTEXT_HW_ID is used by the idle context.
+ *
+ * For GuC mode of submission, read the context id from the upper dword of the
+ * EXECLIST_STATUS register. Note that we read this value only once and expect
+ * it to stay fixed for the entire OA use case. There are cases where the
+ * GuC KMD implementation may deregister a context to reuse its context id, but
+ * we prevent that from happening to the OA context by pinning it.
+ */
+static int gen12_get_render_context_id(struct i915_perf_stream *stream)
+{
+ u32 ctx_id, mask;
+ int ret;
+
+ if (intel_engine_uses_guc(stream->engine)) {
+ ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
+ if (ret)
+ return ret;
+
+ mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
+ (GEN12_GUC_SW_CTX_ID_SHIFT - 32);
+ } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
+ ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
+ (XEHP_SW_CTX_ID_SHIFT - 32);
+
+ mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
+ (XEHP_SW_CTX_ID_SHIFT - 32);
+ } else {
+ ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
+ (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
+ (GEN11_SW_CTX_ID_SHIFT - 32);
+ }
+ stream->specific_ctx_id = ctx_id & mask;
+ stream->specific_ctx_id_mask = mask;
+
+ return 0;
+}
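
For reference, the ctx_id/mask pairs above select the software context-id field in the upper dword of the context descriptor, which is what EXECLIST_STATUS_HI reports. A minimal userspace sketch of the same bit math, using illustrative shift/width stand-ins for the intel_lrc_reg.h constants (the 11-bit width matches the 0x7ff idle-context id mentioned in the removed comment further down):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the intel_lrc_reg.h constants. */
    #define SW_CTX_ID_SHIFT   37  /* field position in the 64-bit descriptor */
    #define SW_CTX_ID_WIDTH   11  /* 11 bits -> ids 0..0x7ff */
    #define MAX_CONTEXT_HW_ID ((1u << SW_CTX_ID_WIDTH) - 1)  /* idle context */

    int main(void)
    {
        /* Only the upper 32 bits of the descriptor are compared, hence "- 32". */
        uint32_t mask   = ((1u << SW_CTX_ID_WIDTH) - 1) << (SW_CTX_ID_SHIFT - 32);
        uint32_t ctx_id = (MAX_CONTEXT_HW_ID - 1) << (SW_CTX_ID_SHIFT - 32);

        printf("mask = 0x%08x, ctx_id = 0x%08x\n", mask, ctx_id & mask);
        return 0;
    }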
+
+static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
+{
+ u32 idx = *offset;
+ u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
+ bool found = false;
+
+ idx++;
+ for (; idx < len; idx += 2) {
+ if (state[idx] == reg) {
+ found = true;
+ break;
+ }
+ }
+
+ *offset = idx;
+ return found;
+}
+
+static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
+{
+ u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
+ u32 *state = ce->lrc_reg_state;
+
+ for (offset = 0; offset < len; ) {
+ if (IS_MI_LRI_CMD(state[offset])) {
+ /*
+ * We expect reg-value pairs in the MI_LRI command, so
+ * MI_LRI_LEN() should be even; if not, issue a warning.
+ */
+ drm_WARN_ON(&ce->engine->i915->drm,
+ MI_LRI_LEN(state[offset]) & 0x1);
+
+ if (oa_find_reg_in_lri(state, reg, &offset, len))
+ break;
+ } else {
+ offset++;
+ }
+ }
+
+ return offset < len ? offset : U32_MAX;
+}
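
The walk above relies on MI_LRI packets carrying (register, value) pairs right after the header dword. A self-contained sketch of the same scan, with hypothetical stand-ins for the IS_MI_LRI_CMD()/MI_LRI_LEN() macros (the real ones live in intel_gpu_commands.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the intel_gpu_commands.h macros. */
    #define MI_LRI_CMD        0x11000000u          /* MI_LOAD_REGISTER_IMM opcode */
    #define IS_MI_LRI_CMD(x)  (((x) & 0xff800000u) == MI_LRI_CMD)
    #define MI_LRI_LEN(x)     (((x) & 0xffu) + 1)  /* dwords following the header */

    /* Return the dword offset of 'reg' inside the image, or UINT32_MAX. */
    static uint32_t find_reg_offset(const uint32_t *state, uint32_t len, uint32_t reg)
    {
        for (uint32_t i = 0; i < len; ) {
            if (IS_MI_LRI_CMD(state[i])) {
                uint32_t end = i + MI_LRI_LEN(state[i]);

                /* Payload is (register, value) pairs, so step by 2. */
                for (i++; i < end && i < len; i += 2)
                    if (state[i] == reg)
                        return i;
            } else {
                i++;
            }
        }
        return UINT32_MAX;
    }

    int main(void)
    {
        /* One LRI with two pairs: (0x2340, 0xdead), (0x23b0, 0xbeef). */
        uint32_t image[] = { MI_LRI_CMD | 3, 0x2340, 0xdead, 0x23b0, 0xbeef };

        printf("offset of 0x23b0 = %u\n",
               find_reg_offset(image, sizeof(image) / sizeof(image[0]), 0x23b0));
        return 0;
    }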
+
+static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
+{
+ i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
+ struct i915_perf *perf = &ce->engine->i915->perf;
+ u32 offset = perf->ctx_oactxctrl_offset;
+
+ /* Do this only once. Failure is stored as an offset of U32_MAX. */
+ if (offset)
+ goto exit;
+
+ offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
+ perf->ctx_oactxctrl_offset = offset;
+
+ drm_dbg(&ce->engine->i915->drm,
+ "%s oa ctx control at 0x%08x dword offset\n",
+ ce->engine->name, offset);
+
+exit:
+ return offset && offset != U32_MAX ? 0 : -ENODEV;
+}
+
+static bool engine_supports_mi_query(struct intel_engine_cs *engine)
+{
+ return engine->class == RENDER_CLASS;
+}
+
/**
* oa_get_render_ctx_id - determine and hold ctx hw id
* @stream: An i915-perf stream opened for OA metrics
@@ -1246,11 +1439,27 @@ retry:
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
struct intel_context *ce;
+ int ret = 0;
ce = oa_pin_context(stream);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ if (engine_supports_mi_query(stream->engine)) {
+ /*
+ * We are enabling perf query here. If we can't find the context
+ * offset, just return an error.
+ */
+ ret = set_oa_ctx_ctrl_offset(ce);
+ if (ret) {
+ intel_context_unpin(ce);
+ drm_err(&stream->perf->i915->drm,
+ "Enabling perf query failed for %s\n",
+ stream->engine->name);
+ return ret;
+ }
+ }
+
switch (GRAPHICS_VER(ce->engine->i915)) {
case 7: {
/*
@@ -1292,24 +1501,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 11:
case 12:
- if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) {
- stream->specific_ctx_id_mask =
- ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
- (XEHP_SW_CTX_ID_SHIFT - 32);
- stream->specific_ctx_id =
- (XEHP_MAX_CONTEXT_HW_ID - 1) <<
- (XEHP_SW_CTX_ID_SHIFT - 32);
- } else {
- stream->specific_ctx_id_mask =
- ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
- /*
- * Pick an unused context id
- * 0 - BITS_PER_LONG are used by other contexts
- * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
- */
- stream->specific_ctx_id =
- (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
- }
+ ret = gen12_get_render_context_id(stream);
break;
default:
@@ -1323,7 +1515,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
stream->specific_ctx_id,
stream->specific_ctx_id_mask);
- return 0;
+ return ret;
}
/**
@@ -1375,8 +1567,9 @@ free_noa_wait(struct i915_perf_stream *stream)
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
struct i915_perf *perf = stream->perf;
+ struct intel_gt *gt = stream->engine->gt;
- if (WARN_ON(stream != perf->exclusive_stream))
+ if (WARN_ON(stream != gt->perf.exclusive_stream))
return;
/*
@@ -1385,11 +1578,20 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
*
* See i915_oa_init_reg_state() and lrc_configure_all_contexts()
*/
- WRITE_ONCE(perf->exclusive_stream, NULL);
+ WRITE_ONCE(gt->perf.exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
+ /*
+ * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6.
+ */
+ if (intel_uc_uses_guc_rc(&gt->uc) &&
+ (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)))
+ drm_WARN_ON(&gt->i915->drm,
+ intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc));
+
intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
intel_engine_pm_put(stream->engine);
@@ -1563,6 +1765,7 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_gt *gt = stream->engine->gt;
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
int ret;
@@ -1582,11 +1785,22 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
/* PreHSW required 512K alignment, HSW requires 16M */
- vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
+ vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
}
+
+ /*
+ * PreHSW required 512K alignment.
+ * From HSW onwards, align to the requested size of the OA buffer.
+ */
+ ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to pin OA buffer %d\n", ret);
+ goto err_unref;
+ }
+
stream->oa_buffer.vma = vma;
stream->oa_buffer.vaddr =
@@ -1636,6 +1850,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
static int alloc_noa_wait(struct i915_perf_stream *stream)
{
struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_gt *gt = stream->engine->gt;
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
@@ -1654,6 +1869,9 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
DELTA_TARGET,
N_CS_GPR
};
+ i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
+ MI_PREDICATE_RESULT_2_ENGINE(base) :
+ MI_PREDICATE_RESULT_1(RENDER_RING_BASE);
bo = i915_gem_object_create_internal(i915, 4096);
if (IS_ERR(bo)) {
@@ -1673,12 +1891,16 @@ retry:
* multiple OA config BOs will have a jump to this address and it
* needs to be fixed during the lifetime of the i915/perf stream.
*/
- vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH);
+ vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_ww;
}
+ ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto out_ww;
+
batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
if (IS_ERR(batch)) {
ret = PTR_ERR(batch);
@@ -1691,7 +1913,7 @@ retry:
stream, cs, true /* save */, CS_GPR(i),
INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
cs = save_restore_register(
- stream, cs, true /* save */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE),
+ stream, cs, true /* save */, mi_predicate_result,
INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
/* First timestamp snapshot location. */
@@ -1745,7 +1967,10 @@ retry:
*/
*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
- *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE));
+ *cs++ = i915_mmio_reg_offset(mi_predicate_result);
+
+ if (HAS_MI_SET_PREDICATE(i915))
+ *cs++ = MI_SET_PREDICATE | 1;
/* Restart from the beginning if we had timestamps roll over. */
*cs++ = (GRAPHICS_VER(i915) < 8 ?
@@ -1755,6 +1980,9 @@ retry:
*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
*cs++ = 0;
+ if (HAS_MI_SET_PREDICATE(i915))
+ *cs++ = MI_SET_PREDICATE;
+
/*
* Now add the diff between the two previous timestamps and add it to:
* (((1 << 64) - 1) - delay_ns)
@@ -1782,7 +2010,10 @@ retry:
*/
*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
- *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE));
+ *cs++ = i915_mmio_reg_offset(mi_predicate_result);
+
+ if (HAS_MI_SET_PREDICATE(i915))
+ *cs++ = MI_SET_PREDICATE | 1;
/* Predicate the jump. */
*cs++ = (GRAPHICS_VER(i915) < 8 ?
@@ -1792,13 +2023,16 @@ retry:
*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
*cs++ = 0;
+ if (HAS_MI_SET_PREDICATE(i915))
+ *cs++ = MI_SET_PREDICATE;
+
/* Restore registers. */
for (i = 0; i < N_CS_GPR; i++)
cs = save_restore_register(
stream, cs, false /* restore */, CS_GPR(i),
INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
cs = save_restore_register(
- stream, cs, false /* restore */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE),
+ stream, cs, false /* restore */, mi_predicate_result,
INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
/* And return to the ring. */
@@ -2283,11 +2517,12 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream,
{
int err;
struct intel_context *ce = stream->pinned_ctx;
- u32 format = stream->oa_buffer.format;
+ u32 format = stream->oa_buffer.format->format;
+ u32 offset = stream->perf->ctx_oactxctrl_offset;
struct flex regs_context[] = {
{
GEN8_OACTXCONTROL,
- stream->perf->ctx_oactxctrl_offset + 1,
+ offset + 1,
active ? GEN8_OA_COUNTER_RESUME : 0,
},
};
@@ -2312,12 +2547,13 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream,
},
};
- /* Modify the context image of pinned context with regs_context*/
+ /* Modify the context image of pinned context with regs_context */
err = intel_context_lock_pinned(ce);
if (err)
return err;
- err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
+ err = gen8_modify_context(ce, regs_context,
+ ARRAY_SIZE(regs_context));
intel_context_unlock_pinned(ce);
if (err)
return err;
@@ -2359,10 +2595,11 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_engine_cs *engine;
+ struct intel_gt *gt = stream->engine->gt;
struct i915_gem_context *ctx, *cn;
int err;
- lockdep_assert_held(&stream->perf->lock);
+ lockdep_assert_held(&gt->perf.lock);
/*
* The OA register config is set up through the context image. This image
@@ -2442,6 +2679,7 @@ lrc_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config,
struct i915_active *active)
{
+ u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
@@ -2452,7 +2690,7 @@ lrc_configure_all_contexts(struct i915_perf_stream *stream,
},
{
GEN8_OACTXCONTROL,
- stream->perf->ctx_oactxctrl_offset + 1,
+ ctx_oactxctrl + 1,
},
{ EU_PERF_CNTL0, ctx_flexeuN(0) },
{ EU_PERF_CNTL1, ctx_flexeuN(1) },
@@ -2540,12 +2778,26 @@ static int
gen12_enable_metric_set(struct i915_perf_stream *stream,
struct i915_active *active)
{
+ struct drm_i915_private *i915 = stream->perf->i915;
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
bool periodic = stream->periodic;
u32 period_exponent = stream->period_exponent;
+ u32 sqcnt1;
int ret;
+ /*
+ * Wa_1508761755:xehpsdv, dg2
+ * EU NOA signals behave incorrectly if EU clock gating is enabled.
+ * Disable thread stall DOP gating and EU DOP gating.
+ */
+ if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
+ intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
+ }
+
intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
/* Disable clk ratio reports, like previous Gens. */
_MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
@@ -2563,6 +2815,16 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
: 0);
/*
+ * Initialize Super Queue Internal Cnt Register
+ * Set PMON Enable in order to collect valid metrics.
+ * Enable bytes per clock reporting in OA for XEHPSDV onward.
+ */
+ sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
+ (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
+
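+ /* intel_uncore_rmw(uncore, reg, clear, set): clear no bits, set PMON enable (and OABPC). */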
+ intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
+
+ /*
* Update all contexts prior to writing the mux configurations as we need
* to make sure all slices/subslices are ON before writing to NOA
* registers.
@@ -2611,6 +2873,19 @@ static void gen11_disable_metric_set(struct i915_perf_stream *stream)
static void gen12_disable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
+ struct drm_i915_private *i915 = stream->perf->i915;
+ u32 sqcnt1;
+
+ /*
+ * Wa_1508761755:xehpsdv, dg2
+ * Enable thread stall DOP gating and EU DOP gating.
+ */
+ if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
+ intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
+ _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
+ intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
+ }
/* Reset all contexts' slices/subslices configurations. */
gen12_configure_all_contexts(stream, NULL, NULL);
@@ -2621,6 +2896,12 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
/* Make sure we disable noa to save power. */
intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
+
+ sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
+ (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
+
+ /* Reset PMON Enable to save power. */
+ intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
}
static void gen7_oa_enable(struct i915_perf_stream *stream)
@@ -2630,7 +2911,7 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
u32 ctx_id = stream->specific_ctx_id;
bool periodic = stream->periodic;
u32 period_exponent = stream->period_exponent;
- u32 report_format = stream->oa_buffer.format;
+ u32 report_format = stream->oa_buffer.format->format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -2656,7 +2937,7 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
- u32 report_format = stream->oa_buffer.format;
+ u32 report_format = stream->oa_buffer.format->format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -2682,7 +2963,7 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
static void gen12_oa_enable(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
- u32 report_format = stream->oa_buffer.format;
+ u32 report_format = stream->oa_buffer.format->format;
/*
* If we don't want OA reports from the OA buffer, then we don't even
@@ -2838,6 +3119,30 @@ get_sseu_config(struct intel_sseu *out_sseu,
return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
}
+/*
+ * OA timestamp frequency = CS timestamp frequency on most platforms. On some
+ * platforms the OA unit ignores the CTC_SHIFT and the two timestamps differ. In such
+ * cases, return the adjusted CS timestamp frequency to the user.
+ */
+u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
+{
+ /* Wa_18013179988:dg2 */
+ if (IS_DG2(i915)) {
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+ with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
+ reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);
+
+ shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
+ reg);
+
+ return to_gt(i915)->clock_frequency << (3 - shift);
+ }
+
+ return to_gt(i915)->clock_frequency;
+}
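
A worked example of the Wa_18013179988 adjustment, assuming an illustrative 19.2 MHz CS timestamp frequency (the real value comes from gt->clock_frequency and the CTC shift from RPM_CONFIG0):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t cs_freq = 19200000;  /* illustrative CS timestamp frequency, Hz */

        /* OA frequency = CS frequency << (3 - CTC_SHIFT), per the helper above. */
        for (uint32_t ctc_shift = 0; ctc_shift <= 3; ctc_shift++)
            printf("CTC shift %u -> OA timestamp frequency %u Hz\n",
                   ctc_shift, cs_freq << (3 - ctc_shift));
        return 0;
    }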
+
/**
* i915_oa_stream_init - validate combined props for OA stream and init
* @stream: An i915 perf stream
@@ -2862,7 +3167,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
{
struct drm_i915_private *i915 = stream->perf->i915;
struct i915_perf *perf = stream->perf;
- int format_size;
+ struct intel_gt *gt;
int ret;
if (!props->engine) {
@@ -2870,6 +3175,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
"OA engine not specified\n");
return -EINVAL;
}
+ gt = props->engine->gt;
/*
* If the sysfs metrics/ directory wasn't registered for some
@@ -2900,7 +3206,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* counter reports and marshal to the appropriate client
* we currently only allow exclusive access
*/
- if (perf->exclusive_stream) {
+ if (gt->perf.exclusive_stream) {
drm_dbg(&stream->perf->i915->drm,
"OA unit already in use\n");
return -EBUSY;
@@ -2917,20 +3223,15 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
- format_size = perf->oa_formats[props->oa_format].size;
+ stream->oa_buffer.format = &perf->oa_formats[props->oa_format];
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0))
+ return -EINVAL;
stream->sample_flags = props->sample_flags;
- stream->sample_size += format_size;
-
- stream->oa_buffer.format_size = format_size;
- if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
- return -EINVAL;
+ stream->sample_size += stream->oa_buffer.format->size;
stream->hold_preemption = props->hold_preemption;
- stream->oa_buffer.format =
- perf->oa_formats[props->oa_format].format;
-
stream->periodic = props->oa_periodic;
if (stream->periodic)
stream->period_exponent = props->oa_period_exponent;
@@ -2974,14 +3275,31 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
intel_engine_pm_get(stream->engine);
intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
+ /*
+ * Wa_16011777198:dg2: GuC resets render as part of the Wa. This causes
+ * OA to lose the configuration state. Prevent this by overriding GUCRC
+ * mode.
+ */
+ if (intel_uc_uses_guc_rc(&gt->uc) &&
+ (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))) {
+ ret = intel_guc_slpc_override_gucrc_mode(&gt->uc.guc.slpc,
+ SLPC_GUCRC_MODE_GUCRC_NO_RC6);
+ if (ret) {
+ drm_dbg(&stream->perf->i915->drm,
+ "Unable to override gucrc mode\n");
+ goto err_config;
+ }
+ }
+
ret = alloc_oa_buffer(stream);
if (ret)
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
- perf->sseu = props->sseu;
- WRITE_ONCE(perf->exclusive_stream, stream);
+ stream->engine->gt->perf.sseu = props->sseu;
+ WRITE_ONCE(gt->perf.exclusive_stream, stream);
ret = i915_perf_stream_enable_sync(stream);
if (ret) {
@@ -2999,11 +3317,12 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->poll_check_timer.function = oa_poll_check_timer_cb;
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
+ mutex_init(&stream->lock);
return 0;
err_enable:
- WRITE_ONCE(perf->exclusive_stream, NULL);
+ WRITE_ONCE(gt->perf.exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -3033,7 +3352,7 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
return;
/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
- stream = READ_ONCE(engine->i915->perf.exclusive_stream);
+ stream = READ_ONCE(engine->gt->perf.exclusive_stream);
if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
gen8_update_reg_state_unlocked(ce, stream);
}
@@ -3062,7 +3381,6 @@ static ssize_t i915_perf_read(struct file *file,
loff_t *ppos)
{
struct i915_perf_stream *stream = file->private_data;
- struct i915_perf *perf = stream->perf;
size_t offset = 0;
int ret;
@@ -3086,14 +3404,14 @@ static ssize_t i915_perf_read(struct file *file,
if (ret)
return ret;
- mutex_lock(&perf->lock);
+ mutex_lock(&stream->lock);
ret = stream->ops->read(stream, buf, count, &offset);
- mutex_unlock(&perf->lock);
+ mutex_unlock(&stream->lock);
} while (!offset && !ret);
} else {
- mutex_lock(&perf->lock);
+ mutex_lock(&stream->lock);
ret = stream->ops->read(stream, buf, count, &offset);
- mutex_unlock(&perf->lock);
+ mutex_unlock(&stream->lock);
}
/* We allow the poll checking to sometimes report false positive EPOLLIN
@@ -3140,9 +3458,6 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
* &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
* will be woken for new stream data.
*
- * Note: The &perf->lock mutex has been taken to serialize
- * with any non-file-operation driver hooks.
- *
* Returns: any poll events that are ready without sleeping
*/
static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
@@ -3181,12 +3496,11 @@ static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
- struct i915_perf *perf = stream->perf;
__poll_t ret;
- mutex_lock(&perf->lock);
+ mutex_lock(&stream->lock);
ret = i915_perf_poll_locked(stream, file, wait);
- mutex_unlock(&perf->lock);
+ mutex_unlock(&stream->lock);
return ret;
}
@@ -3285,9 +3599,6 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* @cmd: the ioctl request
* @arg: the ioctl data
*
- * Note: The &perf->lock mutex has been taken to serialize
- * with any non-file-operation driver hooks.
- *
* Returns: zero on success or a negative error code. Returns -EINVAL for
* an unknown ioctl request.
*/
@@ -3325,12 +3636,11 @@ static long i915_perf_ioctl(struct file *file,
unsigned long arg)
{
struct i915_perf_stream *stream = file->private_data;
- struct i915_perf *perf = stream->perf;
long ret;
- mutex_lock(&perf->lock);
+ mutex_lock(&stream->lock);
ret = i915_perf_ioctl_locked(stream, cmd, arg);
- mutex_unlock(&perf->lock);
+ mutex_unlock(&stream->lock);
return ret;
}
@@ -3342,7 +3652,7 @@ static long i915_perf_ioctl(struct file *file,
* Frees all resources associated with the given i915 perf @stream, disabling
* any associated data capture in the process.
*
- * Note: The &perf->lock mutex has been taken to serialize
+ * Note: The &gt->perf.lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*/
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
@@ -3374,10 +3684,16 @@ static int i915_perf_release(struct inode *inode, struct file *file)
{
struct i915_perf_stream *stream = file->private_data;
struct i915_perf *perf = stream->perf;
+ struct intel_gt *gt = stream->engine->gt;
- mutex_lock(&perf->lock);
+ /*
+ * Within this call, we know that the fd is being closed and we have no
+ * other user of stream->lock. Use the perf lock to destroy the stream
+ * here.
+ */
+ mutex_lock(&gt->perf.lock);
i915_perf_destroy_locked(stream);
- mutex_unlock(&perf->lock);
+ mutex_unlock(&gt->perf.lock);
/* Release the reference the perf stream kept on the driver. */
drm_dev_put(&perf->i915->drm);
@@ -3410,7 +3726,7 @@ static const struct file_operations fops = {
* See i915_perf_ioctl_open() for interface details.
*
* Implements further stream config validation and stream initialization on
- * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
+ * behalf of i915_perf_open_ioctl() with the &gt->perf.lock mutex
* taken to serialize with any non-file-operation driver hooks.
*
* Note: at this point the @props have only been validated in isolation and
@@ -3565,8 +3881,10 @@ err:
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return intel_gt_clock_interval_to_ns(to_gt(perf->i915),
- 2ULL << exponent);
+ u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
+ u32 den = i915_perf_oa_timestamp_frequency(perf->i915);
+
+ return div_u64(nom + den - 1, den);
}
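
With the OA frequency in hand, the rewritten helper rounds the 2^(exponent + 1)-tick sampling period up to nanoseconds. A userspace mirror of that arithmetic, assuming the same illustrative 19.2 MHz frequency:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ull

    /* Mirror of oa_exponent_to_ns(): period is 2^(exponent + 1) OA ticks. */
    static uint64_t oa_exponent_to_ns(uint32_t freq_hz, int exponent)
    {
        uint64_t nom = (2ull << exponent) * NSEC_PER_SEC;

        return (nom + freq_hz - 1) / freq_hz;  /* round up, like div_u64 above */
    }

    int main(void)
    {
        uint32_t freq = 19200000;  /* illustrative OA timestamp frequency, Hz */

        printf("exponent 0  -> %llu ns\n",
               (unsigned long long)oa_exponent_to_ns(freq, 0));
        printf("exponent 15 -> %llu ns\n",
               (unsigned long long)oa_exponent_to_ns(freq, 15));
        return 0;
    }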
static __always_inline bool
@@ -3794,7 +4112,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
* mutex to avoid an awkward lockdep with mmap_lock.
*
* Most of the implementation details are handled by
- * i915_perf_open_ioctl_locked() after taking the &perf->lock
+ * i915_perf_open_ioctl_locked() after taking the &gt->perf.lock
* mutex for serializing with any non-file-operation driver hooks.
*
* Return: A newly opened i915 Perf stream file descriptor or negative
@@ -3805,6 +4123,7 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
{
struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_open_param *param = data;
+ struct intel_gt *gt;
struct perf_open_properties props;
u32 known_open_flags;
int ret;
@@ -3831,9 +4150,11 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- mutex_lock(&perf->lock);
+ gt = props.engine->gt;
+
+ mutex_lock(&gt->perf.lock);
ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
- mutex_unlock(&perf->lock);
+ mutex_unlock(&gt->perf.lock);
return ret;
}
@@ -3849,6 +4170,7 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
void i915_perf_register(struct drm_i915_private *i915)
{
struct i915_perf *perf = &i915->perf;
+ struct intel_gt *gt = to_gt(i915);
if (!perf->i915)
return;
@@ -3857,13 +4179,13 @@ void i915_perf_register(struct drm_i915_private *i915)
* i915_perf_open_ioctl(); considering that we register after
* being exposed to userspace.
*/
- mutex_lock(&perf->lock);
+ mutex_lock(&gt->perf.lock);
perf->metrics_kobj =
kobject_create_and_add("metrics",
&i915->drm.primary->kdev->kobj);
- mutex_unlock(&perf->lock);
+ mutex_unlock(&gt->perf.lock);
}
/**
@@ -3939,6 +4261,11 @@ static const struct i915_range gen12_oa_b_counters[] = {
{}
};
+static const struct i915_range xehp_oa_b_counters[] = {
+ { .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
+ { .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
+ {}
+};
+
static const struct i915_range gen7_oa_mux_regs[] = {
{ .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */
{ .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */
@@ -4013,6 +4340,12 @@ static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
return reg_in_range_table(addr, gen12_oa_b_counters);
}
+static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
+{
+ return reg_in_range_table(addr, xehp_oa_b_counters) ||
+ reg_in_range_table(addr, gen12_oa_b_counters);
+}
+
static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
return reg_in_range_table(addr, gen12_oa_mux_regs);
@@ -4411,11 +4744,47 @@ static void oa_init_supported_formats(struct i915_perf *perf)
oa_format_add(perf, I915_OA_FORMAT_C4_B8);
break;
+ case INTEL_DG2:
+ oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
+ break;
+
default:
MISSING_CASE(platform);
}
}
+static void i915_perf_init_info(struct drm_i915_private *i915)
+{
+ struct i915_perf *perf = &i915->perf;
+
+ switch (GRAPHICS_VER(i915)) {
+ case 8:
+ perf->ctx_oactxctrl_offset = 0x120;
+ perf->ctx_flexeu0_offset = 0x2ce;
+ perf->gen8_valid_ctx_bit = BIT(25);
+ break;
+ case 9:
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
+ perf->gen8_valid_ctx_bit = BIT(16);
+ break;
+ case 11:
+ perf->ctx_oactxctrl_offset = 0x124;
+ perf->ctx_flexeu0_offset = 0x78e;
+ perf->gen8_valid_ctx_bit = BIT(16);
+ break;
+ case 12:
+ /*
+ * Calculate offset at runtime in oa_pin_context for gen12 and
+ * cache the value in perf->ctx_oactxctrl_offset.
+ */
+ break;
+ default:
+ MISSING_CASE(GRAPHICS_VER(i915));
+ }
+}
+
/**
* i915_perf_init - initialize i915-perf state on module bind
* @i915: i915 device instance
@@ -4429,12 +4798,6 @@ void i915_perf_init(struct drm_i915_private *i915)
{
struct i915_perf *perf = &i915->perf;
- /* XXX const struct i915_perf_ops! */
-
- /* i915_perf is not enabled for DG2 yet */
- if (IS_DG2(i915))
- return;
-
perf->oa_formats = oa_formats;
if (IS_HASWELL(i915)) {
perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
@@ -4454,6 +4817,7 @@ void i915_perf_init(struct drm_i915_private *i915)
* execlist mode by default.
*/
perf->ops.read = gen8_oa_read;
+ i915_perf_init_info(i915);
if (IS_GRAPHICS_VER(i915, 8, 9)) {
perf->ops.is_valid_b_counter_reg =
@@ -4473,18 +4837,6 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.enable_metric_set = gen8_enable_metric_set;
perf->ops.disable_metric_set = gen8_disable_metric_set;
perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
-
- if (GRAPHICS_VER(i915) == 8) {
- perf->ctx_oactxctrl_offset = 0x120;
- perf->ctx_flexeu0_offset = 0x2ce;
-
- perf->gen8_valid_ctx_bit = BIT(25);
- } else {
- perf->ctx_oactxctrl_offset = 0x128;
- perf->ctx_flexeu0_offset = 0x3de;
-
- perf->gen8_valid_ctx_bit = BIT(16);
- }
} else if (GRAPHICS_VER(i915) == 11) {
perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
@@ -4498,13 +4850,10 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.enable_metric_set = gen8_enable_metric_set;
perf->ops.disable_metric_set = gen11_disable_metric_set;
perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
-
- perf->ctx_oactxctrl_offset = 0x124;
- perf->ctx_flexeu0_offset = 0x78e;
-
- perf->gen8_valid_ctx_bit = BIT(16);
} else if (GRAPHICS_VER(i915) == 12) {
perf->ops.is_valid_b_counter_reg =
+ HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
+ xehp_is_valid_b_counter_addr :
gen12_is_valid_b_counter_addr;
perf->ops.is_valid_mux_reg =
gen12_is_valid_mux_addr;
@@ -4516,14 +4865,15 @@ void i915_perf_init(struct drm_i915_private *i915)
perf->ops.enable_metric_set = gen12_enable_metric_set;
perf->ops.disable_metric_set = gen12_disable_metric_set;
perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
-
- perf->ctx_flexeu0_offset = 0;
- perf->ctx_oactxctrl_offset = 0x144;
}
}
if (perf->ops.enable_metric_set) {
- mutex_init(&perf->lock);
+ struct intel_gt *gt;
+ int i;
+
+ for_each_gt(gt, i915, i)
+ mutex_init(&gt->perf.lock);
/* Choose a representative limit */
oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;