Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 1323
1 file changed, 577 insertions(+), 746 deletions(-)
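A recurring conversion in the patch below replaces open-coded polling loops such as wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 1000) with intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms). The snippet that follows is only a minimal, self-contained sketch of that polling contract, not the driver's implementation: read_reg() is a simulated stand-in for an MMIO read, the per-iteration "millisecond" accounting is a placeholder for a real delay, and details such as forcewake handling are omitted.

#include <stdio.h>
#include <stdint.h>

/* Simulated MMIO read: the "hardware" reports the flag as set for the first
 * few polls, then clears it.  Purely a stand-in so the sketch is runnable. */
static uint32_t read_reg(uint32_t offset)
{
	static int polls;
	(void)offset;
	return (++polls < 3) ? 0x1 : 0x0;	/* bit 0 = "sync flush pending" */
}

/* Poll until (reg & mask) == value, or give up after timeout_ms.
 * Mirrors the shape of the intel_wait_for_register(dev_priv, reg, mask,
 * value, timeout_ms) calls introduced by this patch. */
static int wait_for_register(uint32_t reg, uint32_t mask, uint32_t value,
			     unsigned int timeout_ms)
{
	unsigned int waited_ms = 0;

	while ((read_reg(reg) & mask) != value) {
		if (waited_ms++ >= timeout_ms)
			return -1;		/* timed out */
		/* a real implementation would sleep roughly 1 ms here */
	}
	return 0;
}

int main(void)
{
	/* 0x20c0 stands in for INSTPM; mask/value ask for SyncFlush to clear. */
	if (wait_for_register(0x20c0, 0x1, 0, 1000))
		printf("wait for SyncFlush timed out\n");
	else
		printf("SyncFlush completed\n");
	return 0;
}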
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 68c5af079ef8..cca7792f26d5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -34,6 +34,11 @@ #include "i915_trace.h" #include "intel_drv.h" +/* Rough estimate of the typical request size, performing a flush, + * set-context and then emitting the batch. + */ +#define LEGACY_REQUEST_SIZE 200 + int __intel_ring_space(int head, int tail, int size) { int space = head - tail; @@ -53,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) ringbuf->tail, ringbuf->size); } -bool intel_engine_stopped(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->dev->dev_private; - return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); -} - static void __intel_ring_advance(struct intel_engine_cs *engine) { struct intel_ringbuffer *ringbuf = engine->buffer; ringbuf->tail &= ringbuf->size - 1; - if (intel_engine_stopped(engine)) - return; engine->write_tail(engine, ringbuf->tail); } @@ -101,7 +98,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 flush_domains) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; u32 cmd; int ret; @@ -140,7 +136,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, cmd |= MI_EXE_FLUSH; if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && - (IS_G4X(dev) || IS_GEN5(dev))) + (IS_G4X(req->i915) || IS_GEN5(req->i915))) cmd |= MI_INVALIDATE_ISP; ret = intel_ring_begin(req, 2); @@ -426,19 +422,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, static void ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_TAIL(engine, value); } u64 intel_ring_get_active_head(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u64 acthd; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), RING_ACTHD_UDW(engine->mmio_base)); - else if (INTEL_INFO(engine->dev)->gen >= 4) + else if (INTEL_GEN(dev_priv) >= 4) acthd = I915_READ(RING_ACTHD(engine->mmio_base)); else acthd = I915_READ(ACTHD); @@ -448,25 +444,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine) static void ring_setup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 addr; addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(engine->dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; I915_WRITE(HWS_PGA, addr); } static void intel_ring_setup_status_page(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; i915_reg_t mmio; /* The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. 
*/ - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { case RCS: mmio = RENDER_HWS_PGA_GEN7; @@ -486,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(dev_priv)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -503,7 +498,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) * arises: do we still need this and if so how should we go about * invalidating the TLB? */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { + if (IS_GEN(dev_priv, 6, 7)) { i915_reg_t reg = RING_INSTPM(engine->mmio_base); /* ring should be idle before issuing a sync flush*/ @@ -512,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) I915_WRITE(reg, _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH)); - if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, - 1000)) + if (intel_wait_for_register(dev_priv, + reg, INSTPM_SYNC_FLUSH, 0, + 1000)) DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", engine->name); } @@ -521,11 +517,15 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) static bool stop_ring(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); - if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { + if (intel_wait_for_register(dev_priv, + RING_MI_MODE(engine->mmio_base), + MODE_IDLE, + MODE_IDLE, + 1000)) { DRM_ERROR("%s : timed out trying to stop ring\n", engine->name); /* Sometimes we observe that the idle flag is not @@ -541,7 +541,7 @@ static bool stop_ring(struct intel_engine_cs *engine) I915_WRITE_HEAD(engine, 0); engine->write_tail(engine, 0); - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { (void)I915_READ_CTL(engine); I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } @@ -556,8 +556,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine) static int init_ring_common(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_ringbuffer *ringbuf = engine->buffer; struct drm_i915_gem_object *obj = ringbuf->obj; int ret = 0; @@ -587,7 +586,7 @@ static int init_ring_common(struct intel_engine_cs *engine) } } - if (I915_NEED_GFX_HWS(dev)) + if (I915_NEED_GFX_HWS(dev_priv)) intel_ring_setup_status_page(engine); else ring_setup_phys_status_page(engine); @@ -641,59 +640,42 @@ out: return ret; } -void -intel_fini_pipe_control(struct intel_engine_cs *engine) +void intel_fini_pipe_control(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - if (engine->scratch.obj == NULL) return; - if (INTEL_INFO(dev)->gen >= 5) { - kunmap(sg_page(engine->scratch.obj->pages->sgl)); - i915_gem_object_ggtt_unpin(engine->scratch.obj); - } - + i915_gem_object_ggtt_unpin(engine->scratch.obj); drm_gem_object_unreference(&engine->scratch.obj->base); engine->scratch.obj = NULL; } -int -intel_init_pipe_control(struct intel_engine_cs *engine) +int intel_init_pipe_control(struct intel_engine_cs *engine, int size) { + struct drm_i915_gem_object *obj; int ret; WARN_ON(engine->scratch.obj); - engine->scratch.obj = 
i915_gem_alloc_object(engine->dev, 4096); - if (engine->scratch.obj == NULL) { - DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; + obj = i915_gem_object_create_stolen(&engine->i915->drm, size); + if (!obj) + obj = i915_gem_object_create(&engine->i915->drm, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + ret = PTR_ERR(obj); goto err; } - ret = i915_gem_object_set_cache_level(engine->scratch.obj, - I915_CACHE_LLC); - if (ret) - goto err_unref; - - ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0); + ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH); if (ret) goto err_unref; - engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj); - engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl)); - if (engine->scratch.cpu_page == NULL) { - ret = -ENOMEM; - goto err_unpin; - } - + engine->scratch.obj = obj; + engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", engine->name, engine->scratch.gtt_offset); return 0; -err_unpin: - i915_gem_object_ggtt_unpin(engine->scratch.obj); err_unref: drm_gem_object_unreference(&engine->scratch.obj->base); err: @@ -702,11 +684,9 @@ err: static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { - int ret, i; struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; + int ret, i; if (w->count == 0) return 0; @@ -795,7 +775,7 @@ static int wa_add(struct drm_i915_private *dev_priv, static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, i915_reg_t reg) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct i915_workarounds *wa = &dev_priv->workarounds; const uint32_t index = wa->hw_whitelist_count[engine->id]; @@ -811,8 +791,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, static int gen8_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); @@ -863,9 +842,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine) static int bdw_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -885,16 +863,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine) /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); + (IS_BDW_GT3(dev_priv) ? 
HDC_FENCE_DEST_SLM_DISABLE : 0)); return 0; } static int chv_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -911,8 +888,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine) static int gen9_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */ @@ -937,14 +913,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_DG_MIRROR_FIX_ENABLE); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, GEN9_RHWO_OPTIMIZATION_DISABLE); /* @@ -970,8 +946,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) GEN9_CCS_TLB_PREFETCH_ENABLE); /* WaDisableMaskBasedCammingInRCC:skl,bxt */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, PIXEL_MASK_CAMMING_DISABLE); @@ -1035,8 +1011,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) static int skl_tune_iz_hashing(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -1077,9 +1052,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine) static int skl_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1090,12 +1064,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) * until D0 which is the default case so this is equivalent to * !WaDisablePerCtxtPreemptionGranularityControl:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) { I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); } - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) { /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ I915_WRITE(FF_SLICE_CS_CHICKEN2, _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); @@ -1104,30 +1078,30 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes * involving this register should also be added to WA batch as required. 
*/ - if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) /* WaDisableLSQCROPERFforOCL:skl */ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_RO_PERF_DIS); /* WaEnableGapsTsvCreditFix:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) { I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE)); } /* WaDisablePowerCompilerClockGating:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0)) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); /* WaBarrierPerformanceFixDisable:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0)) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FENCE_DEST_SLM_DISABLE | HDC_BARRIER_PERFORMANCE_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:skl */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0)) WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1135,6 +1109,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) /* WaDisableGafsUnitClkGating:skl */ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + /* WaInPlaceDecompressionHang:skl */ + if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + /* WaDisableLSQCROPERFforOCL:skl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); if (ret) @@ -1145,9 +1124,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) static int bxt_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1155,11 +1133,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaStoreMultiplePTEenable:bxt */ /* This is a requirement according to Hardware specification */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); /* WaSetClckGatingDisableMedia:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); } @@ -1168,8 +1146,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + /* WaDisablePooledEuLoadBalancingFix:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { + WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, + GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + } + /* WaDisableSbeCacheDispatchPortSharing:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1179,7 +1163,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ /* WaDisableLSQCROPERFforOCL:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); if (ret) return ret; @@ -1189,17 +1173,27 @@ static 
int bxt_init_workarounds(struct intel_engine_cs *engine) return ret; } + /* WaProgramL3SqcReg1DefaultForPerf:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) + I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | + L3_HIGH_PRIO_CREDITS(2)); + /* WaInsertDummyPushConstPs:bxt */ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + /* WaInPlaceDecompressionHang:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + return 0; } static int kbl_init_workarounds(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; ret = gen9_init_workarounds(engine); @@ -1241,6 +1235,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + /* WaInPlaceDecompressionHang:kbl */ + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + /* WaDisableLSQCROPERFforOCL:kbl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); if (ret) @@ -1251,24 +1249,23 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) int init_workarounds_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WARN_ON(engine->id != RCS); dev_priv->workarounds.count = 0; dev_priv->workarounds.hw_whitelist_count[RCS] = 0; - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) return bdw_init_workarounds(engine); - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return chv_init_workarounds(engine); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) return skl_init_workarounds(engine); - if (IS_BROXTON(dev)) + if (IS_BROXTON(dev_priv)) return bxt_init_workarounds(engine); if (IS_KABYLAKE(dev_priv)) @@ -1279,14 +1276,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine) static int init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret = init_ring_common(engine); if (ret) return ret; /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) + if (IS_GEN(dev_priv, 4, 6)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); /* We need to disable the AsyncFlip performance optimisations in order @@ -1295,22 +1291,22 @@ static int init_render_ring(struct intel_engine_cs *engine) * * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); /* Required for the hardware to program scanline values for waiting */ /* WaEnableFlushTlbInvalidationMode:snb */ - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ - if (IS_GEN7(dev)) + if (IS_GEN7(dev_priv)) I915_WRITE(GFX_MODE_GEN7, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - if (IS_GEN6(dev)) { + if (IS_GEN6(dev_priv)) { /* From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement * 
policy. [...] This bit must be reset. LRA replacement @@ -1320,19 +1316,18 @@ static int init_render_ring(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); } - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (HAS_L3_DPF(dev)) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); + if (INTEL_INFO(dev_priv)->gen >= 6) + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); return init_workarounds_ring(engine); } static void render_ring_cleanup(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (dev_priv->semaphore_obj) { i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); @@ -1348,13 +1343,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 8 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1363,19 +1357,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(waiter, dev_priv, id) { - u32 seqno; u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - seqno = i915_gem_request_get_seqno(signaller_req); intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_FLUSH_ENABLE); + PIPE_CONTROL_CS_STALL); intel_ring_emit(signaller, lower_32_bits(gtt_offset)); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); intel_ring_emit(signaller, 0); intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | MI_SEMAPHORE_TARGET(waiter->hw_id)); @@ -1390,13 +1382,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 6 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1405,18 +1396,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(waiter, dev_priv, id) { - u32 seqno; u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - seqno = i915_gem_request_get_seqno(signaller_req); intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); intel_ring_emit(signaller, lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); intel_ring_emit(signaller, 
MI_SEMAPHORE_SIGNAL | MI_SEMAPHORE_TARGET(waiter->hw_id)); intel_ring_emit(signaller, 0); @@ -1429,14 +1418,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *useless; enum intel_engine_id id; int ret, num_rings; #define MBOX_UPDATE_DWORDS 3 - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); #undef MBOX_UPDATE_DWORDS @@ -1448,11 +1436,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; if (i915_mmio_reg_valid(mbox_reg)) { - u32 seqno = i915_gem_request_get_seqno(signaller_req); - intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit_reg(signaller, mbox_reg); - intel_ring_emit(signaller, seqno); + intel_ring_emit(signaller, signaller_req->seqno); } } @@ -1488,17 +1474,45 @@ gen6_add_request(struct drm_i915_gem_request *req) intel_ring_emit(engine, MI_STORE_DWORD_INDEX); intel_ring_emit(engine, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(engine, req->seqno); + intel_ring_emit(engine, MI_USER_INTERRUPT); + __intel_ring_advance(engine); + + return 0; +} + +static int +gen8_render_add_request(struct drm_i915_gem_request *req) +{ + struct intel_engine_cs *engine = req->engine; + int ret; + + if (engine->semaphore.signal) + ret = engine->semaphore.signal(req, 8); + else + ret = intel_ring_begin(req, 8); + if (ret) + return ret; + + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_ring_emit(engine, intel_hws_seqno_address(req->engine)); + intel_ring_emit(engine, 0); intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + /* We're thrashing one dword of HWS. 
*/ + intel_ring_emit(engine, 0); intel_ring_emit(engine, MI_USER_INTERRUPT); + intel_ring_emit(engine, MI_NOOP); __intel_ring_advance(engine); return 0; } -static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, +static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; return dev_priv->last_seqno < seqno; } @@ -1516,7 +1530,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, u32 seqno) { struct intel_engine_cs *waiter = waiter_req->engine; - struct drm_i915_private *dev_priv = waiter->dev->dev_private; + struct drm_i915_private *dev_priv = waiter_req->i915; + u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id); + struct i915_hw_ppgtt *ppgtt; int ret; ret = intel_ring_begin(waiter_req, 4); @@ -1525,14 +1541,20 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD); intel_ring_emit(waiter, seqno); - intel_ring_emit(waiter, - lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); - intel_ring_emit(waiter, - upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); + intel_ring_emit(waiter, lower_32_bits(offset)); + intel_ring_emit(waiter, upper_32_bits(offset)); intel_ring_advance(waiter); + + /* When the !RCS engines idle waiting upon a semaphore, they lose their + * pagetables and we must reload them before executing the batch. + * We do this on the i915_switch_context() following the wait and + * before the dispatch. + */ + ppgtt = waiter_req->ctx->ppgtt; + if (ppgtt && waiter_req->engine->id != RCS) + ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine); return 0; } @@ -1561,7 +1583,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, return ret; /* If seqno wrap happened, omit the wait with no-ops */ - if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { + if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) { intel_ring_emit(waiter, dw1 | wait_mbox); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, 0); @@ -1577,72 +1599,28 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, return 0; } -#define PIPE_CONTROL_FLUSH(ring__, addr__) \ -do { \ - intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ - PIPE_CONTROL_DEPTH_STALL); \ - intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ - intel_ring_emit(ring__, 0); \ - intel_ring_emit(ring__, 0); \ -} while (0) - -static int -pc_render_add_request(struct drm_i915_gem_request *req) +static void +gen5_seqno_barrier(struct intel_engine_cs *ring) { - struct intel_engine_cs *engine = req->engine; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; - int ret; - - /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently - * incoherent with writes to memory, i.e. completely fubar, - * so we need to use PIPE_NOTIFY instead. + /* MI_STORE are internally buffered by the GPU and not flushed + * either by MI_FLUSH or SyncFlush or any other combination of + * MI commands. * - * However, we also need to workaround the qword write - * incoherence by flushing the 6 PIPE_NOTIFY buffers out to - * memory before requesting an interrupt. + * "Only the submission of the store operation is guaranteed. + * The write result will be complete (coherent) some time later + * (this is practically a finite period but there is no guaranteed + * latency)." 
+ * + * Empirically, we observe that we need a delay of at least 75us to + * be sure that the seqno write is visible by the CPU. */ - ret = intel_ring_begin(req, 32); - if (ret) - return ret; - - intel_ring_emit(engine, - GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WRITE_FLUSH | - PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); - intel_ring_emit(engine, - engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); - intel_ring_emit(engine, 0); - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(engine, scratch_addr); - - intel_ring_emit(engine, - GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WRITE_FLUSH | - PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(engine, - engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); - intel_ring_emit(engine, 0); - __intel_ring_advance(engine); - - return 0; + usleep_range(125, 250); } static void gen6_seqno_barrier(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like @@ -1664,133 +1642,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine) spin_unlock_irq(&dev_priv->uncore.lock); } -static u32 -ring_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - static void -ring_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); -} - -static u32 -pc_render_get_seqno(struct intel_engine_cs *engine) +gen5_irq_enable(struct intel_engine_cs *engine) { - return engine->scratch.cpu_page[0]; + gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask); } static void -pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - engine->scratch.cpu_page[0] = seqno; -} - -static bool -gen5_ring_get_irq(struct intel_engine_cs *engine) +gen5_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) - gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask); } static void -gen5_ring_put_irq(struct intel_engine_cs *engine) +i9xx_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) - gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); -} - -static bool -i9xx_ring_get_irq(struct intel_engine_cs *engine) 
-{ - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (!intel_irqs_enabled(dev_priv)) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + struct drm_i915_private *dev_priv = engine->i915; - return true; + dev_priv->irq_mask &= ~engine->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); + POSTING_READ_FW(RING_IMR(engine->mmio_base)); } static void -i9xx_ring_put_irq(struct intel_engine_cs *engine) +i9xx_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + dev_priv->irq_mask |= engine->irq_enable_mask; + I915_WRITE(IMR, dev_priv->irq_mask); } -static bool -i8xx_ring_get_irq(struct intel_engine_cs *engine) +static void +i8xx_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (!intel_irqs_enabled(dev_priv)) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE16(IMR, dev_priv->irq_mask); - POSTING_READ16(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + struct drm_i915_private *dev_priv = engine->i915; - return true; + dev_priv->irq_mask &= ~engine->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); + POSTING_READ16(RING_IMR(engine->mmio_base)); } static void -i8xx_ring_put_irq(struct intel_engine_cs *engine) +i8xx_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE16(IMR, dev_priv->irq_mask); - POSTING_READ16(IMR); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + dev_priv->irq_mask |= engine->irq_enable_mask; + I915_WRITE16(IMR, dev_priv->irq_mask); } static int @@ -1824,135 +1723,68 @@ i9xx_add_request(struct drm_i915_gem_request *req) intel_ring_emit(engine, MI_STORE_DWORD_INDEX); intel_ring_emit(engine, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(engine, req->seqno); intel_ring_emit(engine, MI_USER_INTERRUPT); __intel_ring_advance(engine); return 0; } -static bool -gen6_ring_get_irq(struct intel_engine_cs *engine) +static void +gen6_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; - - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) - I915_WRITE_IMR(engine, - 
~(engine->irq_enable_mask | - GT_PARITY_ERROR(dev))); - else - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + struct drm_i915_private *dev_priv = engine->i915; - return true; + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | + engine->irq_keep_mask)); + gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); } static void -gen6_ring_put_irq(struct intel_engine_cs *engine) +gen6_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); - else - I915_WRITE_IMR(engine, ~0); - gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); + gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); } -static bool -hsw_vebox_get_irq(struct intel_engine_cs *engine) +static void +hsw_vebox_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + I915_WRITE_IMR(engine, ~engine->irq_enable_mask); + gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); } static void -hsw_vebox_put_irq(struct intel_engine_cs *engine) +hsw_vebox_irq_disable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - I915_WRITE_IMR(engine, ~0); - gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~0); + gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); } -static bool -gen8_ring_get_irq(struct intel_engine_cs *engine) +static void +gen8_irq_enable(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; - - if (WARN_ON(!intel_irqs_enabled(dev_priv))) - return false; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | - GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); - } else { - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); - } - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - - return true; + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | + engine->irq_keep_mask)); + POSTING_READ_FW(RING_IMR(engine->mmio_base)); } static void -gen8_ring_put_irq(struct intel_engine_cs *engine) +gen8_irq_disable(struct intel_engine_cs *engine) { - 
struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long flags; + struct drm_i915_private *dev_priv = engine->i915; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { - I915_WRITE_IMR(engine, - ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); - } else { - I915_WRITE_IMR(engine, ~0); - } - POSTING_READ(RING_IMR(engine->mmio_base)); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); } static int @@ -2066,12 +1898,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, static void cleanup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) return; - drm_pci_free(engine->dev, dev_priv->status_page_dmah); + drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); engine->status_page.page_addr = NULL; } @@ -2097,10 +1929,10 @@ static int init_status_page(struct intel_engine_cs *engine) unsigned flags; int ret; - obj = i915_gem_alloc_object(engine->dev, 4096); - if (obj == NULL) { + obj = i915_gem_object_create(&engine->i915->drm, 4096); + if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate status page\n"); - return -ENOMEM; + return PTR_ERR(obj); } ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); @@ -2108,7 +1940,7 @@ static int init_status_page(struct intel_engine_cs *engine) goto err_unref; flags = 0; - if (!HAS_LLC(engine->dev)) + if (!HAS_LLC(engine->i915)) /* On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. * Though this restriction is not documented for @@ -2142,11 +1974,11 @@ err_unref: static int init_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) { dev_priv->status_page_dmah = - drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); + drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) return -ENOMEM; } @@ -2159,20 +1991,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine) void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) { + GEM_BUG_ON(ringbuf->vma == NULL); + GEM_BUG_ON(ringbuf->virtual_start == NULL); + if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) i915_gem_object_unpin_map(ringbuf->obj); else - iounmap(ringbuf->virtual_start); + i915_vma_unpin_iomap(ringbuf->vma); ringbuf->virtual_start = NULL; - ringbuf->vma = NULL; + i915_gem_object_ggtt_unpin(ringbuf->obj); + ringbuf->vma = NULL; } -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj = ringbuf->obj; /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ unsigned flags = PIN_OFFSET_BIAS | 4096; @@ -2206,10 +2040,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, /* Access through the GTT requires the device to be awake. 
*/ assert_rpm_wakelock_held(dev_priv); - addr = ioremap_wc(ggtt->mappable_base + - i915_gem_obj_ggtt_offset(obj), ringbuf->size); - if (addr == NULL) { - ret = -ENOMEM; + addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj)); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); goto err_unpin; } } @@ -2238,9 +2071,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev, if (!HAS_LLC(dev)) obj = i915_gem_object_create_stolen(dev, ringbuf->size); if (obj == NULL) - obj = i915_gem_alloc_object(dev, ringbuf->size); - if (obj == NULL) - return -ENOMEM; + obj = i915_gem_object_create(dev, ringbuf->size); + if (IS_ERR(obj)) + return PTR_ERR(obj); /* mark ring buffers as read-only from GPU side by default */ obj->gt_ro = 1; @@ -2272,13 +2105,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) * of the buffer. */ ring->effective_size = size; - if (IS_I830(engine->dev) || IS_845G(engine->dev)) + if (IS_I830(engine->i915) || IS_845G(engine->i915)) ring->effective_size -= 2 * CACHELINE_BYTES; ring->last_retired_head = -1; intel_ring_update_space(ring); - ret = intel_alloc_ringbuffer_obj(engine->dev, ring); + ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", engine->name, ret); @@ -2298,15 +2131,67 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring) kfree(ring); } +static int intel_ring_context_pin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce = &ctx->engine[engine->id]; + int ret; + + lockdep_assert_held(&ctx->i915->drm.struct_mutex); + + if (ce->pin_count++) + return 0; + + if (ce->state) { + ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0); + if (ret) + goto error; + } + + /* The kernel context is only used as a placeholder for flushing the + * active context. It is never used for submitting user rendering and + * as such never requires the golden render context, and so we can skip + * emitting it when we switch to the kernel context. This is required + * as during eviction we cannot allocate and pin the renderstate in + * order to initialise the context. + */ + if (ctx == ctx->i915->kernel_context) + ce->initialised = true; + + i915_gem_context_reference(ctx); + return 0; + +error: + ce->pin_count = 0; + return ret; +} + +static void intel_ring_context_unpin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce = &ctx->engine[engine->id]; + + lockdep_assert_held(&ctx->i915->drm.struct_mutex); + + if (--ce->pin_count) + return; + + if (ce->state) + i915_gem_object_ggtt_unpin(ce->state); + + i915_gem_context_unreference(ctx); +} + static int intel_init_ring_buffer(struct drm_device *dev, struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_ringbuffer *ringbuf; int ret; WARN_ON(engine->buffer); - engine->dev = dev; + engine->i915 = dev_priv; INIT_LIST_HEAD(&engine->active_list); INIT_LIST_HEAD(&engine->request_list); INIT_LIST_HEAD(&engine->execlist_queue); @@ -2315,7 +2200,20 @@ static int intel_init_ring_buffer(struct drm_device *dev, memset(engine->semaphore.sync_seqno, 0, sizeof(engine->semaphore.sync_seqno)); - init_waitqueue_head(&engine->irq_queue); + ret = intel_engine_init_breadcrumbs(engine); + if (ret) + goto error; + + /* We may need to do things with the shrinker which + * require us to immediately switch back to the default + * context. 
This can cause a problem as pinning the + * default context also requires GTT space which may not + * be available. To avoid this we always pin the default + * context. + */ + ret = intel_ring_context_pin(dev_priv->kernel_context, engine); + if (ret) + goto error; ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); if (IS_ERR(ringbuf)) { @@ -2324,7 +2222,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, } engine->buffer = ringbuf; - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { ret = init_status_page(engine); if (ret) goto error; @@ -2335,7 +2233,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); if (ret) { DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", engine->name, ret); @@ -2361,11 +2259,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (!intel_engine_initialized(engine)) return; - dev_priv = to_i915(engine->dev); + dev_priv = engine->i915; if (engine->buffer) { intel_stop_engine(engine); - WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); intel_unpin_ringbuffer_obj(engine->buffer); intel_ringbuffer_free(engine->buffer); @@ -2375,7 +2273,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (I915_NEED_GFX_HWS(engine->dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { cleanup_status_page(engine); } else { WARN_ON(engine->id != RCS); @@ -2384,7 +2282,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) i915_cmd_parser_fini_ring(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - engine->dev = NULL; + intel_engine_fini_breadcrumbs(engine); + + intel_ring_context_unpin(dev_priv->kernel_context, engine); + + engine->i915 = NULL; } int intel_engine_idle(struct intel_engine_cs *engine) @@ -2407,46 +2309,22 @@ int intel_engine_idle(struct intel_engine_cs *engine) int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - request->ringbuf = request->engine->buffer; - return 0; -} + int ret; -int intel_ring_reserve_space(struct drm_i915_gem_request *request) -{ - /* - * The first call merely notes the reserve request and is common for - * all back ends. The subsequent localised _begin() call actually - * ensures that the reservation is available. Without the begin, if - * the request creator immediately submitted the request without - * adding any commands to it then there might not actually be - * sufficient room for the submission commands. + /* Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. 
*/ - intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); - - return intel_ring_begin(request, 0); -} + request->reserved_space += LEGACY_REQUEST_SIZE; -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) -{ - GEM_BUG_ON(ringbuf->reserved_size); - ringbuf->reserved_size = size; -} - -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(!ringbuf->reserved_size); - ringbuf->reserved_size = 0; -} + request->ringbuf = request->engine->buffer; -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(!ringbuf->reserved_size); - ringbuf->reserved_size = 0; -} + ret = intel_ring_begin(request, 0); + if (ret) + return ret; -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(ringbuf->reserved_size); + request->reserved_space -= LEGACY_REQUEST_SIZE; + return 0; } static int wait_for_space(struct drm_i915_gem_request *req, int bytes) @@ -2468,7 +2346,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) * * See also i915_gem_request_alloc() and i915_add_request(). */ - GEM_BUG_ON(!ringbuf->reserved_size); + GEM_BUG_ON(!req->reserved_space); list_for_each_entry(target, &engine->request_list, list) { unsigned space; @@ -2503,7 +2381,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) int total_bytes, wait_bytes; bool need_wrap = false; - total_bytes = bytes + ringbuf->reserved_size; + total_bytes = bytes + req->reserved_space; if (unlikely(bytes > remain_usable)) { /* @@ -2519,7 +2397,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) * and only need to effectively wait for the reserved * size space from the start of ringbuffer. */ - wait_bytes = remain_actual + ringbuf->reserved_size; + wait_bytes = remain_actual + req->reserved_space; } else { /* No wrapping required, just waiting. */ wait_bytes = total_bytes; @@ -2576,7 +2454,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; /* Our semaphore implementation is strictly monotonic (i.e. we proceed * so long as the semaphore value in the register/page is greater @@ -2586,7 +2464,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) * the semaphore value, then when the seqno moves backwards all * future waits will complete instantly (causing rendering corruption). */ - if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); if (HAS_VEBOX(dev_priv)) @@ -2603,43 +2481,58 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) memset(engine->semaphore.sync_seqno, 0, sizeof(engine->semaphore.sync_seqno)); - engine->set_seqno(engine, seqno); + intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); engine->last_submitted_seqno = seqno; engine->hangcheck.seqno = seqno; + + /* After manually advancing the seqno, fake the interrupt in case + * there are any waiters for that seqno. 
+ */ + rcu_read_lock(); + intel_engine_wakeup(engine); + rcu_read_unlock(); } static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; + + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* Every tail move must follow the sequence below */ /* Disable notification that the ring is IDLE. The GT * will then assume that it is busy and bring it out of rc6. */ - I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); /* Clear the context id. Here be magic! */ - I915_WRITE64(GEN6_BSD_RNCID, 0x0); + I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); /* Wait for the ring not to be idle, i.e. for it to wake up. */ - if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & - GEN6_BSD_SLEEP_INDICATOR) == 0, - 50)) + if (intel_wait_for_register_fw(dev_priv, + GEN6_BSD_SLEEP_PSMI_CONTROL, + GEN6_BSD_SLEEP_INDICATOR, + 0, + 50)) DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); /* Now that the ring is fully powered up, update the tail */ - I915_WRITE_TAIL(engine, value); - POSTING_READ(RING_TAIL(engine->mmio_base)); + I915_WRITE_FW(RING_TAIL(engine->mmio_base), value); + POSTING_READ_FW(RING_TAIL(engine->mmio_base)); /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. */ - I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, @@ -2654,7 +2547,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2676,7 +2569,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2767,7 +2660,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; uint32_t cmd; int ret; @@ -2776,7 +2668,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2797,7 +2689,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2809,11 +2701,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, return 0; } +static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + int ret, i; + + if 
(!i915_semaphore_is_enabled(dev_priv)) + return; + + if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) { + obj = i915_gem_object_create(&dev_priv->drm, 4096); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else { + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); + if (ret != 0) { + drm_gem_object_unreference(&obj->base); + DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else { + dev_priv->semaphore_obj = obj; + } + } + } + + if (!i915_semaphore_is_enabled(dev_priv)) + return; + + if (INTEL_GEN(dev_priv) >= 8) { + u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); + + engine->semaphore.sync_to = gen8_ring_sync; + engine->semaphore.signal = gen8_xcs_signal; + + for (i = 0; i < I915_NUM_ENGINES; i++) { + u64 ring_offset; + + if (i != engine->id) + ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); + else + ring_offset = MI_SEMAPHORE_SYNC_INVALID; + + engine->semaphore.signal_ggtt[i] = ring_offset; + } + } else if (INTEL_GEN(dev_priv) >= 6) { + engine->semaphore.sync_to = gen6_ring_sync; + engine->semaphore.signal = gen6_signal; + + /* + * The current semaphore is only applied on pre-gen8 + * platform. And there is no VCS2 ring on the pre-gen8 + * platform. So the semaphore between RCS and VCS2 is + * initialized as INVALID. Gen8 will initialize the + * sema between VCS2 and RCS later. + */ + for (i = 0; i < I915_NUM_ENGINES; i++) { + static const struct { + u32 wait_mbox; + i915_reg_t mbox_reg; + } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = { + [RCS] = { + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, + }, + [VCS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, + }, + [BCS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, + [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, + }, + [VECS] = { + [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, + [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, + [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, + }, + }; + u32 wait_mbox; + i915_reg_t mbox_reg; + + if (i == engine->id || i == VCS2) { + wait_mbox = MI_SEMAPHORE_SYNC_INVALID; + mbox_reg = GEN6_NOSYNC; + } else { + wait_mbox = sem_data[engine->id][i].wait_mbox; + mbox_reg = sem_data[engine->id][i].mbox_reg; + } + + engine->semaphore.mbox.wait[i] = wait_mbox; + engine->semaphore.mbox.signal[i] = mbox_reg; + } + } +} + +static void intel_ring_init_irq(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + if (INTEL_GEN(dev_priv) >= 8) { + engine->irq_enable = gen8_irq_enable; + engine->irq_disable = gen8_irq_disable; + engine->irq_seqno_barrier = gen6_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 6) { + engine->irq_enable = gen6_irq_enable; + engine->irq_disable = gen6_irq_disable; + engine->irq_seqno_barrier = gen6_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 5) { + engine->irq_enable = gen5_irq_enable; + 
engine->irq_disable = gen5_irq_disable; + engine->irq_seqno_barrier = gen5_seqno_barrier; + } else if (INTEL_GEN(dev_priv) >= 3) { + engine->irq_enable = i9xx_irq_enable; + engine->irq_disable = i9xx_irq_disable; + } else { + engine->irq_enable = i8xx_irq_enable; + engine->irq_disable = i8xx_irq_disable; + } +} + +static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) +{ + engine->init_hw = init_ring_common; + engine->write_tail = ring_write_tail; + + engine->add_request = i9xx_add_request; + if (INTEL_GEN(dev_priv) >= 6) + engine->add_request = gen6_add_request; + + if (INTEL_GEN(dev_priv) >= 8) + engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + else if (INTEL_GEN(dev_priv) >= 6) + engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + else if (INTEL_GEN(dev_priv) >= 4) + engine->dispatch_execbuffer = i965_dispatch_execbuffer; + else if (IS_I830(dev_priv) || IS_845G(dev_priv)) + engine->dispatch_execbuffer = i830_dispatch_execbuffer; + else + engine->dispatch_execbuffer = i915_dispatch_execbuffer; + + intel_ring_init_irq(dev_priv, engine); + intel_ring_init_semaphores(dev_priv, engine); +} + int intel_init_render_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[RCS]; - struct drm_i915_gem_object *obj; int ret; engine->name = "render ring"; @@ -2822,140 +2862,49 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->hw_id = 0; engine->mmio_base = RENDER_RING_BASE; - if (INTEL_INFO(dev)->gen >= 8) { - if (i915_semaphore_is_enabled(dev)) { - obj = i915_gem_alloc_object(dev, 4096); - if (obj == NULL) { - DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); - i915.semaphores = 0; - } else { - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); - if (ret != 0) { - drm_gem_object_unreference(&obj->base); - DRM_ERROR("Failed to pin semaphore bo. 
Disabling semaphores\n"); - i915.semaphores = 0; - } else - dev_priv->semaphore_obj = obj; - } - } + intel_ring_default_vfuncs(dev_priv, engine); + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + if (HAS_L3_DPF(dev_priv)) + engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + + if (INTEL_GEN(dev_priv) >= 8) { engine->init_context = intel_rcs_ctx_init; - engine->add_request = gen6_add_request; + engine->add_request = gen8_render_add_request; engine->flush = gen8_render_ring_flush; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { - WARN_ON(!dev_priv->semaphore_obj); - engine->semaphore.sync_to = gen8_ring_sync; + if (i915_semaphore_is_enabled(dev_priv)) engine->semaphore.signal = gen8_rcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; - engine->add_request = gen6_add_request; engine->flush = gen7_render_ring_flush; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) engine->flush = gen6_render_ring_flush; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 - * platform. And there is no VCS2 ring on the pre-gen8 - * platform. So the semaphore between RCS and VCS2 is - * initialized as INVALID. Gen8 will initialize the - * sema between VCS2 and RCS later. 
- */ - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } - } else if (IS_GEN5(dev)) { - engine->add_request = pc_render_add_request; + } else if (IS_GEN5(dev_priv)) { engine->flush = gen4_render_ring_flush; - engine->get_seqno = pc_render_get_seqno; - engine->set_seqno = pc_render_set_seqno; - engine->irq_get = gen5_ring_get_irq; - engine->irq_put = gen5_ring_put_irq; - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT | - GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; } else { - engine->add_request = i9xx_add_request; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) engine->flush = gen2_render_ring_flush; else engine->flush = gen4_render_ring_flush; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (IS_GEN2(dev)) { - engine->irq_get = i8xx_ring_get_irq; - engine->irq_put = i8xx_ring_put_irq; - } else { - engine->irq_get = i9xx_ring_get_irq; - engine->irq_put = i9xx_ring_put_irq; - } engine->irq_enable_mask = I915_USER_INTERRUPT; } - engine->write_tail = ring_write_tail; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; - else if (IS_GEN8(dev)) - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 6) - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 4) - engine->dispatch_execbuffer = i965_dispatch_execbuffer; - else if (IS_I830(dev) || IS_845G(dev)) - engine->dispatch_execbuffer = i830_dispatch_execbuffer; - else - engine->dispatch_execbuffer = i915_dispatch_execbuffer; + engine->init_hw = init_render_ring; engine->cleanup = render_ring_cleanup; - /* Workaround batchbuffer to combat CS tlb bug. 
*/ - if (HAS_BROKEN_CS_TLB(dev)) { - obj = i915_gem_alloc_object(dev, I830_WA_SIZE); - if (obj == NULL) { - DRM_ERROR("Failed to allocate batch bo\n"); - return -ENOMEM; - } - - ret = i915_gem_obj_ggtt_pin(obj, 0, 0); - if (ret != 0) { - drm_gem_object_unreference(&obj->base); - DRM_ERROR("Failed to ping batch bo\n"); - return ret; - } - - engine->scratch.obj = obj; - engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); - } - ret = intel_init_ring_buffer(dev, engine); if (ret) return ret; - if (INTEL_INFO(dev)->gen >= 5) { - ret = intel_init_pipe_control(engine); + if (INTEL_GEN(dev_priv) >= 6) { + ret = intel_init_pipe_control(engine, 4096); + if (ret) + return ret; + } else if (HAS_BROKEN_CS_TLB(dev_priv)) { + ret = intel_init_pipe_control(engine, I830_WA_SIZE); if (ret) return ret; } @@ -2965,7 +2914,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) int intel_init_bsd_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VCS]; engine->name = "bsd ring"; @@ -2973,68 +2922,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->exec_id = I915_EXEC_BSD; engine->hw_id = 1; - engine->write_tail = ring_write_tail; - if (INTEL_INFO(dev)->gen >= 6) { + intel_ring_default_vfuncs(dev_priv, engine); + + if (INTEL_GEN(dev_priv) >= 6) { engine->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) engine->write_tail = gen6_bsd_ring_write_tail; engine->flush = gen6_bsd_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = - gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - } else { + else engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->dispatch_execbuffer = - gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } - } } else { engine->mmio_base = BSD_RING_BASE; engine->flush = bsd_ring_flush; - engine->add_request = i9xx_add_request; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (IS_GEN5(dev)) { + if (IS_GEN5(dev_priv)) engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; - engine->irq_get = gen5_ring_get_irq; - engine->irq_put = gen5_ring_put_irq; 
- } else { + else engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; - engine->irq_get = i9xx_ring_get_irq; - engine->irq_put = i9xx_ring_put_irq; - } - engine->dispatch_execbuffer = i965_dispatch_execbuffer; } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } @@ -3044,147 +2952,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) */ int intel_init_bsd2_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; engine->name = "bsd2 ring"; engine->id = VCS2; engine->exec_id = I915_EXEC_BSD; engine->hw_id = 4; - - engine->write_tail = ring_write_tail; engine->mmio_base = GEN8_BSD2_RING_BASE; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_bsd_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = - gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } int intel_init_blt_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[BCS]; engine->name = "blitter ring"; engine->id = BCS; engine->exec_id = I915_EXEC_BLT; engine->hw_id = 2; - engine->mmio_base = BLT_RING_BASE; - engine->write_tail = ring_write_tail; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } - } else { + else engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; - engine->irq_get = gen6_ring_get_irq; - engine->irq_put = gen6_ring_put_irq; - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.signal = gen6_signal; - engine->semaphore.sync_to = gen6_ring_sync; - /* - * The current semaphore is only applied on pre-gen8 - * platform. And there is no VCS2 ring on the pre-gen8 - * platform. So the semaphore between BCS and VCS2 is - * initialized as INVALID. Gen8 will initialize the - * sema between BCS and VCS2 later. 
- */ - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } - } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } int intel_init_vebox_ring_buffer(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine = &dev_priv->engine[VECS]; engine->name = "video enhancement ring"; engine->id = VECS; engine->exec_id = I915_EXEC_VEBOX; engine->hw_id = 3; - engine->mmio_base = VEBOX_RING_BASE; - engine->write_tail = ring_write_tail; + + intel_ring_default_vfuncs(dev_priv, engine); + engine->flush = gen6_ring_flush; - engine->add_request = gen6_add_request; - engine->irq_seqno_barrier = gen6_seqno_barrier; - engine->get_seqno = ring_get_seqno; - engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - engine->irq_get = gen8_ring_get_irq; - engine->irq_put = gen8_ring_put_irq; - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen8_ring_sync; - engine->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT(engine); - } } else { engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; - engine->irq_get = hsw_vebox_get_irq; - engine->irq_put = hsw_vebox_put_irq; - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { - engine->semaphore.sync_to = gen6_ring_sync; - engine->semaphore.signal = gen6_signal; - engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; - engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; - engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; - engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; - engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; - engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; - engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; - engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - } + engine->irq_enable = hsw_vebox_irq_enable; + engine->irq_disable = hsw_vebox_irq_disable; } - engine->init_hw = init_ring_common; return intel_init_ring_buffer(dev, engine); } |
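For readability, the reworked gen6 BSD tail-write path assembled as it reads after the hunks above: a single explicit forcewake grab now brackets the whole sequence, so the raw *_FW accessors and intel_wait_for_register_fw() can skip per-access forcewake handling. This is just the added and retained lines stitched back together, not new logic.

static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
				     u32 value)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_BSD_SLEEP_PSMI_CONTROL,
				       GEN6_BSD_SLEEP_INDICATOR,
				       0,
				       50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
	POSTING_READ_FW(RING_TAIL(engine->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}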
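The PSMI writes above rely on the masked-bit convention of these GT registers: the upper 16 bits act as a per-bit write enable for the lower 16, so one posted write can set or clear a single bit without a read-modify-write cycle. A minimal sketch of that encoding, assuming the same layout as the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers in i915_reg.h; the EXAMPLE_* names are illustrative only.

/* Illustrative only -- the real helpers are _MASKED_BIT_ENABLE() and
 * _MASKED_BIT_DISABLE() in i915_reg.h.  (bit << 16) selects which bit the
 * write affects; the low half supplies its new value.
 */
#define EXAMPLE_MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))	/* set bit */
#define EXAMPLE_MASKED_BIT_DISABLE(bit)	((bit) << 16)		/* clear bit */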
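Several open-coded wait_for((I915_READ(reg) & mask) == value, timeout_ms) polls become intel_wait_for_register()/intel_wait_for_register_fw() calls in this diff. The real helpers live in intel_uncore.c and additionally deal with forcewake and a short busy-wait phase; the sketch below only illustrates the contract the callers above depend on (return 0 once the masked value matches, a negative error on timeout) and uses an example_ name to make clear it is not the upstream implementation.

/* Simplified illustration of the polling contract; not the upstream helper. */
static int example_wait_for_register(struct drm_i915_private *dev_priv,
				     i915_reg_t reg,
				     u32 mask, u32 value,
				     unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if ((I915_READ(reg) & mask) == value)
			return 0;	/* condition met */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(10, 50);	/* back off between reads */
	}
}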
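The per-engine init functions now share most of their setup through intel_ring_default_vfuncs(), which in turn calls intel_ring_init_irq() and intel_ring_init_semaphores(), and each engine only overrides its specific hooks afterwards. Assembled from the hunks above, the second BSD ring init ends up roughly as sketched below; the exact line ordering is inferred from the diff context rather than quoted from the final file.

int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine = &dev_priv->engine[VCS2];

	engine->name = "bsd2 ring";
	engine->id = VCS2;
	engine->exec_id = I915_EXEC_BSD;
	engine->hw_id = 4;
	engine->mmio_base = GEN8_BSD2_RING_BASE;

	/* Common vfuncs: write_tail, add_request, dispatch, irq, semaphores. */
	intel_ring_default_vfuncs(dev_priv, engine);

	/* Engine-specific overrides. */
	engine->flush = gen6_bsd_ring_flush;
	engine->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	return intel_init_ring_buffer(dev, engine);
}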