Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.c | 21
1 file changed, 8 insertions, 13 deletions
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a69b367e565..697af5add78b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -831,7 +831,8 @@ static void execlists_submission_tasklet(unsigned long data)
 	struct drm_i915_private *dev_priv = engine->i915;
 	bool fw = false;
 
-	/* We can skip acquiring intel_runtime_pm_get() here as it was taken
+	/*
+	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
 	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
 	 * not be relinquished until the device is idle (see
 	 * i915_gem_idle_work_handler()). As a precaution, we make sure
@@ -840,7 +841,8 @@ static void execlists_submission_tasklet(unsigned long data)
 	 */
 	GEM_BUG_ON(!dev_priv->gt.awake);
 
-	/* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+	/*
+	 * Prefer doing test_and_clear_bit() as a two stage operation to avoid
 	 * imposing the cost of a locked atomic transaction when submitting a
 	 * new request (outside of the context-switch interrupt).
 	 */
@@ -856,17 +858,10 @@ static void execlists_submission_tasklet(unsigned long data)
 		execlists->csb_head = -1; /* force mmio read of CSB ptrs */
 	}
 
-	/* The write will be ordered by the uncached read (itself
-	 * a memory barrier), so we do not need another in the form
-	 * of a locked instruction. The race between the interrupt
-	 * handler and the split test/clear is harmless as we order
-	 * our clear before the CSB read. If the interrupt arrived
-	 * first between the test and the clear, we read the updated
-	 * CSB and clear the bit. If the interrupt arrives as we read
-	 * the CSB or later (i.e. after we had cleared the bit) the bit
-	 * is set and we do a new loop.
-	 */
-	__clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+	/* Clear before reading to catch new interrupts */
+	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+	smp_mb__after_atomic();
+
 	if (unlikely(execlists->csb_head == -1)) { /* following a reset */
 		if (!fw) {
 			intel_uncore_forcewake_get(dev_priv,
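
For context, the "clear before read" pattern that the last hunk switches to can be modelled in plain C. The sketch below is not driver code: it uses C11 atomics and pthreads as stand-ins for clear_bit(), smp_mb__after_atomic() and the interrupt-handler/tasklet pair, and every name in it (producer, consumer, mailbox, posted) is hypothetical, chosen only to illustrate why a wake-up cannot be lost when the flag is cleared before the data is sampled.

/*
 * Minimal userspace sketch (not driver code) of the ordering the new
 * hunk relies on: clear the wake-up bit first, then read the data, so
 * any update that lands after our read must also re-set the bit and
 * force another pass.  C11 seq_cst atomics stand in for
 * clear_bit()/smp_mb__after_atomic(); "producer", "consumer" and
 * "mailbox" are illustrative names, not taken from the driver.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define LAST 100000

static atomic_int posted;   /* analogue of the ENGINE_IRQ_EXECLIST bit */
static atomic_int mailbox;  /* analogue of the CSB contents */

static void *producer(void *arg)        /* analogue of the interrupt handler */
{
	(void)arg;
	for (int i = 1; i <= LAST; i++) {
		atomic_store(&mailbox, i);  /* publish the data ... */
		atomic_store(&posted, 1);   /* ... then raise the flag */
	}
	return NULL;
}

static void *consumer(void *arg)        /* analogue of the tasklet */
{
	int seen = 0;
	(void)arg;
	while (seen < LAST) {
		if (!atomic_load(&posted))
			continue;
		/*
		 * Clear before reading, with a full barrier in between:
		 * if the producer publishes after our read below, it also
		 * re-sets 'posted' and we take another trip round the loop.
		 */
		atomic_store(&posted, 0);
		atomic_thread_fence(memory_order_seq_cst);
		seen = atomic_load(&mailbox);
	}
	printf("consumer caught up at %d\n", seen);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

The design point is the same as in the patch: because the flag is cleared before the data behind it is read, and the two are ordered by a full barrier, any producer update the read misses must have been published after the clear, so the flag is set again and the loop runs once more. Build the sketch with cc -pthread to try it.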