author	Chris Wilson <chris@chris-wilson.co.uk>	2019-01-14 22:04:01 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-01-16 16:24:16 +0100
commit	18bb2bccb5492fb5c36908191b8af77e54c58814 (patch)
tree	96cb21f22d1eb251d6f3853b300912b1f1c81800 /drivers/gpu/drm/i915/i915_gem.c
parent	drm/i915: Pass down rc in intel_encoder->compute_config() (diff)
drm/i915: Serialise concurrent calls to i915_gem_set_wedged()
Make i915_gem_set_wedged() and i915_gem_unset_wedged() behaviour more
consistent if called concurrently, and only do the wedging and reporting
once, curtailing any possible race where we start unwedging in the middle
of a wedge.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114210408.4561-2-chris@chris-wilson.co.uk
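The patch serialises the wedge and unwedge paths behind a single wedge_mutex and lets any caller who finds the I915_WEDGED bit already set bail out immediately. The same pattern can be sketched outside the driver; the following is a minimal, hypothetical userspace analogue using pthreads and C11 atomics, where wedge_state, set_wedged() and unset_wedged() are illustrative stand-ins for the i915_gpu_error machinery rather than the actual i915 functions, and the printf() calls stand in for the real stop-submission and request-drain work.

/*
 * Illustrative userspace sketch of the wedge_mutex pattern; the types
 * and names here are hypothetical, not the i915 driver's own.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct wedge_state {
	pthread_mutex_t wedge_mutex;	/* serialises wedge vs. unwedge */
	atomic_bool wedged;		/* analogue of the I915_WEDGED bit */
};

static void set_wedged(struct wedge_state *ws)
{
	pthread_mutex_lock(&ws->wedge_mutex);

	/* Only the first caller does the wedging and reporting. */
	if (atomic_load(&ws->wedged)) {
		pthread_mutex_unlock(&ws->wedge_mutex);
		return;
	}

	printf("wedging: stop submission, cancel in-flight requests\n");

	/* Publish the flag only after the takeover is complete. */
	atomic_store(&ws->wedged, true);

	pthread_mutex_unlock(&ws->wedge_mutex);
}

static bool unset_wedged(struct wedge_state *ws)
{
	bool ret = false;

	if (!atomic_load(&ws->wedged))
		return true;	/* nothing to recover */

	pthread_mutex_lock(&ws->wedge_mutex);

	printf("unwedging: wait for cancelled requests to drain\n");

	atomic_store(&ws->wedged, false);
	ret = true;

	pthread_mutex_unlock(&ws->wedge_mutex);
	return ret;
}

int main(void)
{
	struct wedge_state ws = {
		.wedge_mutex = PTHREAD_MUTEX_INITIALIZER,
		.wedged = false,
	};

	set_wedged(&ws);		/* does the work */
	set_wedged(&ws);		/* second caller returns at once */
	return unset_wedged(&ws) ? 0 : 1;
}

Because both paths contend for the same mutex, a concurrent unset_wedged() can no longer observe a half-completed wedge, which is the race the commit message describes; the early test-and-return also guarantees that only the first of several concurrent set_wedged() callers performs the wedging and reporting.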
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	32
1 file changed, 24 insertions, 8 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 565b2fa1607d..5c6089777fde 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3187,10 +3187,15 @@ static void nop_submit_request(struct i915_request *request)
void i915_gem_set_wedged(struct drm_i915_private *i915)
{
+ struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- GEM_TRACE("start\n");
+ mutex_lock(&error->wedge_mutex);
+ if (test_bit(I915_WEDGED, &error->flags)) {
+ mutex_unlock(&error->wedge_mutex);
+ return;
+ }
if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
struct drm_printer p = drm_debug_printer(__func__);
@@ -3199,8 +3204,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
- if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags))
- goto out;
+ GEM_TRACE("start\n");
/*
* First, stop submission to hw, but do not yet complete requests by
@@ -3236,23 +3240,31 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_engine_wakeup(engine);
}
-out:
+ smp_mb__before_atomic();
+ set_bit(I915_WEDGED, &error->flags);
+
GEM_TRACE("end\n");
+ mutex_unlock(&error->wedge_mutex);
- wake_up_all(&i915->gpu_error.reset_queue);
+ wake_up_all(&error->reset_queue);
}
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
+ struct i915_gpu_error *error = &i915->gpu_error;
struct i915_timeline *tl;
+ bool ret = false;
lockdep_assert_held(&i915->drm.struct_mutex);
- if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
+
+ if (!test_bit(I915_WEDGED, &error->flags))
return true;
if (!i915->gt.scratch) /* Never full initialised, recovery impossible */
return false;
+ mutex_lock(&error->wedge_mutex);
+
GEM_TRACE("start\n");
/*
@@ -3286,7 +3298,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
*/
if (dma_fence_default_wait(&rq->fence, true,
MAX_SCHEDULE_TIMEOUT) < 0)
- return false;
+ goto unlock;
}
i915_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests);
@@ -3309,8 +3321,11 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+ ret = true;
+unlock:
+ mutex_unlock(&i915->gpu_error.wedge_mutex);
- return true;
+ return ret;
}
static void
@@ -5706,6 +5721,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+ mutex_init(&dev_priv->gpu_error.wedge_mutex);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);