From 641934069d29211baf82afb93622a426172b67b6 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Sun, 24 Oct 2010 12:38:05 +0100
Subject: drm/i915: Move gpu_write_list to per-ring

... to prevent flush processing of an idle (or even absent) ring.

This fixes a regression during suspend from 87acb0a5.

Reported-and-tested-by: Alexey Fisher
Tested-by: Peter Clifton
Signed-off-by: Chris Wilson
---
 drivers/gpu/drm/i915/i915_drv.h         |  9 ---------
 drivers/gpu/drm/i915/i915_gem.c         | 36 +++++++++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.c |  1 +
 drivers/gpu/drm/i915/intel_ringbuffer.h |  9 +++++++++
 4 files changed, 29 insertions(+), 26 deletions(-)

(limited to 'drivers/gpu')

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cc9cb0dda6fc..2c2c19b6285e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -570,15 +570,6 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head flushing_list;
 
-		/**
-		 * List of objects currently pending a GPU write flush.
-		 *
-		 * All elements on this list will belong to either the
-		 * active_list or flushing_list, last_rendering_seqno can
-		 * be used to differentiate between the two elements.
-		 */
-		struct list_head gpu_write_list;
-
 		/**
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e7f27a5b89dc..6c2618d884e7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1657,12 +1657,11 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	struct drm_i915_gem_object *obj_priv, *next;
 
 	list_for_each_entry_safe(obj_priv, next,
-				 &dev_priv->mm.gpu_write_list,
+				 &ring->gpu_write_list,
 				 gpu_write_list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
-		if (obj->write_domain & flush_domains &&
-		    obj_priv->ring == ring) {
+		if (obj->write_domain & flush_domains) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
@@ -2173,6 +2172,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	if (list_empty(&ring->gpu_write_list))
+		return 0;
+
 	i915_gem_flush_ring(dev, NULL, ring,
 			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	return i915_wait_request(dev,
@@ -3786,14 +3788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 		uint32_t old_write_domain = obj->write_domain;
-
 		obj->write_domain = obj->pending_write_domain;
-		if (obj->write_domain)
-			list_move_tail(&obj_priv->gpu_write_list,
-				       &dev_priv->mm.gpu_write_list);
-
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -3858,9 +3854,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		obj_priv = to_intel_bo(obj);
 
 		i915_gem_object_move_to_active(obj, ring);
+		if (obj->write_domain)
+			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+				       &ring->gpu_write_list);
 	}
 
 	i915_add_request(dev, file, request, ring);
@@ -4618,6 +4616,14 @@ i915_gem_lastclose(struct drm_device *dev)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 }
 
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
@@ -4626,17 +4632,13 @@ i915_gem_load(struct drm_device *dev)
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
-	INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
-	INIT_LIST_HEAD(&dev_priv->blt_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->blt_ring.request_list);
+	init_ring_lists(&dev_priv->render_ring);
+	init_ring_lists(&dev_priv->bsd_ring);
+	init_ring_lists(&dev_priv->blt_ring);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4b53ca81ea4d..09f2dc353ae2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -580,6 +580,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6ab40c6058f7..a05aff0e5764 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -82,6 +82,15 @@ struct intel_ring_buffer {
 	 */
 	struct list_head request_list;
 
+	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
--
cgit v1.2.3