author | Chris Wilson <chris@chris-wilson.co.uk> | 2011-01-07 18:09:48 +0100
---|---|---
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2011-01-11 21:44:50 +0100
commit | 882417851a0f2e09e110038a13e88e9b5a100800 |
tree | 6c96e700acb948aabbc7857f823bb8ad114b3b44 /drivers/gpu |
parent | drm/i915: detect & report PCH display error interrupts |
drm/i915: Propagate error from flushing the ring
... in order to avoid a BUG() and potential unbounded waits.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
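
The conversion is mechanical but repeated at every call site: i915_gem_flush_ring() and the helpers layered on it stop asserting on failure and instead hand the error back up the call chain, so callers can unwind rather than wait on a flush that was never queued. Below is a minimal standalone sketch of that pattern; the names are illustrative, not the driver's real API.

    /*
     * Sketch of the conversion this patch performs: a flush helper that
     * previously asserted with BUG() on failure now returns an error code,
     * and each caller checks it and unwinds instead of continuing into a
     * potentially unbounded wait on a flush that was never queued.
     */
    #include <stdio.h>

    static int hw_flush_failed;     /* stand-in for the hardware going wrong */

    static int hw_flush(void)
    {
            return hw_flush_failed ? -5 /* -EIO */ : 0;
    }

    /* Before: void flush_ring(void) { if (hw_flush()) BUG(); ... } */
    static int flush_ring(void)
    {
            int ret = hw_flush();
            if (ret)
                    return ret;     /* propagate instead of BUG() */
            /* process the flushing list only if the flush was queued */
            return 0;
    }

    static int set_domain(void)
    {
            int ret = flush_ring();
            if (ret)
                    return ret;     /* don't wait on a flush that never happened */
            /* safe to wait for rendering to complete here */
            return 0;
    }

    int main(void)
    {
            printf("healthy: %d\n", set_domain());
            hw_flush_failed = 1;
            printf("failing: %d\n", set_domain());
            return 0;
    }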
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 102
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c | 28
3 files changed, 90 insertions, 48 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 455260067ff7..3e78314514a2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1086,10 +1086,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_flush_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring,
-			 uint32_t invalidate_domains,
-			 uint32_t flush_domains);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     uint32_t invalidate_domains,
+				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 07b62449b9e1..2873d068eb1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,18 +35,18 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-					     bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-						     uint64_t offset,
-						     uint64_t size);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+							  bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+								   uint64_t offset,
+								   uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-				       unsigned alignment,
-				       bool map_and_fenceable);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
 				     struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
@@ -2142,25 +2142,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
 		    struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
-	if (ring->flush(ring, invalidate_domains, flush_domains) == 0)
-		i915_gem_process_flushing_list(dev, flush_domains, ring);
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
 }
 
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list))
-		i915_gem_flush_ring(dev, ring,
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
 				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2370,10 +2382,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 	int ret;
 
 	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-			i915_gem_flush_ring(obj->base.dev,
-					    obj->last_fenced_ring,
-					    0, obj->base.write_domain);
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
+						  obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
 
 		obj->fenced_gpu_access = false;
 	}
@@ -2529,9 +2544,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			return ret;
 	} else if (obj->tiling_changed) {
 		if (obj->fenced_gpu_access) {
-			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-				i915_gem_flush_ring(obj->base.dev, obj->ring,
-						    0, obj->base.write_domain);
+			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+				ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+							  0, obj->base.write_domain);
+				if (ret)
+					return ret;
+			}
 
 			obj->fenced_gpu_access = false;
 		}
@@ -2817,17 +2835,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return;
+		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-	BUG_ON(obj->base.write_domain);
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2894,7 +2911,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
@@ -2939,7 +2959,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_wait_rendering(obj, false);
@@ -2964,12 +2987,17 @@ int
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, obj->ring,
-				    0, obj->base.write_domain);
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					  0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
 	return i915_gem_object_wait_rendering(obj, interruptible);
 }
@@ -2986,7 +3014,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3081,7 +3112,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3374,8 +3408,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * flush earlier is beneficial.
 		 */
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			i915_gem_flush_ring(dev, obj->ring,
-					    0, obj->base.write_domain);
+			ret = i915_gem_flush_ring(dev, obj->ring,
+						  0, obj->base.write_domain);
 		} else if (obj->ring->outstanding_lazy_request ==
 			   obj->last_rendering_seqno) {
 			struct drm_i915_gem_request *request;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1b78b66dd77e..97d5fbd8ea13 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -713,14 +713,14 @@ err:
 	return ret;
 }
 
-static void
+static int
 i915_gem_execbuffer_flush(struct drm_device *dev,
 			  uint32_t invalidate_domains,
 			  uint32_t flush_domains,
 			  uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
@@ -730,11 +730,17 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		for (i = 0; i < I915_NUM_RINGS; i++)
-			if (flush_rings & (1 << i))
-				i915_gem_flush_ring(dev, &dev_priv->ring[i],
-						    invalidate_domains,
-						    flush_domains);
+			if (flush_rings & (1 << i)) {
+				ret = i915_gem_flush_ring(dev,
+							  &dev_priv->ring[i],
+							  invalidate_domains,
+							  flush_domains);
+				if (ret)
+					return ret;
+			}
 	}
+
+	return 0;
 }
 
 static int
@@ -798,10 +804,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		       cd.invalidate_domains,
 		       cd.flush_domains);
 #endif
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains,
-					  cd.flush_rings);
+		ret = i915_gem_execbuffer_flush(ring->dev,
+						cd.invalidate_domains,
+						cd.flush_domains,
+						cd.flush_rings);
+		if (ret)
+			return ret;
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {
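
A usage note on the annotation added in the header: __must_check is the kernel's wrapper (from include/linux/compiler.h) around GCC's warn_unused_result attribute, so any future caller that silently discards the new return value is flagged at build time. A small self-contained sketch, with a hypothetical stub standing in for i915_gem_flush_ring():

    /* Sketch of what __must_check buys. The #define mirrors the kernel's
     * definition; flush_ring_stub() is a hypothetical stand-in for
     * i915_gem_flush_ring(), not the real function. */
    #define __must_check __attribute__((warn_unused_result))

    static int __must_check flush_ring_stub(void)
    {
            return -5;      /* pretend the flush could not be queued (-EIO) */
    }

    int main(void)
    {
            int ret;

            flush_ring_stub();              /* gcc: warning, result ignored */

            ret = flush_ring_stub();        /* checked use: no warning */
            return ret ? 1 : 0;
    }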