author		Thomas Hellstrom <thellstrom@vmware.com>	2015-08-12 07:31:17 +0200
committer	Dave Airlie <airlied@redhat.com>		2015-08-14 01:49:19 +0200
commit		3e04e2fe6d87807d27521ad6ebb9e7919d628f25 (patch)
tree		1fe36876290a88c279d1a91ffe46dd7814003bec /drivers/gpu
parent		Merge branch 'exynos-drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
drm/vmwgfx: Fix execbuf locking issues
This addresses two issues that cause problems with viewperf maya-03 in situations with memory pressure.

The first issue causes attempts to unreserve buffers if batched reservation fails due to, for example, a pending signal. While the ttm_eu api was previously resistant to this type of error, it no longer is, and the lockdep code will complain about attempting to unreserve buffers that are not reserved. The issue is resolved by avoiding the call to ttm_eu_backoff_reservation in the buffer reserve error path.

The second issue is that the binding_mutex may be held when user-space fence objects are created, and hence during memory reclaim. This may cause recursive attempts to grab the binding mutex. The issue is resolved by not holding the binding mutex across fence creation and submission.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Dave Airlie <airlied@redhat.com>
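As a rough, self-contained sketch of the first fix (illustrative only; reserve_buffers(), validate_buffers() and backoff_reservation() are made-up stand-ins for the ttm_eu/vmwgfx calls, and only the error-label structure mirrors the patch): the reserve error path jumps past the backoff call, while failures after a successful reservation still back off.

/* Illustrative stand-ins only; not the vmwgfx driver code. */
#include <stdio.h>

static int reserve_buffers(void)  { return -1; /* e.g. interrupted by a signal */ }
static int validate_buffers(void) { return 0; }
static void backoff_reservation(void) { puts("backing off reservation"); }

static int execbuf_process(void)
{
	int ret;

	ret = reserve_buffers();
	if (ret != 0)
		goto out_err_nores;	/* nothing was reserved: do NOT back off */

	ret = validate_buffers();
	if (ret != 0)
		goto out_err;		/* reserved: back off before returning */

	return 0;

out_err:
	backoff_reservation();
out_err_nores:
	return ret;
}

int main(void)
{
	printf("execbuf_process() returned %d\n", execbuf_process());
	return 0;
}

A sketch of the second fix follows the diff below.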
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..97ad3bcb99a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
 				     true, NULL);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_validate_buffers(dev_priv, sw_context);
 	if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 
 	vmw_fifo_commit(dev_priv, command_size);
+	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
 
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
-	mutex_unlock(&dev_priv->binding_mutex);
 
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
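And a rough, self-contained sketch of the ordering established by the second and third hunks (a plain pthread mutex and stub functions stand in for binding_mutex, vmw_fifo_commit() and vmw_execbuf_fence_commands(); none of this is the real driver code): the command buffer is committed and binding_mutex dropped before the fence is created and submitted, so memory reclaim triggered by the fence allocation can no longer recurse on the mutex.

/* Illustrative stand-ins only; not the vmwgfx driver code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t binding_mutex = PTHREAD_MUTEX_INITIALIZER;

static void fifo_commit(void)    { puts("command buffer committed"); }
static int  fence_commands(void) { puts("fence created and submitted"); return 0; }

static int execbuf_submit(void)
{
	pthread_mutex_lock(&binding_mutex);
	fifo_commit();
	/* The fix: drop binding_mutex here, before fence creation, so any
	 * memory reclaim during the fence allocation cannot try to
	 * re-acquire the same mutex. */
	pthread_mutex_unlock(&binding_mutex);

	return fence_commands();
}

int main(void)
{
	return execbuf_submit();
}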