author:    Thomas Hellstrom <thellstrom@vmware.com>    2019-03-04 19:37:40 +0100
committer: Thomas Hellstrom <thellstrom@vmware.com>    2019-08-15 08:40:05 +0200
commit:    a0a63940b0c9ebc6d3156bbfe0c2cf7560b580cd (patch)
tree:      af18fb0b82892b0f63d8f5ebdb7b575a2b52ec79 /drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
parent:    drm/vmwgfx: drop reminaing users of drmP.h (diff)
drm/vmwgfx: Assign eviction priorities to resources
TTM provides a means to assign eviction priorities to buffer objects. This
means that, under memory pressure, all buffer objects with a lower priority
will be evicted first.
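As a rough illustration of the mechanism (a minimal standalone sketch, not
TTM's actual data structures; the type and function names below are made up
for this example), eviction can be thought of as one LRU list per priority
level, walked from the lowest priority upwards:

#include <stdio.h>

#define NUM_PRIOS 4                     /* illustrative priority count */

struct fake_bo {
	int id;
	unsigned int priority;          /* lower value == evicted earlier */
	struct fake_bo *lru_next;
};

static struct fake_bo *lru[NUM_PRIOS];  /* one LRU list per priority */

static void lru_add(struct fake_bo *bo)
{
	bo->lru_next = lru[bo->priority];
	lru[bo->priority] = bo;
}

/* On memory pressure, scan priorities from lowest to highest. */
static struct fake_bo *pick_eviction_victim(void)
{
	for (unsigned int p = 0; p < NUM_PRIOS; p++) {
		if (lru[p]) {
			struct fake_bo *victim = lru[p];
			lru[p] = victim->lru_next;
			return victim;
		}
	}
	return NULL;
}

int main(void)
{
	struct fake_bo surface = { .id = 1, .priority = 1 };
	struct fake_bo cotable = { .id = 2, .priority = 3 };

	lru_add(&cotable);
	lru_add(&surface);

	/* The lower-priority surface is picked before the cotable. */
	printf("evict bo %d first\n", pick_eviction_victim()->id);
	return 0;
}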
Use this to make sure that surfaces, and in particular non-dirty surfaces,
are evicted first. Evicting shaders, cotables and contexts in particular
implies a significant performance hit on vmwgfx, so make sure these
resources are evicted last.
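One way to read that ordering, as a hedged sketch only (the enum and its
names are hypothetical; the driver assigns plain per-resource-type numbers,
e.g. the cotable hunk below uses 3):

/* Hypothetical ordering for illustration; lower values are evicted first. */
enum eviction_prio_example {
	PRIO_SURFACE_CLEAN    = 1, /* non-dirty surfaces: evicted first      */
	PRIO_SURFACE_DIRTY    = 2, /* dirty surfaces: evicted later          */
	PRIO_CTX_COTBL_SHADER = 3  /* contexts/cotables/shaders: evicted last */
};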
Some buffer objects are sub-allocated in user-space, which means we can have
many resources attached to a single buffer object. In that case the buffer
object is given the highest priority of the attached resources.
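When several resources share one buffer object, the object has to be evicted
as late as its most valuable resource. A minimal sketch of that "highest
attached priority wins" rule (all names here are illustrative, not the
driver's helpers):

#define NUM_PRIOS 4   /* illustrative, matching the LRU sketch above */

struct example_bo {
	unsigned int prio_count[NUM_PRIOS]; /* resources attached per prio  */
	unsigned int priority;              /* effective eviction priority  */
};

/* Recompute the highest priority that still has an attached resource. */
static void example_bo_prio_recompute(struct example_bo *bo)
{
	unsigned int p = NUM_PRIOS;

	while (p--) {
		if (bo->prio_count[p]) {
			bo->priority = p;
			return;
		}
	}
	bo->priority = 0;
}

/* Called when a resource with priority @prio is attached to the bo. */
static void example_bo_prio_add(struct example_bo *bo, unsigned int prio)
{
	if (bo->prio_count[prio]++ == 0 && prio > bo->priority)
		bo->priority = prio;
}

/* Called when a resource with priority @prio is detached from the bo. */
static void example_bo_prio_del(struct example_bo *bo, unsigned int prio)
{
	if (--bo->prio_count[prio] == 0 && prio == bo->priority)
		example_bo_prio_recompute(bo);
}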
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c')
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 7984f172ec4a..4e8df76681c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -116,6 +116,8 @@ static const struct vmw_res_func vmw_cotable_func = {
 	.res_type = vmw_res_cotable,
 	.needs_backup = true,
 	.may_evict = true,
+	.prio = 3,
+	.dirty_prio = 3,
 	.type_name = "context guest backed object tables",
 	.backup_placement = &vmw_mob_placement,
 	.create = vmw_cotable_create,
@@ -307,7 +309,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	struct ttm_buffer_object *bo = val_buf->bo;
 	struct vmw_fence_obj *fence;
 
-	if (list_empty(&res->mob_head))
+	if (!vmw_resource_mob_attached(res))
 		return 0;
 
 	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
@@ -453,6 +455,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 		goto out_wait;
 	}
 
+	vmw_resource_mob_detach(res);
 	res->backup = buf;
 	res->backup_size = new_size;
 	vcotbl->size_read_back = cur_size_read_back;
@@ -467,12 +470,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 		res->backup = old_buf;
 		res->backup_size = old_size;
 		vcotbl->size_read_back = old_size_read_back;
+		vmw_resource_mob_attach(res);
 		goto out_wait;
 	}
 
+	vmw_resource_mob_attach(res);
 	/* Let go of the old mob. */
-	list_del(&res->mob_head);
-	list_add_tail(&res->mob_head, &buf->res_list);
 	vmw_bo_unreference(&old_buf);
 
 	res->id = vcotbl->type;
@@ -496,7 +499,7 @@ out_wait:
  * is called before bind() in the validation sequence is instead used for two
  * things.
  * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
- *    buffer, that is, if @res->mob_head is non-empty.
+ *    buffer.
  * 2) Resize the cotable if needed.
  */
 static int vmw_cotable_create(struct vmw_resource *res)
@@ -512,7 +515,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
 		new_size *= 2;
 
 	if (likely(new_size <= res->backup_size)) {
-		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
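For context, the vmw_resource_mob_attached()/_attach()/_detach() helpers used
in the hunks above are introduced elsewhere in the same series. A plausible
shape is sketched below; treat the bodies, and member names such as used_prio
and res_dirty, as assumptions rather than the committed code, and read
vmw_bo_prio_add()/vmw_bo_prio_del() as whatever bookkeeping updates the
buffer object's effective priority (as in the sketch after the commit
message):

/* Sketch only, assuming the list-based mob_head/res_list links seen above. */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !list_empty(&res->mob_head);
}

static void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	/* Dirty resources may be given a different (higher) priority. */
	res->used_prio = res->res_dirty ? res->func->dirty_prio :
					  res->func->prio;
	list_add_tail(&res->mob_head, &backup->res_list);
	vmw_bo_prio_add(backup, res->used_prio);  /* raise the bo if needed */
}

static void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	if (vmw_resource_mob_attached(res)) {
		list_del_init(&res->mob_head);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}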