author | Christian König <christian.koenig@amd.com> | 2017-10-16 16:50:32 +0200
---|---|---
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-12-04 22:41:33 +0100
commit | c1c7ce8f5687bb01b2eb0db3c19cb375267bb16d (patch) |
tree | 1f084782b78db4224fc55dacf8246816b36508ab /drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c |
parent | drm/amdgpu: nuke amdgpu_ttm_is_bound() v2 (diff) |
download | linux-c1c7ce8f5687bb01b2eb0db3c19cb375267bb16d.tar.xz linux-c1c7ce8f5687bb01b2eb0db3c19cb375267bb16d.zip |
drm/amdgpu: move GART recovery into GTT manager v2
The GTT manager handles the GART address space anyway, so it is
completely pointless to keep the same information around twice.
v2: rebased
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
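For context, a minimal sketch of how a GPU-reset path might use the new entry point. The call site lives outside this file; the `example_gart_recovery` wrapper and the exact field path `adev->mman.bdev.man[TTM_PL_TT]` are assumptions based on the TTM conventions of this kernel era, not something this patch shows:

```c
/* Hypothetical caller (not part of this patch): once the GART table
 * itself has been restored during a GPU reset, ask the GTT manager to
 * rebind every buffer object it still tracks.  TTM_PL_TT selects the
 * GTT domain's memory-type manager. */
static int example_gart_recovery(struct amdgpu_device *adev)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_TT];

	return amdgpu_gtt_mgr_recover(man);
}
```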
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 53
1 file changed, 39 insertions, 14 deletions
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f7669dc6909b..e14ab34d8262 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -31,6 +31,11 @@ struct amdgpu_gtt_mgr {
 	atomic64_t available;
 };
 
+struct amdgpu_gtt_node {
+	struct drm_mm_node node;
+	struct ttm_buffer_object *tbo;
+};
+
 /**
  * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
  *
@@ -87,9 +92,9 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 
-	return (node->start != AMDGPU_BO_INVALID_OFFSET);
+	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
 }
 
 /**
@@ -109,7 +114,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 	enum drm_mm_insert_mode mode;
 	unsigned long fpfn, lpfn;
 	int r;
@@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 		mode = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range(&mgr->mm, node,
-					mem->num_pages, mem->page_alignment, 0,
-					fpfn, lpfn, mode);
+	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+					mem->page_alignment, 0, fpfn, lpfn,
+					mode);
 	spin_unlock(&mgr->lock);
 
 	if (!r)
-		mem->start = node->start;
+		mem->start = node->node.start;
 
 	return r;
 }
@@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
			      struct ttm_mem_reg *mem)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node;
+	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
@@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		goto err_out;
 	}
 
-	node->start = AMDGPU_BO_INVALID_OFFSET;
-	node->size = mem->num_pages;
+	node->node.start = AMDGPU_BO_INVALID_OFFSET;
+	node->node.size = mem->num_pages;
+	node->tbo = tbo;
 	mem->mm_node = node;
 
 	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 			goto err_out;
 		}
 	} else {
-		mem->start = node->start;
+		mem->start = node->node.start;
 	}
 
 	return 0;
@@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
			       struct ttm_mem_reg *mem)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 
 	if (!node)
 		return;
 
 	spin_lock(&mgr->lock);
-	if (node->start != AMDGPU_BO_INVALID_OFFSET)
-		drm_mm_remove_node(node);
+	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
+		drm_mm_remove_node(&node->node);
 	spin_unlock(&mgr->lock);
 
 	atomic64_add(mem->num_pages, &mgr->available);
@@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
 	return (result > 0 ? result : 0) * PAGE_SIZE;
 }
 
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct amdgpu_gtt_node *node;
+	struct drm_mm_node *mm_node;
+	int r = 0;
+
+	spin_lock(&mgr->lock);
+	drm_mm_for_each_node(mm_node, &mgr->mm) {
+		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		r = amdgpu_ttm_recover_gart(node->tbo);
+		if (r)
+			break;
+	}
+	spin_unlock(&mgr->lock);
+
+	return r;
+}
+
 /**
  * amdgpu_gtt_mgr_debug - dump VRAM table
  *
```
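The recovery walk works because every `drm_mm_node` the manager hands out is embedded inside an `amdgpu_gtt_node`, so `container_of` can step from the allocator's node back to the wrapper and its `tbo` back-pointer. A self-contained userspace sketch of the same idiom (all names below are illustrative, and this `container_of` is a simplified stand-in for the kernel macro):

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of(): recover the
 * address of the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Same layout idea as amdgpu_gtt_node: embed the allocator's bookkeeping
 * node and keep a back-pointer to the owning object. */
struct mm_node { unsigned long start, size; };

struct gtt_node {
	struct mm_node node;  /* embedded; all the allocator iterates over */
	const char *owner;    /* stands in for the ttm_buffer_object */
};

int main(void)
{
	struct gtt_node n = { .node = { 0, 16 }, .owner = "bo0" };
	struct mm_node *mm = &n.node;  /* what an iterator would hand us */

	/* Step from the embedded member back to the enclosing wrapper. */
	struct gtt_node *back = container_of(mm, struct gtt_node, node);

	printf("recovered owner: %s\n", back->owner);  /* prints "bo0" */
	return 0;
}
```

This embedding is what lets the GTT manager enumerate its own allocations and reach each owning buffer object without a second bookkeeping list, which is exactly the duplication the commit message calls pointless.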