author    Dave Airlie <airlied@redhat.com>    2021-06-10 03:03:50 +0200
committer Dave Airlie <airlied@redhat.com>    2021-06-10 03:28:09 +0200
commit    09b020bb05a514f560979438fa40406bc63d5353 (patch)
tree      2568980e133e47bce3509cab92f7adac99b51cc4 /drivers/gpu/drm/radeon
parent    Merge tag 'amd-drm-next-5.14-2021-06-02' of https://gitlab.freedesktop.org/ag... (diff)
parent    drm/vmwgfx: use ttm_bo_move_null() when there is nothing to move (diff)
Merge tag 'drm-misc-next-2021-06-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.14:

UAPI Changes:
 * drm/panfrost: Export AFBC_FEATURES register to userspace

Cross-subsystem Changes:
 * dma-buf: Fix debug printing; Rename dma_resv_*() functions + changes in callers; Cleanups

Core Changes:
 * Add prefetching memcpy for WC
 * Avoid circular dependency on CONFIG_FB
 * Cleanups
 * Documentation fixes throughout DRM
 * ttm: Make struct ttm_resource the base of all managers + changes in all users of TTM; Add a generic memcpy for page-based iomem; Remove use of VM_MIXEDMAP; Cleanups

Driver Changes:
 * drm/bridge: Add TI SN65DSI83 and SN65DSI84 + DT bindings
 * drm/hyperv: Add DRM driver for HyperV graphics output
 * drm/msm: Fix module dependencies
 * drm/panel: KD53T133: Support rotation
 * drm/pl111: Fix module dependencies
 * drm/qxl: Fixes
 * drm/stm: Cleanups
 * drm/sun4i: Be explicit about format modifiers
 * drm/vc4: Use struct gpio_desc; Cleanups
 * drm/vgem: Cleanups
 * drm/vmwgfx: Use ttm_bo_move_null() if there's nothing to copy
 * fbdev/mach64: Cleanups
 * fbdev/mb862xx: Use DEVICE_ATTR_RO

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YMBw3DF2b9udByfT@linux-uq9g
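For context on the two API changes this merge pulls into radeon, below is a minimal sketch (a hypothetical helper, not part of this patch) of the renamed dma_resv accessors and the move from the embedded bo->mem member to the bo->resource pointer that the hunks in this diff convert to:

/*
 * Hypothetical helper illustrating the converted call sites.
 * Assumes a reserved TTM buffer object, as in the radeon code below.
 */
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

static int example_wait_and_query(struct ttm_buffer_object *tbo)
{
	struct dma_fence *excl;
	long r;

	/* was dma_resv_get_excl(); now dma_resv_excl_fence() */
	excl = dma_resv_excl_fence(tbo->base.resv);
	if (excl)
		dma_fence_wait(excl, false);

	/* was dma_resv_wait_timeout_rcu(); now dma_resv_wait_timeout() */
	r = dma_resv_wait_timeout(tbo->base.resv, true, true, 30 * HZ);
	if (r == 0)
		return -EBUSY;
	if (r < 0)
		return r;

	/* was tbo->mem.mem_type; now tbo->resource->mem_type */
	return tbo->resource->mem_type;
}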
Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c       |  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c      | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c       |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c   | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h   |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c       |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sync.c     |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h    |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c      |  9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c      |  2
11 files changed, 38 insertions, 37 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80a3bee933d6..9ed2b2700e0a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -400,8 +400,8 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.mem.num_pages -
- (int)lb->robj->tbo.mem.num_pages;
+ return (int)la->robj->tbo.resource->num_pages -
+ (int)lb->robj->tbo.resource->num_pages;
}
/**
@@ -516,7 +516,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
}
r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
- &rdev->ring_tmp_bo.bo->tbo.mem);
+ rdev->ring_tmp_bo.bo->tbo.resource);
if (r)
return r;
@@ -530,7 +530,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 652af7a134bd..406681317419 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
+ work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ff8849827d61..458f92a70887 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -161,7 +161,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -523,13 +523,13 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
+ r = dma_resv_test_signaled(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
r = 0;
- cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put(gobj);
return r;
@@ -552,14 +552,14 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
@@ -643,7 +643,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
goto error_free;
list_for_each_entry(entry, &list, head) {
- domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+ domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (domain == RADEON_GEM_DOMAIN_CPU)
@@ -656,7 +656,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
goto error_unlock;
if (bo_va->it.start)
- r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+ r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
@@ -860,7 +860,7 @@ static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
unsigned domain;
const char *placement;
- domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+ domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
switch (domain) {
case RADEON_GEM_DOMAIN_VRAM:
placement = "VRAM";
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index e37c9a57a7c3..9fa88549c89e 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
return true;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index cee11c55fd15..bfaaa3c969a3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -76,7 +76,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct radeon_bo, tbo);
- radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+ radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1);
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
@@ -250,7 +250,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
}
return 0;
}
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r) {
return r;
}
@@ -359,7 +359,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)
{
ttm_bo_unpin(&bo->tbo);
if (!bo->tbo.pin_count) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
bo->rdev->vram_pin_size -= radeon_bo_size(bo);
else
bo->rdev->gart_pin_size -= radeon_bo_size(bo);
@@ -506,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u32 domain = lobj->preferred_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
- radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+ radeon_mem_type_to_domain(bo->tbo.resource->mem_type);
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
@@ -605,7 +605,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
- bo->tbo.mem.start << PAGE_SHIFT,
+ bo->tbo.resource->start << PAGE_SHIFT,
bo->tbo.base.size);
return 0;
}
@@ -711,7 +711,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
return 0;
}
- if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+ if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
if (!has_moved)
return 0;
@@ -743,7 +743,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
if (!new_mem)
return;
- radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+ radeon_update_memory_usage(rbo, bo->resource->mem_type, -1);
radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
@@ -760,11 +760,11 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
rdev = rbo->rdev;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
+ size = bo->resource->num_pages << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
@@ -786,7 +786,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(bo, &rbo->placement, &ctx);
} else if (likely(!r)) {
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
if ((offset + size) > rdev->mc.visible_vram_size)
return VM_FAULT_SIGBUS;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index fd4116bdde0f..1739c6a142cd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -95,7 +95,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
rdev = radeon_get_rdev(bo->tbo.bdev);
- switch (bo->tbo.mem.mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_TT:
start = rdev->mc.gtt_start;
break;
@@ -104,7 +104,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
break;
}
- return (bo->tbo.mem.start << PAGE_SHIFT) + start;
+ return (bo->tbo.resource->start << PAGE_SHIFT) + start;
}
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3861c0b98fcf..c67b6ddb29a4 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -154,7 +154,7 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
return;
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
ttm_bo_unmap_virtual(&bo->tbo);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 5d3302945076..9257b60144c4 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -98,14 +98,14 @@ int radeon_sync_resv(struct radeon_device *rdev,
int r = 0;
/* always sync to the exclusive fence */
- f = dma_resv_get_excl(resv);
+ f = dma_resv_excl_fence(resv);
fence = f ? to_radeon_fence(f) : NULL;
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
r = dma_fence_wait(f, true);
- flist = dma_resv_get_list(resv);
+ flist = dma_resv_shared_list(resv);
if (shared || !flist || r)
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 1729cb9a95c5..c9fed5f2b870 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.mem.num_pages;
+ __entry->pages = bo->tbo.resource->num_pages;
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a71d94f7067b..ad2a5a791bba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -45,6 +45,7 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -98,12 +99,12 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
return;
}
rbo = container_of(bo, struct radeon_bo, tbo);
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
- bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+ bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
int i;
@@ -195,9 +196,9 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
+ struct ttm_resource *old_mem = bo->resource;
struct radeon_device *rdev;
struct radeon_bo *rbo;
- struct ttm_resource *old_mem = &bo->mem;
int r;
if (new_mem->mem_type == TTM_PL_TT) {
@@ -229,7 +230,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_SYSTEM) {
radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index dfa9fdbe98da..1f5b1a5c0a09 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = dma_resv_get_excl(bo->tbo.base.resv);
+ f = dma_resv_excl_fence(bo->tbo.base.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {