author    Christian König <christian.koenig@amd.com>	2021-11-09 11:08:18 +0100
committer Christian König <christian.koenig@amd.com>	2022-04-07 12:53:53 +0200
commit    73511edf8b196e6f1ccda0fdf294ff57aa2dc9db (patch)
tree      ededfa9aaa8241cdec2f33f26150baad470feb13 /drivers/dma-buf
parent    dma-buf: add enum dma_resv_usage v4 (diff)
dma-buf: specify usage while adding fences to dma_resv obj v7
Instead of distinguishing between shared and exclusive fences, specify the fence usage while adding fences. Rework all drivers to use this interface instead and deprecate the old one.

v2: some kerneldoc comments suggested by Daniel
v3: fix a missing case in radeon
v4: rebase on nouveau changes, fix lockdep and temporarily disable the warning
v5: more documentation updates
v6: separate internal dma_resv changes from this patch, which avoids having to temporarily disable the warning; rebase on upstream changes
v7: fix missed case in lima driver, minimize changes to i915_gem_busy_ioctl

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-3-christian.koenig@amd.com
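A rough sketch of the calling convention this introduces (not part of the patch itself; the helper name publish_job_fence() and the is_write flag are made up for illustration, while the dma_resv calls are the ones reworked by this series):

	#include <linux/dma-resv.h>
	#include <linux/dma-fence.h>

	static int publish_job_fence(struct dma_resv *resv,
				     struct dma_fence *fence, bool is_write)
	{
		int ret;

		dma_resv_lock(resv, NULL);

		/* Slots still have to be reserved before a fence may be added. */
		ret = dma_resv_reserve_fences(resv, 1);
		if (ret) {
			dma_resv_unlock(resv);
			return ret;
		}

		/*
		 * Previously a driver had to pick between
		 * dma_resv_add_excl_fence() and dma_resv_add_shared_fence();
		 * both become static helpers behind dma_resv_add_fence(), and
		 * the caller states the usage explicitly instead.
		 */
		dma_resv_add_fence(resv, fence,
				   is_write ? DMA_RESV_USAGE_WRITE :
					      DMA_RESV_USAGE_READ);

		dma_resv_unlock(resv);
		return 0;
	}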
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--	drivers/dma-buf/dma-resv.c	48
-rw-r--r--	drivers/dma-buf/st-dma-resv.c	101
2 files changed, 64 insertions, 85 deletions
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 17237e6ee30c..543dae6566d2 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -234,14 +234,14 @@ EXPORT_SYMBOL(dma_resv_reserve_fences);
#ifdef CONFIG_DEBUG_MUTEXES
/**
- * dma_resv_reset_shared_max - reset shared fences for debugging
+ * dma_resv_reset_max_fences - reset shared fences for debugging
* @obj: the dma_resv object to reset
*
* Reset the number of pre-reserved shared slots to test that drivers do
* correct slot allocation using dma_resv_reserve_fences(). See also
* &dma_resv_list.shared_max.
*/
-void dma_resv_reset_shared_max(struct dma_resv *obj)
+void dma_resv_reset_max_fences(struct dma_resv *obj)
{
struct dma_resv_list *fences = dma_resv_shared_list(obj);
@@ -251,7 +251,7 @@ void dma_resv_reset_shared_max(struct dma_resv *obj)
if (fences)
fences->shared_max = fences->shared_count;
}
-EXPORT_SYMBOL(dma_resv_reset_shared_max);
+EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif
/**
@@ -264,7 +264,8 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
*
* See also &dma_resv.fence for a discussion of the semantics.
*/
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
+static void dma_resv_add_shared_fence(struct dma_resv *obj,
+ struct dma_fence *fence)
{
struct dma_resv_list *fobj;
struct dma_fence *old;
@@ -305,13 +306,13 @@ replace:
write_seqcount_end(&obj->seq);
dma_fence_put(old);
}
-EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
* dma_resv_replace_fences - replace fences in the dma_resv obj
* @obj: the reservation object
* @context: the context of the fences to replace
* @replacement: the new fence to use instead
+ * @usage: how the new fence is used, see enum dma_resv_usage
*
* Replace fences with a specified context with a new fence. Only valid if the
* operation represented by the original fence has no longer access to the
@@ -321,12 +322,16 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
* update fence which makes the resource inaccessible.
*/
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
- struct dma_fence *replacement)
+ struct dma_fence *replacement,
+ enum dma_resv_usage usage)
{
struct dma_resv_list *list;
struct dma_fence *old;
unsigned int i;
+ /* Only readers supported for now */
+ WARN_ON(usage != DMA_RESV_USAGE_READ);
+
dma_resv_assert_held(obj);
write_seqcount_begin(&obj->seq);
@@ -360,7 +365,8 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
* Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
* See also &dma_resv.fence_excl for a discussion of the semantics.
*/
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
+static void dma_resv_add_excl_fence(struct dma_resv *obj,
+ struct dma_fence *fence)
{
struct dma_fence *old_fence = dma_resv_excl_fence(obj);
@@ -375,7 +381,27 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
dma_fence_put(old_fence);
}
-EXPORT_SYMBOL(dma_resv_add_excl_fence);
+
+/**
+ * dma_resv_add_fence - Add a fence to the dma_resv obj
+ * @obj: the reservation object
+ * @fence: the fence to add
+ * @usage: how the fence is used, see enum dma_resv_usage
+ *
+ * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
+ * dma_resv_reserve_fences() has been called.
+ *
+ * See also &dma_resv.fence for a discussion of the semantics.
+ */
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage)
+{
+ if (usage == DMA_RESV_USAGE_WRITE)
+ dma_resv_add_excl_fence(obj, fence);
+ else
+ dma_resv_add_shared_fence(obj, fence);
+}
+EXPORT_SYMBOL(dma_resv_add_fence);
/* Restart the iterator by initializing all the necessary fields, but not the
* relation to the dma_resv object. */
@@ -574,7 +600,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
}
dma_fence_get(f);
- if (dma_resv_iter_is_exclusive(&cursor))
+ if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
excl = f;
else
RCU_INIT_POINTER(list->shared[list->shared_count++], f);
@@ -771,13 +797,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
*/
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
+ static const char *usage[] = { "write", "read" };
struct dma_resv_iter cursor;
struct dma_fence *fence;
dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
seq_printf(seq, "\t%s fence:",
- dma_resv_iter_is_exclusive(&cursor) ?
- "Exclusive" : "Shared");
+ usage[dma_resv_iter_usage(&cursor)]);
dma_fence_describe(fence, seq);
}
}
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index d097981061b1..d0f7c2bfd4f0 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -58,8 +58,9 @@ static int sanitycheck(void *arg)
return r;
}
-static int test_signaling(void *arg, enum dma_resv_usage usage)
+static int test_signaling(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv resv;
struct dma_fence *f;
int r;
@@ -81,11 +82,7 @@ static int test_signaling(void *arg, enum dma_resv_usage usage)
goto err_unlock;
}
- if (usage >= DMA_RESV_USAGE_READ)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
-
+ dma_resv_add_fence(&resv, f, usage);
if (dma_resv_test_signaled(&resv, usage)) {
pr_err("Resv unexpectedly signaled\n");
r = -EINVAL;
@@ -105,18 +102,9 @@ err_free:
return r;
}
-static int test_excl_signaling(void *arg)
-{
- return test_signaling(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_signaling(void *arg)
-{
- return test_signaling(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_for_each(void *arg, enum dma_resv_usage usage)
+static int test_for_each(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
struct dma_resv resv;
@@ -139,10 +127,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
goto err_unlock;
}
- if (usage >= DMA_RESV_USAGE_READ)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
r = -ENOENT;
dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
@@ -156,8 +141,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
r = -EINVAL;
goto err_unlock;
}
- if (dma_resv_iter_is_exclusive(&cursor) !=
- (usage >= DMA_RESV_USAGE_READ)) {
+ if (dma_resv_iter_usage(&cursor) != usage) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_unlock;
@@ -177,18 +161,9 @@ err_free:
return r;
}
-static int test_excl_for_each(void *arg)
-{
- return test_for_each(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_for_each(void *arg)
-{
- return test_for_each(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
+static int test_for_each_unlocked(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
struct dma_resv resv;
@@ -212,10 +187,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
goto err_free;
}
- if (usage >= DMA_RESV_USAGE_READ)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
dma_resv_unlock(&resv);
r = -ENOENT;
@@ -235,8 +207,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
r = -EINVAL;
goto err_iter_end;
}
- if (dma_resv_iter_is_exclusive(&cursor) !=
- (usage >= DMA_RESV_USAGE_READ)) {
+ if (dma_resv_iter_usage(&cursor) != usage) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_iter_end;
@@ -262,18 +233,9 @@ err_free:
return r;
}
-static int test_excl_for_each_unlocked(void *arg)
-{
- return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_for_each_unlocked(void *arg)
-{
- return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_get_fences(void *arg, enum dma_resv_usage usage)
+static int test_get_fences(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_fence *f, **fences = NULL;
struct dma_resv resv;
int r, i;
@@ -296,10 +258,7 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage)
goto err_resv;
}
- if (usage >= DMA_RESV_USAGE_READ)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
dma_resv_unlock(&resv);
r = dma_resv_get_fences(&resv, usage, &i, &fences);
@@ -324,30 +283,24 @@ err_resv:
return r;
}
-static int test_excl_get_fences(void *arg)
-{
- return test_get_fences(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_get_fences(void *arg)
-{
- return test_get_fences(arg, DMA_RESV_USAGE_READ);
-}
-
int dma_resv(void)
{
static const struct subtest tests[] = {
SUBTEST(sanitycheck),
- SUBTEST(test_excl_signaling),
- SUBTEST(test_shared_signaling),
- SUBTEST(test_excl_for_each),
- SUBTEST(test_shared_for_each),
- SUBTEST(test_excl_for_each_unlocked),
- SUBTEST(test_shared_for_each_unlocked),
- SUBTEST(test_excl_get_fences),
- SUBTEST(test_shared_get_fences),
+ SUBTEST(test_signaling),
+ SUBTEST(test_for_each),
+ SUBTEST(test_for_each_unlocked),
+ SUBTEST(test_get_fences),
};
+ enum dma_resv_usage usage;
+ int r;
spin_lock_init(&fence_lock);
- return subtests(tests, NULL);
+ for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
+ ++usage) {
+ r = subtests(tests, (void *)(unsigned long)usage);
+ if (r)
+ return r;
+ }
+ return 0;
}
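For completeness, a sketch of the consumer side of this change: dma_resv_iter_usage() replaces dma_resv_iter_is_exclusive() in the iterators, as the updated dma_resv_describe() and the selftests above show. The function name dump_fences() is hypothetical, and the reservation lock is assumed to be held by the caller, as the locked iterator requires:

	#include <linux/dma-resv.h>
	#include <linux/dma-fence.h>
	#include <linux/seq_file.h>

	static void dump_fences(struct dma_resv *resv, struct seq_file *seq)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		/* Walk every fence with usage up to and including READ. */
		dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) {
			/* dma_resv_iter_usage() reports how each fence was added. */
			seq_printf(seq, "\t%s fence:",
				   dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE ?
				   "write" : "read");
			dma_fence_describe(fence, seq);
		}
	}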