author     Chris Wilson <chris@chris-wilson.co.uk>    2016-10-25 14:00:45 +0200
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2016-10-25 14:40:39 +0200
commit     f54d1867005c3323f5d8ad83eed823e84226c429 (patch)
tree       026c3f57bc546d3a0205389d0f8e0d02ce8a76ac /drivers/gpu/drm/amd/scheduler
parent     Merge remote-tracking branch 'airlied/drm-next' into topic/drm-misc (diff)
dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA operations
to make room.

A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage increases, so hopefully it
remains a good thing!

(v2...: rebase, rerun spatch)
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel

coccinelle script:
@@
@@
- struct fence
+ struct dma_fence
@@
@@
- struct fence_ops
+ struct dma_fence_ops
@@
@@
- struct fence_cb
+ struct dma_fence_cb
@@
@@
- struct fence_array
+ struct dma_fence_array
@@
@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@
@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
 (
 ...
 )

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
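As a reading aid, and not part of the patch itself: the following is a minimal
consumer-side sketch of the renamed interface, loosely following the pattern the
scheduler uses after run_job() in the diff below. The my_ctx, my_cb_func() and
my_track_fence() names are hypothetical; only struct dma_fence, struct
dma_fence_cb and the dma_fence_* calls come from <linux/dma-fence.h>.

#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/printk.h>

struct my_ctx {
	struct dma_fence *parent;	/* reference held until completion */
	struct dma_fence_cb cb;		/* embedded callback node */
};

/* Runs once the fence signals (possibly from irq context). */
static void my_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct my_ctx *ctx = container_of(cb, struct my_ctx, cb);

	dma_fence_put(ctx->parent);	/* drop the reference taken below */
	ctx->parent = NULL;
}

/* Takes ownership of the caller's reference on @fence, as the scheduler
 * does with the fence returned by run_job(). */
static void my_track_fence(struct my_ctx *ctx, struct dma_fence *fence)
{
	int r;

	ctx->parent = dma_fence_get(fence);
	r = dma_fence_add_callback(fence, &ctx->cb, my_cb_func);
	if (r == -ENOENT)		/* fence already signaled */
		my_cb_func(fence, &ctx->cb);
	else if (r)
		pr_err("dma_fence_add_callback failed (%d)\n", r);
	dma_fence_put(fence);		/* drop the caller's reference */
}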
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r--   drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h |  4
-rw-r--r--   drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   | 67
-rw-r--r--   drivers/gpu/drm/amd/scheduler/gpu_scheduler.h   | 26
-rw-r--r--   drivers/gpu/drm/amd/scheduler/sched_fence.c     | 48
4 files changed, 75 insertions(+), 70 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c6caf3..dbd4fd3a810b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
__field(struct amd_sched_job *, sched_job)
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
__field(const char *, name)
__field(u32, job_count)
__field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
TP_PROTO(struct amd_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct fence *, fence)
+ __field(struct dma_fence *, fence)
),
TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d46a93..5364e6a7ec8f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,7 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -141,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(2);
+ entity->fence_context = dma_fence_context_alloc(2);
return 0;
}
@@ -221,32 +221,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
kfifo_free(&entity->job_queue);
}
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
amd_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_entity *entity =
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
- fence_put(f);
+ dma_fence_put(f);
}
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
- struct fence * fence = entity->dependency;
+ struct dma_fence * fence = entity->dependency;
struct amd_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourself */
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -257,23 +257,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
* Fence is from the same scheduler, only need to wait for
* it to be scheduled
*/
- fence = fence_get(&s_fence->scheduled);
- fence_put(entity->dependency);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ if (!dma_fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
- fence_put(fence);
+ dma_fence_put(fence);
return false;
}
- if (!fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+ amd_sched_entity_wakeup))
return true;
- fence_put(entity->dependency);
+ dma_fence_put(entity->dependency);
return false;
}
@@ -354,7 +354,8 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
finish_cb);
@@ -388,8 +389,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
- if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
- fence_put(s_job->s_fence->parent);
+ if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
}
}
@@ -410,21 +411,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct amd_sched_fence *s_fence = s_job->s_fence;
- struct fence *fence;
+ struct dma_fence *fence;
spin_unlock(&sched->job_list_lock);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
@@ -446,8 +447,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
- fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
- amd_sched_job_finish_cb);
+ dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@@ -511,7 +512,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
@@ -521,7 +522,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->finished);
+ dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -547,7 +548,7 @@ static int amd_sched_main(void *param)
struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
- struct fence *fence;
+ struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
(!amd_sched_blocked(sched) &&
@@ -569,15 +570,15 @@ static int amd_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
- s_fence->parent = fence_get(fence);
- r = fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ s_fence->parent = dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
- fence_put(fence);
+ dma_fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
amd_sched_process_job(NULL, &s_fence->cb);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..876aa43b57df 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,7 +25,7 @@
#define _GPU_SCHEDULER_H_
#include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
struct amd_gpu_scheduler;
struct amd_sched_rq;
@@ -50,8 +50,8 @@ struct amd_sched_entity {
atomic_t fence_seq;
uint64_t fence_context;
- struct fence *dependency;
- struct fence_cb cb;
+ struct dma_fence *dependency;
+ struct dma_fence_cb cb;
};
/**
@@ -66,10 +66,10 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence scheduled;
- struct fence finished;
- struct fence_cb cb;
- struct fence *parent;
+ struct dma_fence scheduled;
+ struct dma_fence finished;
+ struct dma_fence_cb cb;
+ struct dma_fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
@@ -79,15 +79,15 @@ struct amd_sched_job {
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- struct fence_cb finish_cb;
+ struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
if (f->ops == &amd_sched_fence_ops_scheduled)
return container_of(f, struct amd_sched_fence, scheduled);
@@ -103,8 +103,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
- struct fence *(*dependency)(struct amd_sched_job *sched_job);
- struct fence *(*run_job)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
void (*timedout_job)(struct amd_sched_job *sched_job);
void (*free_job)(struct amd_sched_job *sched_job);
};
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63beaf7574..c26fa298fe9e 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -42,46 +42,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
- fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
+ dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->scheduled);
+ int ret = dma_fence_signal(&fence->scheduled);
if (!ret)
- FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->scheduled,
+ "was already signaled\n");
}
void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->finished);
+ int ret = dma_fence_signal(&fence->finished);
if (!ret)
- FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "signaled from irq context\n");
else
- FENCE_TRACE(&fence->finished, "was already signaled\n");
+ DMA_FENCE_TRACE(&fence->finished,
+ "was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
{
return "amd_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -95,10 +99,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
*/
static void amd_sched_fence_free(struct rcu_head *rcu)
{
- struct fence *f = container_of(rcu, struct fence, rcu);
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(fence->parent);
+ dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
@@ -110,7 +114,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
@@ -124,27 +128,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- fence_put(&fence->scheduled);
+ dma_fence_put(&fence->scheduled);
}
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_scheduled,
};
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
- .wait = fence_default_wait,
+ .wait = dma_fence_default_wait,
.release = amd_sched_fence_release_finished,
};
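For completeness, a provider-side sketch under the new names (an assumed
example, not taken from this patch): my_fence, my_fence_ops and the my_*
callbacks are hypothetical, and the layout simply mirrors sched_fence.c above.
In real use the context would typically come from dma_fence_context_alloc()
and completion would be reported with dma_fence_signal(&f->base); relying on
the default release path, which kfree()s the fence, is only correct here
because the dma_fence is the first member of the wrapper.

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_fence {
	struct dma_fence base;		/* must stay the first member */
	spinlock_t lock;		/* protects the fence state */
};

static const char *my_get_driver_name(struct dma_fence *f)
{
	return "my_driver";
}

static const char *my_get_timeline_name(struct dma_fence *f)
{
	return "my_timeline";
}

static bool my_enable_signaling(struct dma_fence *f)
{
	return true;			/* signaling always possible here */
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_get_driver_name,
	.get_timeline_name = my_get_timeline_name,
	.enable_signaling = my_enable_signaling,
	.wait = dma_fence_default_wait,
};

static struct dma_fence *my_fence_create(u64 context, unsigned int seq)
{
	struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &my_fence_ops, &f->lock, context, seq);
	return &f->base;
}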