author	Chris Wilson <chris@chris-wilson.co.uk>	2019-05-13 14:01:00 +0200
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2019-05-20 17:27:07 +0200
commit	06b2b1a40e3a9a1ea80b4c51885d863ca194ef3f (patch)
tree	0bad079b8a88ebdaae8a3ce783197f4339798a5f /drivers
parent	Linux 5.2-rc1 (diff)
drm/i915: Rearrange i915_scheduler.c
To avoid pulling in a forward declaration in the next patch, move the
i915_sched_node handling to after the main dfs of the scheduler.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190513120102.29660-1-chris@chris-wilson.co.uk
(cherry picked from commit 5ae87063c162679a61f2141041d0918cc3045daf)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
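The rationale, briefly: C requires a function to be declared before it is
called, so calling something defined later in the same translation unit needs
a forward declaration. By moving the i915_sched_node handling below the main
dfs, the next patch can call into the dfs code from the node handling without
adding one. A minimal, hypothetical sketch of the effect (schedule_dfs() and
node_fini() are invented stand-ins, not the driver's functions):

#include <stdio.h>

struct node { int prio; };

/* Defined first, so anything below it can call it without a declaration. */
static void schedule_dfs(struct node *n)
{
	printf("visiting node with prio %d\n", n->prio);
}

/*
 * Because node_fini() now sits after schedule_dfs(), it can call it
 * directly; with the order reversed, the compiler would need a forward
 * declaration of schedule_dfs() above this point.
 */
static void node_fini(struct node *n)
{
	schedule_dfs(n);
	n->prio = -1;
}

int main(void)
{
	struct node n = { .prio = 3 };

	node_fini(&n);
	return 0;
}

Had node_fini() stayed above schedule_dfs() and gained such a call, a line
like "static void schedule_dfs(struct node *n);" would have been needed near
the top of the file, which is exactly what the rearrangement avoids.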
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_scheduler.c	210
1 file changed, 105 insertions(+), 105 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 39bc4f54e272..5756fcfe343d 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -35,109 +35,6 @@ static inline bool node_signaled(const struct i915_sched_node *node)
return i915_request_completed(node_to_request(node));
}
-void i915_sched_node_init(struct i915_sched_node *node)
-{
- INIT_LIST_HEAD(&node->signalers_list);
- INIT_LIST_HEAD(&node->waiters_list);
- INIT_LIST_HEAD(&node->link);
- node->attr.priority = I915_PRIORITY_INVALID;
- node->semaphores = 0;
- node->flags = 0;
-}
-
-static struct i915_dependency *
-i915_dependency_alloc(void)
-{
- return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct i915_dependency *dep)
-{
- kmem_cache_free(global.slab_dependencies, dep);
-}
-
-bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal,
- struct i915_dependency *dep,
- unsigned long flags)
-{
- bool ret = false;
-
- spin_lock_irq(&schedule_lock);
-
- if (!node_signaled(signal)) {
- INIT_LIST_HEAD(&dep->dfs_link);
- list_add(&dep->wait_link, &signal->waiters_list);
- list_add(&dep->signal_link, &node->signalers_list);
- dep->signaler = signal;
- dep->flags = flags;
-
- /* Keep track of whether anyone on this chain has a semaphore */
- if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
- !node_started(signal))
- node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-
- ret = true;
- }
-
- spin_unlock_irq(&schedule_lock);
-
- return ret;
-}
-
-int i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal)
-{
- struct i915_dependency *dep;
-
- dep = i915_dependency_alloc();
- if (!dep)
- return -ENOMEM;
-
- if (!__i915_sched_node_add_dependency(node, signal, dep,
- I915_DEPENDENCY_ALLOC))
- i915_dependency_free(dep);
-
- return 0;
-}
-
-void i915_sched_node_fini(struct i915_sched_node *node)
-{
- struct i915_dependency *dep, *tmp;
-
- GEM_BUG_ON(!list_empty(&node->link));
-
- spin_lock_irq(&schedule_lock);
-
- /*
- * Everyone we depended upon (the fences we wait to be signaled)
- * should retire before us and remove themselves from our list.
- * However, retirement is run independently on each timeline and
- * so we may be called out-of-order.
- */
- list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
- GEM_BUG_ON(!node_signaled(dep->signaler));
- GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
- list_del(&dep->wait_link);
- if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(dep);
- }
-
- /* Remove ourselves from everyone who depends upon us */
- list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
- GEM_BUG_ON(dep->signaler != node);
- GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
- list_del(&dep->signal_link);
- if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(dep);
- }
-
- spin_unlock_irq(&schedule_lock);
-}
-
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -239,6 +136,11 @@ out:
return &p->requests[idx];
}
+void __i915_priolist_free(struct i915_priolist *p)
+{
+ kmem_cache_free(global.slab_priorities, p);
+}
+
struct sched_cache {
struct list_head *priolist;
};
@@ -436,9 +338,107 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
spin_unlock_irqrestore(&schedule_lock, flags);
}
-void __i915_priolist_free(struct i915_priolist *p)
+void i915_sched_node_init(struct i915_sched_node *node)
{
- kmem_cache_free(global.slab_priorities, p);
+ INIT_LIST_HEAD(&node->signalers_list);
+ INIT_LIST_HEAD(&node->waiters_list);
+ INIT_LIST_HEAD(&node->link);
+ node->attr.priority = I915_PRIORITY_INVALID;
+ node->semaphores = 0;
+ node->flags = 0;
+}
+
+static struct i915_dependency *
+i915_dependency_alloc(void)
+{
+ return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct i915_dependency *dep)
+{
+ kmem_cache_free(global.slab_dependencies, dep);
+}
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ bool ret = false;
+
+ spin_lock_irq(&schedule_lock);
+
+ if (!node_signaled(signal)) {
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &node->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+
+ /* Keep track of whether anyone on this chain has a semaphore */
+ if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
+ !node_started(signal))
+ node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+
+ ret = true;
+ }
+
+ spin_unlock_irq(&schedule_lock);
+
+ return ret;
+}
+
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc();
+ if (!dep)
+ return -ENOMEM;
+
+ if (!__i915_sched_node_add_dependency(node, signal, dep,
+ I915_DEPENDENCY_ALLOC))
+ i915_dependency_free(dep);
+
+ return 0;
+}
+
+void i915_sched_node_fini(struct i915_sched_node *node)
+{
+ struct i915_dependency *dep, *tmp;
+
+ GEM_BUG_ON(!list_empty(&node->link));
+
+ spin_lock_irq(&schedule_lock);
+
+ /*
+ * Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+ GEM_BUG_ON(!node_signaled(dep->signaler));
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+ GEM_BUG_ON(dep->signaler != node);
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(dep);
+ }
+
+ spin_unlock_irq(&schedule_lock);
}
static void i915_global_scheduler_shrink(void)