author    Davidlohr Bueso <dave@stgolabs.net>             2017-09-09 01:15:08 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-09-09 03:26:49 +0200
commit    f808c13fd3738948e10196496959871130612b61
tree      0f9b1bf3ccc9c4d051bf4fed87b493dced56d032 /drivers/gpu/drm
parent    block/cfq: replace cfq_rb_root leftmost caching
lib/interval_tree: fast overlap detection
Allow interval trees to quickly check for overlaps to avoid unnecessary tree lookups in interval_tree_iter_first(). As of this patch, all interval tree flavors will require using a 'rb_root_cached' such that we can have the leftmost node easily available. While most users will make use of this feature, those with special functions (in addition to the generic insert, delete, search calls) will avoid using the cached option as they can do funky things with insertions -- for example, vma_interval_tree_insert_after().

[jglisse@redhat.com: fix deadlock from typo vm_lock_anon_vma()]
Link: http://lkml.kernel.org/r/20170808225719.20723-1-jglisse@redhat.com
Link: http://lkml.kernel.org/r/20170719014603.19029-12-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Doug Ledford <dledford@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Christian Benvenuti <benve@cisco.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
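For reference, the speedup comes from the generated interval_tree_iter_first(), which can now reject non-overlapping queries in O(1) before any tree walk. Below is a simplified sketch of that fast path, adapted from the INTERVAL_TREE_DEFINE() template in include/linux/interval_tree_generic.h (interval_tree_subtree_search() is the pre-existing descent helper; the generic IT* template macros are expanded for the plain interval_tree flavor):

/*
 * Two ranges A = [a0, a1] and B = [b0, b1] overlap iff
 * a0 <= b1 && b0 <= a1. Here B is bounded by the smallest 'start'
 * in the tree (the cached leftmost node) and the largest 'last'
 * (the augmented __subtree_last kept at the root), both available
 * without descending the tree.
 */
struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached *root,
                         unsigned long start, unsigned long last)
{
        struct interval_tree_node *node, *leftmost;

        if (!root->rb_root.rb_node)
                return NULL;

        /* Largest 'last' in the whole tree is kept at the root node. */
        node = rb_entry(root->rb_root.rb_node,
                        struct interval_tree_node, rb);
        if (node->__subtree_last < start)
                return NULL;    /* every interval ends before the query */

        /* Smallest 'start' is the cached leftmost node: O(1) access. */
        leftmost = rb_entry(root->rb_leftmost,
                            struct interval_tree_node, rb);
        if (leftmost->start > last)
                return NULL;    /* every interval begins after the query */

        /* Possible overlap: fall back to the augmented-tree descent. */
        return interval_tree_subtree_search(node, start, last);
}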
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c   |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   |  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h   |  2
-rw-r--r--  drivers/gpu/drm/drm_mm.c                 | 19
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c        |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  |  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h          |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c       |  8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c       |  7
9 files changed, 33 insertions, 28 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index e1cde6b80027..3b0f2ec6eec7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -51,7 +51,7 @@ struct amdgpu_mn {
/* objects protected by lock */
struct mutex lock;
- struct rb_root objects;
+ struct rb_root_cached objects;
};
struct amdgpu_mn_node {
@@ -76,8 +76,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
mutex_lock(&adev->mn_lock);
mutex_lock(&rmn->lock);
hash_del(&rmn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
- it.rb) {
+ rbtree_postorder_for_each_entry_safe(node, next_node,
+ &rmn->objects.rb_root, it.rb) {
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
bo->mn = NULL;
list_del_init(&bo->mn_list);
@@ -221,7 +221,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
mutex_init(&rmn->lock);
- rmn->objects = RB_ROOT;
+ rmn->objects = RB_ROOT_CACHED;
r = __mmu_notifier_register(&rmn->mn, mm);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6b1343e5541d..b9a5a77eedaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2475,7 +2475,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
u64 flags;
uint64_t init_pde_value = 0;
- vm->va = RB_ROOT;
+ vm->va = RB_ROOT_CACHED;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
@@ -2596,10 +2596,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
- if (!RB_EMPTY_ROOT(&vm->va)) {
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
}
- rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
+ rbtree_postorder_for_each_entry_safe(mapping, tmp,
+ &vm->va.rb_root, rb) {
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
kfree(mapping);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index ba6691b58ee7..6716355403ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -118,7 +118,7 @@ struct amdgpu_vm_pt {
struct amdgpu_vm {
/* tree of virtual addresses mapped */
- struct rb_root va;
+ struct rb_root_cached va;
/* protecting invalidated */
spinlock_t status_lock;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index f794089d30ac..61a1c8ea74bc 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -169,7 +169,7 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
- return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
+ return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
@@ -180,6 +180,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
struct drm_mm *mm = hole_node->mm;
struct rb_node **link, *rb;
struct drm_mm_node *parent;
+ bool leftmost = true;
node->__subtree_last = LAST(node);
@@ -196,9 +197,10 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
rb = &hole_node->rb;
link = &hole_node->rb.rb_right;
+ leftmost = false;
} else {
rb = NULL;
- link = &mm->interval_tree.rb_node;
+ link = &mm->interval_tree.rb_root.rb_node;
}
while (*link) {
@@ -208,14 +210,15 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
parent->__subtree_last = node->__subtree_last;
if (node->start < parent->start)
link = &parent->rb.rb_left;
- else
+ else {
link = &parent->rb.rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&node->rb, rb, link);
- rb_insert_augmented(&node->rb,
- &mm->interval_tree,
- &drm_mm_interval_tree_augment);
+ rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
+ &drm_mm_interval_tree_augment);
}
#define RB_INSERT(root, member, expr) do { \
@@ -577,7 +580,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
*new = *old;
list_replace(&old->node_list, &new->node_list);
- rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
+ rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
@@ -863,7 +866,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
mm->color_adjust = NULL;
INIT_LIST_HEAD(&mm->hole_stack);
- mm->interval_tree = RB_ROOT;
+ mm->interval_tree = RB_ROOT_CACHED;
mm->holes_size = RB_ROOT;
mm->holes_addr = RB_ROOT;
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index d9100b565198..28f1226576f8 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -147,7 +147,7 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
struct rb_node *iter;
unsigned long offset;
- iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
+ iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
best = NULL;
while (likely(iter)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index f152a38d7079..23fd18bd1b56 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -49,7 +49,7 @@ struct i915_mmu_notifier {
spinlock_t lock;
struct hlist_node node;
struct mmu_notifier mn;
- struct rb_root objects;
+ struct rb_root_cached objects;
struct workqueue_struct *wq;
};
@@ -123,7 +123,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct interval_tree_node *it;
LIST_HEAD(cancelled);
- if (RB_EMPTY_ROOT(&mn->objects))
+ if (RB_EMPTY_ROOT(&mn->objects.rb_root))
return;
/* interval ranges are inclusive, but invalidate range is exclusive */
@@ -172,7 +172,7 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
- mn->objects = RB_ROOT;
+ mn->objects = RB_ROOT_CACHED;
mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
if (mn->wq == NULL) {
kfree(mn);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ec63bc5e9de7..8cbaeec090c9 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -924,7 +924,7 @@ struct radeon_vm_id {
struct radeon_vm {
struct mutex mutex;
- struct rb_root va;
+ struct rb_root_cached va;
/* protecting invalidated and freed */
spinlock_t status_lock;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 896f2cf51e4e..1d62288b7ee3 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -50,7 +50,7 @@ struct radeon_mn {
/* objects protected by lock */
struct mutex lock;
- struct rb_root objects;
+ struct rb_root_cached objects;
};
struct radeon_mn_node {
@@ -75,8 +75,8 @@ static void radeon_mn_destroy(struct work_struct *work)
mutex_lock(&rdev->mn_lock);
mutex_lock(&rmn->lock);
hash_del(&rmn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
- it.rb) {
+ rbtree_postorder_for_each_entry_safe(node, next_node,
+ &rmn->objects.rb_root, it.rb) {
interval_tree_remove(&node->it, &rmn->objects);
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
@@ -205,7 +205,7 @@ static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
rmn->mm = mm;
rmn->mn.ops = &radeon_mn_ops;
mutex_init(&rmn->lock);
- rmn->objects = RB_ROOT;
+ rmn->objects = RB_ROOT_CACHED;
r = __mmu_notifier_register(&rmn->mn, mm);
if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 5e82b408d522..e5c0e635e371 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -1185,7 +1185,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
vm->ids[i].last_id_use = NULL;
}
mutex_init(&vm->mutex);
- vm->va = RB_ROOT;
+ vm->va = RB_ROOT_CACHED;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
@@ -1232,10 +1232,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!RB_EMPTY_ROOT(&vm->va)) {
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
- rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
+ rbtree_postorder_for_each_entry_safe(bo_va, tmp,
+ &vm->va.rb_root, it.rb) {
interval_tree_remove(&bo_va->it, &vm->va);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
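Taken together, the nine files above apply one mechanical conversion: the root type changes from rb_root to rb_root_cached, initialization from RB_ROOT to RB_ROOT_CACHED, interval_tree_*() calls keep taking the (now cached) root, and raw rbtree helpers switch to the embedded .rb_root. A condensed sketch of the pattern with a hypothetical consumer (struct objects_tracker and the tracker_*() helpers are invented for illustration, not part of the patch):

#include <linux/interval_tree.h>
#include <linux/rbtree.h>

struct objects_tracker {
        struct rb_root_cached objects;  /* was: struct rb_root */
};

static void tracker_init(struct objects_tracker *t)
{
        t->objects = RB_ROOT_CACHED;    /* was: RB_ROOT */
}

static bool tracker_overlaps(struct objects_tracker *t,
                             unsigned long start, unsigned long last)
{
        /* interval_tree_*() now takes the cached root directly ... */
        return interval_tree_iter_first(&t->objects, start, last) != NULL;
}

static void tracker_destroy(struct objects_tracker *t)
{
        struct interval_tree_node *it, *tmp;

        /* ... while plain rbtree helpers want the embedded rb_root. */
        rbtree_postorder_for_each_entry_safe(it, tmp,
                                             &t->objects.rb_root, rb)
                interval_tree_remove(it, &t->objects);
}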