author | Christian König <christian.koenig@amd.com> | 2020-06-15 16:16:42 +0200
committer | Christian König <christian.koenig@amd.com> | 2020-06-23 15:46:40 +0200
commit | 5fad79fd66ff90b8c0a95319dad0b099008f8347 (patch)
tree | 7fe0bd6de4f603aebf0693154d096d20da2dd46f /drivers/gpu/drm/drm_mm.c
parent | drm/mm: optimize find_hole() as well (diff)
download | linux-5fad79fd66ff90b8c0a95319dad0b099008f8347.tar.xz, linux-5fad79fd66ff90b8c0a95319dad0b099008f8347.zip
drm/mm: cleanup and improve next_hole_*_addr()
Skipping just one branch of the tree is not the most
effective approach.
Instead, use a macro to define the traversal functions and
prune both sides of each branch.
This improves the performance of the unit tests by
a factor of more than 4.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nirmoy Das <nirmoy.das@amd.com>
Link: https://patchwork.freedesktop.org/patch/370298/
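The optimization described above relies on the holes-by-address rb-tree being augmented with subtree_max_hole, the size of the largest hole anywhere below a node, so branches that cannot possibly satisfy a request are never visited. The following standalone sketch illustrates that traversal outside the kernel; struct hole_node, usable() and next_hole_high() are made-up names for this example, not the drm_mm API, and only mirror what the generated next_hole_high_addr() does.

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

struct hole_node {
        struct hole_node *left, *right, *parent;
        uint64_t hole_size;        /* size of the hole at this node */
        uint64_t subtree_max_hole; /* largest hole_size in this subtree */
};

/* A subtree is worth visiting only if it can contain a big enough hole. */
static bool usable(const struct hole_node *n, uint64_t size)
{
        return n && n->subtree_max_hole >= size;
}

/*
 * In-order predecessor with pruning (the DRM_MM_INSERT_HIGH direction):
 * descend into the left subtree only if it may hold a big enough hole,
 * then keep going right while that stays true; otherwise climb until we
 * leave a node that was its parent's right child.
 */
static struct hole_node *next_hole_high(struct hole_node *entry, uint64_t size)
{
        struct hole_node *parent, *node = entry;

        if (!node)
                return NULL;

        if (usable(node->left, size)) {
                node = node->left;
                while (usable(node->right, size))
                        node = node->right;
                return node;
        }

        while ((parent = node->parent) && node == parent->left)
                node = parent;

        return parent;
}

As in the generated functions, the node reached by the climb may itself hold a hole that is still too small; the caller keeps following the helper until a hole actually fits.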
Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r-- | drivers/gpu/drm/drm_mm.c | 106
1 file changed, 34 insertions, 72 deletions
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 177a5df0fe95..a4a04d246135 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -325,6 +325,11 @@ static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 	return best;
 }
 
+static bool usable_hole_addr(struct rb_node *rb, u64 size)
+{
+	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
+}
+
 static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
 {
 	struct rb_node *rb = mm->holes_addr.rb_node;
@@ -333,7 +338,7 @@ static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
 	while (rb) {
 		u64 hole_start;
 
-		if (rb_hole_addr_to_node(rb)->subtree_max_hole < size)
+		if (!usable_hole_addr(rb, size))
 			break;
 
 		node = rb_hole_addr_to_node(rb);
@@ -374,82 +379,39 @@ first_hole(struct drm_mm *mm,
 }
 
 /**
- * next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request
- * @entry: previously selected drm_mm_node
- * @size: size of the a hole needed for the request
- *
- * This function will verify whether left subtree of @entry has hole big enough
- * to fit the requtested size. If so, it will return previous node of @entry or
- * else it will return parent node of @entry
+ * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
+ * @name: name of function to declare
+ * @first: first rb member to traverse (either rb_left or rb_right).
+ * @last: last rb member to traverse (either rb_right or rb_left).
  *
- * It will also skip the complete left subtree if subtree_max_hole of that
- * subtree is same as the subtree_max_hole of the @entry.
- *
- * Returns:
- * previous node of @entry if left subtree of @entry can serve the request or
- * else return parent of @entry
+ * This macro declares a function to return the next hole of the addr rb tree.
+ * While traversing the tree we take the searched size into account and only
+ * visit branches with potential big enough holes.
  */
-static struct drm_mm_node *
-next_hole_high_addr(struct drm_mm_node *entry, u64 size)
-{
-	struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
-	struct drm_mm_node *left_node;
-
-	if (!entry)
-		return NULL;
-
-	rb_node = &entry->rb_hole_addr;
-	if (rb_node->rb_left) {
-		left_rb_node = rb_node->rb_left;
-		parent_rb_node = rb_parent(rb_node);
-		left_node = rb_entry(left_rb_node,
-				     struct drm_mm_node, rb_hole_addr);
-		if (left_node->subtree_max_hole < size &&
-		    parent_rb_node && parent_rb_node->rb_left != rb_node)
-			return rb_hole_addr_to_node(parent_rb_node);
-	}
-
-	return rb_hole_addr_to_node(rb_prev(rb_node));
+#define DECLARE_NEXT_HOLE_ADDR(name, first, last) \
+static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size) \
+{ \
+	struct rb_node *parent, *node = &entry->rb_hole_addr; \
+ \
+	if (!entry || RB_EMPTY_NODE(node)) \
+		return NULL; \
+ \
+	if (usable_hole_addr(node->first, size)) { \
+		node = node->first; \
+		while (usable_hole_addr(node->last, size)) \
+			node = node->last; \
+		return rb_hole_addr_to_node(node); \
+	} \
+ \
+	while ((parent = rb_parent(node)) && node == parent->first) \
+		node = parent; \
+ \
+	return rb_hole_addr_to_node(parent); \
 }
 
-/**
- * next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
- * @entry: previously selected drm_mm_node
- * @size: size of the a hole needed for the request
- *
- * This function will verify whether right subtree of @entry has hole big enough
- * to fit the requtested size. If so, it will return next node of @entry or
- * else it will return parent node of @entry
- *
- * It will also skip the complete right subtree if subtree_max_hole of that
- * subtree is same as the subtree_max_hole of the @entry.
- *
- * Returns:
- * next node of @entry if right subtree of @entry can serve the request or
- * else return parent of @entry
- */
-static struct drm_mm_node *
-next_hole_low_addr(struct drm_mm_node *entry, u64 size)
-{
-	struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
-	struct drm_mm_node *right_node;
-
-	if (!entry)
-		return NULL;
-
-	rb_node = &entry->rb_hole_addr;
-	if (rb_node->rb_right) {
-		right_rb_node = rb_node->rb_right;
-		parent_rb_node = rb_parent(rb_node);
-		right_node = rb_entry(right_rb_node,
-				      struct drm_mm_node, rb_hole_addr);
-		if (right_node->subtree_max_hole < size &&
-		    parent_rb_node && parent_rb_node->rb_right != rb_node)
-			return rb_hole_addr_to_node(parent_rb_node);
-	}
-
-	return rb_hole_addr_to_node(rb_next(rb_node));
-}
+DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
+DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
 
 static struct drm_mm_node *
 next_hole(struct drm_mm *mm,
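Continuing the hypothetical sketch above (again, not the kernel's own next_hole() or drm_mm_insert_node_in_range() code), a caller for the DRM_MM_INSERT_HIGH direction would simply walk from the highest-addressed hole downwards until one is large enough:

/* Walk holes from the highest address down until one actually fits.
 * 'rightmost' is assumed to be the highest-addressed hole in the tree. */
static struct hole_node *find_high_hole(struct hole_node *rightmost,
                                        uint64_t size)
{
        struct hole_node *node;

        for (node = rightmost; node; node = next_hole_high(node, size))
                if (node->hole_size >= size)
                        return node;

        return NULL;
}

The low-address direction is the mirror image with the left/right roles swapped, which is exactly why the patch generates both next_hole_high_addr() and next_hole_low_addr() from a single macro parameterized by rb_left and rb_right.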