From b0b7af1884b7d807a3504804f9825d472de78708 Mon Sep 17 00:00:00 2001
From: Daniel Vetter
Date: Fri, 18 Feb 2011 17:59:14 +0100
Subject: drm: mm: add api for embedding struct drm_mm_node

The old api has a two-step process: First search for a suitable free
hole, then allocate from that specific hole. No user used this to do
anything clever. So drop it for the embeddable variant of the drm_mm
api (the old one retains this ability, for the time being).

With struct drm_mm_node embedded, we cannot track allocations anymore
by checking for a NULL pointer. So keep track of this and add a small
helper drm_mm_node_allocated. Also add a function to move allocations
between different struct drm_mm_node.

v2: Implement suggestions by Chris Wilson.

Signed-off-by: Daniel Vetter
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/drm_mm.c | 93 +++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 84 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index fecb4063c018..d6432f9e49c1 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -124,6 +124,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
 	if (alignment)
 		tmp = hole_start % alignment;
 
@@ -136,6 +138,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	node->start = hole_start + wasted;
 	node->size = size;
 	node->mm = mm;
+	node->allocated = 1;
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
@@ -157,8 +160,6 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 {
 	struct drm_mm_node *node;
 
-	BUG_ON(!hole_node->hole_follows);
-
 	node = drm_mm_kmalloc(hole_node->mm, atomic);
 	if (unlikely(node == NULL))
 		return NULL;
@@ -169,6 +170,26 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free(mm, size, alignment, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper(hole_node, node, size, alignment);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node);
+
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 					struct drm_mm_node *node,
 					unsigned long size, unsigned alignment,
@@ -179,6 +200,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
 	if (hole_start < start)
 		wasted += start - hole_start;
 	if (alignment)
@@ -195,6 +218,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	node->start = hole_start + wasted;
 	node->size = size;
 	node->mm = mm;
+	node->allocated = 1;
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
@@ -219,8 +243,6 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
 {
 	struct drm_mm_node *node;
 
-	BUG_ON(!hole_node->hole_follows);
-
 	node = drm_mm_kmalloc(hole_node->mm, atomic);
 	if (unlikely(node == NULL))
 		return NULL;
@@ -232,14 +254,34 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
 }
 EXPORT_SYMBOL(drm_mm_get_block_range_generic);
 
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
  */
-
-void drm_mm_put_block(struct drm_mm_node *node)
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
 {
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+						start, end, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
@@ -264,6 +306,22 @@ void drm_mm_put_block(struct drm_mm_node *node)
 		list_move(&prev_node->hole_stack, &mm->hole_stack);
 
 	list_del(&node->node_list);
+	node->allocated = 0;
+}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+
+	struct drm_mm *mm = node->mm;
+
+	drm_mm_remove_node(node);
+
 	spin_lock(&mm->unused_lock);
 	if (mm->num_unused < MM_UNUSED_TARGET) {
 		list_add(&node->node_list, &mm->unused_nodes);
@@ -367,6 +425,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free_in_range);
 
+/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+	list_replace(&old->node_list, &new->node_list);
+	list_replace(&old->node_list, &new->hole_stack);
+	new->hole_follows = old->hole_follows;
+	new->mm = old->mm;
+	new->start = old->start;
+	new->size = old->size;
+
+	old->allocated = 0;
+	new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
 /**
  * Initializa lru scanning.
  *
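
As a rough usage sketch (not part of the patch): with the node embedded, a driver
allocates and frees address space roughly as below. The struct my_buffer object and
the my_buffer_bind()/my_buffer_unbind() helpers are hypothetical illustration only;
drm_mm_insert_node(), drm_mm_remove_node() and the drm_mm_node_allocated() helper are
the entry points this series adds.

#include <linux/errno.h>
#include "drm_mm.h"	/* illustrative include; in-tree users pull this in via drmP.h */

/* Hypothetical driver object with the allocator node embedded directly. */
struct my_buffer {
	struct drm_mm_node node;	/* no separate drm_mm_node kmalloc needed */
	/* ... driver-private state ... */
};

/* Bind buf into the space managed by mm; the embedded node must start out zeroed. */
static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
			  unsigned long size, unsigned alignment)
{
	if (drm_mm_node_allocated(&buf->node))
		return -EBUSY;	/* already bound */

	return drm_mm_insert_node(mm, &buf->node, size, alignment);
}

/* Unbind buf again; safe to call on a buffer that was never bound. */
static void my_buffer_unbind(struct my_buffer *buf)
{
	if (drm_mm_node_allocated(&buf->node))
		drm_mm_remove_node(&buf->node);
}

drm_mm_replace_node() covers the remaining case: handing an existing range over from
one embedded node to another without going through a free/re-allocate cycle.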