author    Chris Wilson <chris@chris-wilson.co.uk>  2019-10-03 23:01:00 +0200
committer Chris Wilson <chris@chris-wilson.co.uk>  2019-10-04 14:43:43 +0200
commit    3dda22d3dcd1fc39e7867b2c5f5fc8fa79fdefcc
tree      f7074121eac9203209c3de91008807438b44719f /drivers/gpu/drm/drm_mm.c
parent    drm/mm: Convert drm_mm_node booleans to bitops
drm/mm: Use clear_bit_unlock() for releasing the drm_mm_node()
A few callers need to serialise the destruction of their drm_mm_node and
ensure it is removed from the drm_mm before freeing. However, to be
completely sure that any access from another thread is complete before we
free the struct, we require the RELEASE semantics of clear_bit_unlock().

This allows conditional locking such as

  Thread A                           Thread B
  mutex_lock(mm_lock);               if (drm_mm_node_allocated(node)) {
  drm_mm_remove_node(node);                  mutex_lock(mm_lock);
  mutex_unlock(mm_lock);                     if (drm_mm_node_allocated(node))
                                                     drm_mm_remove_node(node);
                                             mutex_unlock(mm_lock);
                                     }
                                     kfree(node);

to serialise correctly without any lingering accesses from A to the freed
node.

Allocation / insertion of the node is assumed never to race with removal
or eviction scanning.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003210100.22250-5-chris@chris-wilson.co.uk
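As a concrete illustration of the "Thread B" side of the pattern above, here is
a minimal sketch of a caller-side release path. The object type, its embedded
node and the mm_lock mutex are hypothetical names used for illustration only;
the drm_mm_node_allocated(), drm_mm_remove_node(), mutex and kfree() calls are
the real kernel APIs the commit message refers to.

/*
 * Hedged sketch, not part of this patch: freeing an object that embeds a
 * drm_mm_node.  "struct my_object", "vma_node" and "mm_lock" are
 * illustrative names.
 */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <drm/drm_mm.h>

struct my_object {
	struct drm_mm_node vma_node;
	/* ... driver-private state ... */
};

static void my_object_free(struct my_object *obj, struct mutex *mm_lock)
{
	/*
	 * Unlocked peek: another thread (Thread A in the commit message)
	 * may already have removed the node under mm_lock.
	 */
	if (drm_mm_node_allocated(&obj->vma_node)) {
		mutex_lock(mm_lock);
		/* Re-check under the lock before removing. */
		if (drm_mm_node_allocated(&obj->vma_node))
			drm_mm_remove_node(&obj->vma_node);
		mutex_unlock(mm_lock);
	}

	/*
	 * clear_bit_unlock() in drm_mm_remove_node() orders the remover's
	 * accesses before the bit can be observed clear, so no other
	 * thread is still touching the node by the time we free it.
	 */
	kfree(obj);
}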
Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a9cab5e53731..2a6e34663146 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -424,9 +424,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->mm = mm;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
node->hole_size = 0;
rm_hole(hole);
@@ -543,9 +543,9 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
node->color = color;
node->hole_size = 0;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
rm_hole(hole);
if (adj_start > hole_start)
@@ -589,11 +589,12 @@ void drm_mm_remove_node(struct drm_mm_node *node)
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
- __clear_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
if (drm_mm_hole_follows(prev_node))
rm_hole(prev_node);
add_hole(prev_node);
+
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);
@@ -614,6 +615,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
*new = *old;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
list_replace(&old->node_list, &new->node_list);
rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
@@ -627,8 +629,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
&mm->holes_addr);
}
- __clear_bit(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
- __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);