author		Peng Zhang <zhangpeng.00@bytedance.com>	2023-05-24 05:12:38 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-06-10 01:25:43 +0200
commit		523716770e63e229dbb6307d663f03d990dfefc5 (patch)
tree		585aadfc4b3bf7d2ecba035d21ae475f7bab2f36 /lib
parent		mm/memcontrol: export memcg.swap watermark via sysfs for v2 memcg (diff)
maple_tree: rework mtree_alloc_{range,rrange}()
Patch series "Clean ups for maple tree", v4.
Some cleanups, mainly to make the maple tree code more concise.
This patchset has passed the self-test.
This patch (of 10):
Use mas_empty_area{_rev}() to refactor mtree_alloc_{range,rrange}().
Link: https://lkml.kernel.org/r/20230524031247.65949-2-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
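
For readers unfamiliar with the external API touched here, a minimal caller-side sketch (not part of this patch; the tree and function names below are hypothetical): after this rework, mtree_alloc_range() delegates the range/size sanity checks to mas_empty_area() and, on success, stores the chosen index in *startp.

/* Illustrative sketch only, assuming an allocation-mode maple tree. */
#include <linux/maple_tree.h>
#include <linux/gfp.h>
#include <linux/printk.h>

static struct maple_tree demo_tree;

static int demo_store(void *entry)
{
	unsigned long start;
	int ret;

	mtree_init(&demo_tree, MT_FLAGS_ALLOC_RANGE);

	/* Ask for a 16-slot gap anywhere in [0, 1000]; on success,
	 * `start` holds the index chosen for `entry`. */
	ret = mtree_alloc_range(&demo_tree, &start, entry, 16, 0, 1000,
				GFP_KERNEL);
	if (ret)
		return ret;	/* negative errno: no gap, bad args, or allocation failure */

	pr_info("entry placed at index %lu\n", start);
	return 0;
}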
Diffstat (limited to 'lib')
-rw-r--r--	lib/maple_tree.c	57
1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 4eb220008f72..96d102d60b4e 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -6493,31 +6493,33 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
 {
 	int ret = 0;
 
-	MA_STATE(mas, mt, min, min);
+	MA_STATE(mas, mt, 0, 0);
 	if (!mt_is_alloc(mt))
 		return -EINVAL;
 
 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
 		return -EINVAL;
 
-	if (min > max)
-		return -EINVAL;
-
-	if (max < size)
-		return -EINVAL;
-
-	if (!size)
-		return -EINVAL;
-
 	mtree_lock(mt);
 retry:
-	mas.offset = 0;
-	mas.index = min;
-	mas.last = max - size + 1;
-	ret = mas_alloc(&mas, entry, size, startp);
+	ret = mas_empty_area(&mas, min, max, size);
+	if (ret)
+		goto unlock;
+
+	mas_insert(&mas, entry);
+	/*
+	 * mas_nomem() may release the lock, causing the allocated area
+	 * to be unavailable, so try to allocate a free area again.
+	 */
 	if (mas_nomem(&mas, gfp))
 		goto retry;
 
+	if (mas_is_err(&mas))
+		ret = xa_err(mas.node);
+	else
+		*startp = mas.index;
+
+unlock:
 	mtree_unlock(mt);
 	return ret;
 }
@@ -6529,28 +6531,33 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
 {
 	int ret = 0;
 
-	MA_STATE(mas, mt, min, max - size + 1);
+	MA_STATE(mas, mt, 0, 0);
 	if (!mt_is_alloc(mt))
 		return -EINVAL;
 
 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
 		return -EINVAL;
 
-	if (min > max)
-		return -EINVAL;
-
-	if (max < size - 1)
-		return -EINVAL;
-
-	if (!size)
-		return -EINVAL;
-
 	mtree_lock(mt);
 retry:
-	ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
+	ret = mas_empty_area_rev(&mas, min, max, size);
+	if (ret)
+		goto unlock;
+
+	mas_insert(&mas, entry);
+	/*
+	 * mas_nomem() may release the lock, causing the allocated area
+	 * to be unavailable, so try to allocate a free area again.
+	 */
 	if (mas_nomem(&mas, gfp))
 		goto retry;
 
+	if (mas_is_err(&mas))
+		ret = xa_err(mas.node);
+	else
+		*startp = mas.index;
+
+unlock:
 	mtree_unlock(mt);
 	return ret;
 }
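
The reverse variant now follows the same structure, with mas_empty_area_rev() searching from the top of [min, max] downwards. A hedged caller-side sketch, building on the hypothetical demo code above (demo_store_topdown is an illustrative name, not kernel API):

/* Illustrative sketch only: place `entry` in the highest free 16-slot
 * gap at or below index 1000 (same includes as the sketch above). */
static int demo_store_topdown(struct maple_tree *tree, void *entry)
{
	unsigned long start;
	int ret;

	ret = mtree_alloc_rrange(tree, &start, entry, 16, 0, 1000,
				 GFP_KERNEL);
	if (ret)
		return ret;	/* negative errno propagated from the search or insert */

	pr_info("entry placed at index %lu (top-down)\n", start);
	return 0;
}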