author     Miaohe Lin <linmiaohe@huawei.com>              2020-10-14 01:57:14 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org> 2020-10-14 03:38:34 +0200
commit     544941d788319951ae75bae5d1bce076a6f71949 (patch)
tree       91834edb7d541ea9dca0504ac898f3b48fdfedcd /mm/mempool.c
parent     mm: remove unused alloc_page_vma_node() (diff)
mm/mempool: add 'else' to split mutually exclusive case
Add 'else' to split the mutually exclusive cases and avoid some unnecessary
checks.  It doesn't seem to change code generation (the compiler is smart
enough), but I think it helps readability.
[akpm@linux-foundation.org: fix comment location]
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/20200924111641.28922-1-linmiaohe@huawei.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
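
For context on why the branches below are mutually exclusive: a mempool is bound
to exactly one alloc/free pair when it is created, so pool->free (or pool->alloc)
can only ever match one of the cases being tested.  A minimal sketch using the
standard constructors from <linux/mempool.h>; the names example_pools_init,
slab_pool, page_pool and cachep are illustrative, not part of this patch:

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *slab_pool;	/* pool->free will be mempool_free_slab */
static mempool_t *page_pool;	/* pool->free will be mempool_free_pages */

static int example_pools_init(struct kmem_cache *cachep)
{
	/* Slab-backed pool: elements come from cachep. */
	slab_pool = mempool_create_slab_pool(4, cachep);

	/* Page-backed pool: pool_data holds the allocation order (0 here). */
	page_pool = mempool_create_page_pool(4, 0);

	if (!slab_pool || !page_pool) {
		mempool_destroy(slab_pool);	/* mempool_destroy(NULL) is a no-op */
		mempool_destroy(page_pool);
		return -ENOMEM;
	}
	return 0;
}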
Diffstat (limited to 'mm/mempool.c')
 mm/mempool.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/mm/mempool.c b/mm/mempool.c
index 79bff63ecf27..f473cdddaff0 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -58,11 +58,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
 static void check_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
 		__check_element(pool, element, ksize(element));
-
-	/* Mempools backed by page allocator */
-	if (pool->free == mempool_free_pages) {
+	} else if (pool->free == mempool_free_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
@@ -82,11 +81,10 @@ static void __poison_element(void *element, size_t size)
 static void poison_element(mempool_t *pool, void *element)
 {
 	/* Mempools backed by slab allocator */
-	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
 		__poison_element(element, ksize(element));
-
-	/* Mempools backed by page allocator */
-	if (pool->alloc == mempool_alloc_pages) {
+	} else if (pool->alloc == mempool_alloc_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);
 
@@ -107,7 +105,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_poison_kfree(element, _RET_IP_);
-	if (pool->alloc == mempool_alloc_pages)
+	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
@@ -115,7 +113,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_unpoison_slab(element);
-	if (pool->alloc == mempool_alloc_pages)
+	else if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }