author    Baolin Wang <baolin.wang@linux.alibaba.com>    2023-02-15 11:39:37 +0100
committer Andrew Morton <akpm@linux-foundation.org>      2023-02-20 21:46:17 +0100
commit    cd7755800eb54e8522f5e51f4e71e6494c1f1572 (patch)
tree      d796c6986f1ba19fa79c8f76641c6c2c8d26d4b4
parent    mm: hugetlb: change to return bool for isolate_hugetlb() (diff)
mm: change to return bool for isolate_movable_page()
isolate_movable_page() can now only return 0 or -EBUSY, and no caller cares about the specific negative value, so convert isolate_movable_page() to return a boolean to make the code clearer when checking the movable-page isolation state.

No functional changes intended.

[akpm@linux-foundation.org: remove unneeded comment, per Matthew]
Link: https://lkml.kernel.org/r/cb877f73f4fff8d309611082ec740a7065b1ade0.1676424378.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
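The calling-convention change is easier to see outside the diff. The following is a minimal user-space sketch, not kernel code: the two stub functions are hypothetical and only model the old errno-style return versus the new boolean return, mirroring how the caller check in mm/compaction.c flips from an inverted test to a direct one.

/* Hypothetical stand-alone sketch of the before/after calling convention. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Old convention: 0 on success, -EBUSY on failure. */
static int isolate_movable_page_old(bool can_isolate)
{
	return can_isolate ? 0 : -EBUSY;
}

/* New convention: true on success, false on failure. */
static bool isolate_movable_page_new(bool can_isolate)
{
	return can_isolate;
}

int main(void)
{
	bool can_isolate = true;

	/* Before: success is the zero return, so the check reads inverted. */
	if (!isolate_movable_page_old(can_isolate))
		printf("old API: isolated\n");

	/* After: success reads directly as a boolean. */
	if (isolate_movable_page_new(can_isolate))
		printf("new API: isolated\n");

	return 0;
}

With the boolean return, the success check in isolate_migratepages_block() becomes the positive "if (isolate_movable_page(page, mode))" rather than the inverted "if (!isolate_movable_page(page, mode))", which is the readability gain the commit message describes.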
-rw-r--r--  include/linux/migrate.h    6
-rw-r--r--  mm/compaction.c            2
-rw-r--r--  mm/memory-failure.c        4
-rw-r--r--  mm/memory_hotplug.c       10
-rw-r--r--  mm/migrate.c               6
5 files changed, 14 insertions, 14 deletions
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index c88b96b48be7..6b252f519c86 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -71,7 +71,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
@@ -92,8 +92,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
static inline struct page *alloc_migration_target(struct page *page,
unsigned long private)
{ return NULL; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
- { return -EBUSY; }
+static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
diff --git a/mm/compaction.c b/mm/compaction.c
index d73578af44cc..ad7409f70519 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -976,7 +976,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
locked = NULL;
}
- if (!isolate_movable_page(page, mode))
+ if (isolate_movable_page(page, mode))
goto isolate_success;
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8604753bc644..a1ede7bdce95 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2515,8 +2515,8 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
if (lru)
isolated = isolate_lru_page(page);
else
- isolated = !isolate_movable_page(page,
- ISOLATE_UNEVICTABLE);
+ isolated = isolate_movable_page(page,
+ ISOLATE_UNEVICTABLE);
if (isolated) {
list_add(&page->lru, pagelist);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 5fc2dcf4e3ab..5f73fd894b89 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1668,18 +1668,18 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* We can skip free pages. And we can deal with pages on
* LRU and non-lru movable pages.
*/
- if (PageLRU(page)) {
+ if (PageLRU(page))
isolated = isolate_lru_page(page);
- ret = isolated ? 0 : -EBUSY;
- } else
- ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
- if (!ret) { /* Success */
+ else
+ isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
+ if (isolated) {
list_add_tail(&page->lru, &source);
if (!__PageMovable(page))
inc_node_page_state(page, NR_ISOLATED_ANON +
page_is_file_lru(page));
} else {
+ ret = -EBUSY;
if (__ratelimit(&migrate_rs)) {
pr_warn("failed to isolate pfn %lx\n", pfn);
dump_page(page, "isolation failed");
diff --git a/mm/migrate.c b/mm/migrate.c
index 2db546a0618c..9a101c7bb8ff 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -58,7 +58,7 @@
#include "internal.h"
-int isolate_movable_page(struct page *page, isolate_mode_t mode)
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
struct folio *folio = folio_get_nontail_page(page);
const struct movable_operations *mops;
@@ -119,14 +119,14 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
folio_set_isolated(folio);
folio_unlock(folio);
- return 0;
+ return true;
out_no_isolated:
folio_unlock(folio);
out_putfolio:
folio_put(folio);
out:
- return -EBUSY;
+ return false;
}
static void putback_movable_folio(struct folio *folio)