Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig  32
-rw-r--r--  mm/cma.c  2
-rw-r--r--  mm/compaction.c  9
-rw-r--r--  mm/damon/core-test.h  60
-rw-r--r--  mm/damon/core.c  70
-rw-r--r--  mm/damon/dbgfs-test.h  2
-rw-r--r--  mm/damon/dbgfs.c  2
-rw-r--r--  mm/damon/modules-common.c  2
-rw-r--r--  mm/damon/sysfs-common.h  3
-rw-r--r--  mm/damon/sysfs-schemes.c  272
-rw-r--r--  mm/damon/sysfs.c  27
-rw-r--r--  mm/damon/vaddr-test.h  2
-rw-r--r--  mm/damon/vaddr.c  4
-rw-r--r--  mm/debug_page_alloc.c  2
-rw-r--r--  mm/debug_vm_pgtable.c  4
-rw-r--r--  mm/filemap.c  14
-rw-r--r--  mm/folio-compat.c  20
-rw-r--r--  mm/gup.c  4
-rw-r--r--  mm/huge_memory.c  456
-rw-r--r--  mm/hugetlb.c  25
-rw-r--r--  mm/hugetlb_vmemmap.c  276
-rw-r--r--  mm/internal.h  41
-rw-r--r--  mm/kasan/common.c  280
-rw-r--r--  mm/kasan/generic.c  175
-rw-r--r--  mm/kasan/hw_tags.c  8
-rw-r--r--  mm/kasan/kasan.h  93
-rw-r--r--  mm/kasan/kasan_test.c  793
-rw-r--r--  mm/kasan/quarantine.c  12
-rw-r--r--  mm/kasan/report.c  46
-rw-r--r--  mm/kasan/report_generic.c  6
-rw-r--r--  mm/kasan/report_tags.c  27
-rw-r--r--  mm/kasan/shadow.c  18
-rw-r--r--  mm/kasan/tags.c  24
-rw-r--r--  mm/khugepaged.c  73
-rw-r--r--  mm/kmemleak.c  186
-rw-r--r--  mm/kmsan/core.c  7
-rw-r--r--  mm/kmsan/init.c  8
-rw-r--r--  mm/ksm.c  388
-rw-r--r--  mm/list_lru.c  79
-rw-r--r--  mm/madvise.c  22
-rw-r--r--  mm/memblock.c  41
-rw-r--r--  mm/memcontrol.c  319
-rw-r--r--  mm/memory-failure.c  119
-rw-r--r--  mm/memory.c  292
-rw-r--r--  mm/memory_hotplug.c  219
-rw-r--r--  mm/mempool.c  75
-rw-r--r--  mm/memremap.c  32
-rw-r--r--  mm/migrate.c  39
-rw-r--r--  mm/migrate_device.c  64
-rw-r--r--  mm/mm_init.c  71
-rw-r--r--  mm/mmap.c  46
-rw-r--r--  mm/mmu_gather.c  2
-rw-r--r--  mm/mmzone.c  1
-rw-r--r--  mm/oom_kill.c  7
-rw-r--r--  mm/page-writeback.c  54
-rw-r--r--  mm/page_alloc.c  84
-rw-r--r--  mm/page_io.c  84
-rw-r--r--  mm/page_isolation.c  17
-rw-r--r--  mm/page_owner.c  16
-rw-r--r--  mm/page_poison.c  8
-rw-r--r--  mm/page_reporting.c  6
-rw-r--r--  mm/page_vma_mapped.c  3
-rw-r--r--  mm/pagewalk.c  29
-rw-r--r--  mm/process_vm_access.c  15
-rw-r--r--  mm/readahead.c  14
-rw-r--r--  mm/rmap.c  499
-rw-r--r--  mm/shmem.c  17
-rw-r--r--  mm/show_mem.c  8
-rw-r--r--  mm/shuffle.h  2
-rw-r--r--  mm/slub.c  69
-rw-r--r--  mm/sparse.c  17
-rw-r--r--  mm/swap.h  28
-rw-r--r--  mm/swap_state.c  121
-rw-r--r--  mm/swapfile.c  105
-rw-r--r--  mm/truncate.c  51
-rw-r--r--  mm/userfaultfd.c  627
-rw-r--r--  mm/util.c  8
-rw-r--r--  mm/vmscan.c  280
-rw-r--r--  mm/vmstat.c  21
-rw-r--r--  mm/workingset.c  46
-rw-r--r--  mm/zsmalloc.c  5
-rw-r--r--  mm/zswap.c  746
82 files changed, 5351 insertions, 2530 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index ddf246bf785d..1902cfe4cc4f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -61,6 +61,20 @@ config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
The cost is that if the page was never dirtied and needs to be
swapped out again, it will be re-compressed.
+config ZSWAP_SHRINKER_DEFAULT_ON
+ bool "Shrink the zswap pool on memory pressure"
+ depends on ZSWAP
+ default n
+ help
+ If selected, the zswap shrinker will be enabled, and the pages
+ stored in the zswap pool will become available for reclaim (i.e
+ written back to the backing swap device) on memory pressure.
+
+ This means that zswap writeback could happen even if the pool is
+ not yet full, or the cgroup zswap limit has not been reached,
+ reducing the chance that cold pages will reside in the zswap pool
+ and consume memory indefinitely.
+
choice
prompt "Default compressor"
depends on ZSWAP
@@ -329,7 +343,7 @@ config SHUFFLE_PAGE_ALLOCATOR
the presence of a memory-side-cache. There are also incidental
security benefits as it reduces the predictability of page
allocations to compliment SLAB_FREELIST_RANDOM, but the
- default granularity of shuffling on the MAX_ORDER i.e, 10th
+ default granularity of shuffling on the MAX_PAGE_ORDER i.e, 10th
order of pages is selected based on cache utilization benefits
on x86.
@@ -661,8 +675,8 @@ config HUGETLB_PAGE_SIZE_VARIABLE
HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
on a platform.
- Note that the pageblock_order cannot exceed MAX_ORDER and will be
- clamped down to MAX_ORDER.
+ Note that the pageblock_order cannot exceed MAX_PAGE_ORDER and will be
+ clamped down to MAX_PAGE_ORDER.
config CONTIG_ALLOC
def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
@@ -718,7 +732,7 @@ config DEFAULT_MMAP_MIN_ADDR
from userspace allocation. Keeping a user from writing to low pages
can help reduce the impact of kernel NULL pointer bugs.
- For most ia64, ppc64 and x86 users with lots of address space
+ For most ppc64 and x86 users with lots of address space
a value of 65536 is reasonable and should cause no problems.
On arm and other archs it should not be higher than 32768.
Programs which use vm86 functionality or have some need to map
@@ -821,6 +835,12 @@ choice
madvise(MADV_HUGEPAGE) but it won't risk to increase the
memory footprint of applications without a guaranteed
benefit.
+
+ config TRANSPARENT_HUGEPAGE_NEVER
+ bool "never"
+ help
+ Disable Transparent Hugepage by default. It can still be
+ enabled at runtime via sysfs.
endchoice
config THP_SWAP
@@ -1216,6 +1236,10 @@ config LRU_GEN_STATS
from evicted generations for debugging purpose.
This option has a per-memcg and per-node memory overhead.
+
+config LRU_GEN_WALKS_MMU
+ def_bool y
+ depends on LRU_GEN && ARCH_HAS_HW_PTE_YOUNG
# }
config ARCH_SUPPORTS_PER_VMA_LOCK
diff --git a/mm/cma.c b/mm/cma.c
index 2b2494fd6b59..7c09c47e530b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -244,7 +244,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
{
phys_addr_t memblock_end = memblock_end_of_DRAM();
phys_addr_t highmem_start;
- int ret = 0;
+ int ret;
/*
* We can't use __pa(high_memory) directly, since high_memory
diff --git a/mm/compaction.c b/mm/compaction.c
index 01ba298739dd..27ada42924d5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -999,7 +999,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* a valid page order. Consider only values in the
* valid order range to prevent low_pfn overflow.
*/
- if (freepage_order > 0 && freepage_order <= MAX_ORDER) {
+ if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << freepage_order) - 1;
nr_scanned += (1UL << freepage_order) - 1;
}
@@ -1017,7 +1017,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (PageCompound(page) && !cc->alloc_contig) {
const unsigned int order = compound_order(page);
- if (likely(order <= MAX_ORDER)) {
+ if (likely(order <= MAX_PAGE_ORDER)) {
low_pfn += (1UL << order) - 1;
nr_scanned += (1UL << order) - 1;
}
@@ -1611,6 +1611,9 @@ static void fast_isolate_freepages(struct compact_control *cc)
min(pageblock_end_pfn(min_pfn),
zone_end_pfn(cc->zone)),
cc->zone);
+ if (page && !suitable_migration_target(cc, page))
+ page = NULL;
+
cc->free_pfn = min_pfn;
}
}
@@ -2226,7 +2229,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
/* Direct compactor: Is a suitable page free? */
ret = COMPACT_NO_SUITABLE_PAGE;
- for (order = cc->order; order <= MAX_ORDER; order++) {
+ for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &cc->zone->free_area[order];
bool can_steal;
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index 649adf91ebc5..0cee634f3544 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -4,7 +4,7 @@
*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifdef CONFIG_DAMON_KUNIT_TEST
@@ -122,18 +122,25 @@ static void damon_test_split_at(struct kunit *test)
{
struct damon_ctx *c = damon_new_ctx();
struct damon_target *t;
- struct damon_region *r;
+ struct damon_region *r, *r_new;
t = damon_new_target();
r = damon_new_region(0, 100);
+ r->nr_accesses_bp = 420000;
+ r->nr_accesses = 42;
+ r->last_nr_accesses = 15;
damon_add_region(r, t);
damon_split_region_at(t, r, 25);
KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
- r = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r->ar.start, 25ul);
- KUNIT_EXPECT_EQ(test, r->ar.end, 100ul);
+ r_new = damon_next_region(r);
+ KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
+ KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);
+
+ KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
+ KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
damon_free_target(t);
damon_destroy_ctx(c);
@@ -295,6 +302,16 @@ static void damon_test_set_regions(struct kunit *test)
damon_destroy_target(t);
}
+static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
+{
+ struct damon_attrs attrs = {
+ .sample_interval = 10,
+ .aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
+ };
+
+ KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
+}
+
static void damon_test_update_monitoring_result(struct kunit *test)
{
struct damon_attrs old_attrs = {
@@ -439,6 +456,37 @@ static void damos_test_filter_out(struct kunit *test)
damos_free_filter(f);
}
+static void damon_test_feed_loop_next_input(struct kunit *test)
+{
+ unsigned long last_input = 900000, current_score = 200;
+
+ /*
+ * If current score is lower than the goal, which is always 10,000
+ * (read the comment on damon_feed_loop_next_input()'s comment), next
+ * input should be higher than the last input.
+ */
+ KUNIT_EXPECT_GT(test,
+ damon_feed_loop_next_input(last_input, current_score),
+ last_input);
+
+ /*
+ * If current score is higher than the goal, next input should be lower
+ * than the last input.
+ */
+ current_score = 250000000;
+ KUNIT_EXPECT_LT(test,
+ damon_feed_loop_next_input(last_input, current_score),
+ last_input);
+
+ /*
+ * The next input depends on the distance between the current score and
+ * the goal
+ */
+ KUNIT_EXPECT_GT(test,
+ damon_feed_loop_next_input(last_input, 200),
+ damon_feed_loop_next_input(last_input, 2000));
+}
+
static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_target),
KUNIT_CASE(damon_test_regions),
@@ -449,11 +497,13 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_split_regions_of),
KUNIT_CASE(damon_test_ops_registration),
KUNIT_CASE(damon_test_set_regions),
+ KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
KUNIT_CASE(damon_test_update_monitoring_result),
KUNIT_CASE(damon_test_set_attrs),
KUNIT_CASE(damon_test_moving_sum),
KUNIT_CASE(damos_test_new_filter),
KUNIT_CASE(damos_test_filter_out),
+ KUNIT_CASE(damon_test_feed_loop_next_input),
{},
};
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 3a05e71509b9..36f6f1d21ff0 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -2,7 +2,7 @@
/*
* Data Access Monitor
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#define pr_fmt(fmt) "damon: " fmt
@@ -1043,26 +1043,76 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
}
}
-/* Shouldn't be called if quota->ms and quota->sz are zero */
+/*
+ * damon_feed_loop_next_input() - get next input to achieve a target score.
+ * @last_input The last input.
+ * @score Current score that made with @last_input.
+ *
+ * Calculate next input to achieve the target score, based on the last input
+ * and current score. Assuming the input and the score are positively
+ * proportional, calculate how much compensation should be added to or
+ * subtracted from the last input as a proportion of the last input. Avoid
+ * next input always being zero by setting it non-zero always. In short form
+ * (assuming support of float and signed calculations), the algorithm is as
+ * below.
+ *
+ * next_input = max(last_input * ((goal - current) / goal + 1), 1)
+ *
+ * For simple implementation, we assume the target score is always 10,000. The
+ * caller should adjust @score for this.
+ *
+ * Returns next input that assumed to achieve the target score.
+ */
+static unsigned long damon_feed_loop_next_input(unsigned long last_input,
+ unsigned long score)
+{
+ const unsigned long goal = 10000;
+ unsigned long score_goal_diff = max(goal, score) - min(goal, score);
+ unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal;
+ unsigned long compensation = last_input * score_goal_diff_bp / 10000;
+ /* Set minimum input as 10000 to avoid compensation be zero */
+ const unsigned long min_input = 10000;
+
+ if (goal > score)
+ return last_input + compensation;
+ if (last_input > compensation + min_input)
+ return last_input - compensation;
+ return min_input;
+}
+
+/* Shouldn't be called if quota->ms, quota->sz, and quota->get_score unset */
static void damos_set_effective_quota(struct damos_quota *quota)
{
unsigned long throughput;
unsigned long esz;
- if (!quota->ms) {
+ if (!quota->ms && !quota->get_score) {
quota->esz = quota->sz;
return;
}
- if (quota->total_charged_ns)
- throughput = quota->total_charged_sz * 1000000 /
- quota->total_charged_ns;
- else
- throughput = PAGE_SIZE * 1024;
- esz = throughput * quota->ms;
+ if (quota->get_score) {
+ quota->esz_bp = damon_feed_loop_next_input(
+ max(quota->esz_bp, 10000UL),
+ quota->get_score(quota->get_score_arg));
+ esz = quota->esz_bp / 10000;
+ }
+
+ if (quota->ms) {
+ if (quota->total_charged_ns)
+ throughput = quota->total_charged_sz * 1000000 /
+ quota->total_charged_ns;
+ else
+ throughput = PAGE_SIZE * 1024;
+ if (quota->get_score)
+ esz = min(throughput * quota->ms, esz);
+ else
+ esz = throughput * quota->ms;
+ }
if (quota->sz && quota->sz < esz)
esz = quota->sz;
+
quota->esz = esz;
}
@@ -1074,7 +1124,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
unsigned long cumulated_sz;
unsigned int score, max_score = 0;
- if (!quota->ms && !quota->sz)
+ if (!quota->ms && !quota->sz && !quota->get_score)
return;
/* New charge window starts */
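
A minimal stand-alone sketch of the feedback-loop arithmetic that damon_feed_loop_next_input() implements above (an illustration only, not part of the patch; it assumes 64-bit unsigned long and fixes the goal at 10,000, as the kernel code does). The two calls in main() use the same scores exercised by the new kunit test:

#include <stdio.h>

static unsigned long feed_loop_next_input(unsigned long last_input,
					   unsigned long score)
{
	const unsigned long goal = 10000;	/* target score, fixed */
	const unsigned long min_input = 10000;	/* floor for the result */
	unsigned long diff = score > goal ? score - goal : goal - score;
	unsigned long diff_bp = diff * 10000 / goal;	/* distance in basis points */
	unsigned long compensation = last_input * diff_bp / 10000;

	if (goal > score)
		return last_input + compensation;	/* under the goal: grow */
	if (last_input > compensation + min_input)
		return last_input - compensation;	/* over the goal: shrink */
	return min_input;
}

int main(void)
{
	/* Score far below the goal: next input grows (prints 1782000). */
	printf("%lu\n", feed_loop_next_input(900000, 200));
	/* Score far above the goal: next input collapses to the 10000 floor. */
	printf("%lu\n", feed_loop_next_input(900000, 250000000));
	return 0;
}
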
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h
index 0bb0d532b159..2d85217f5ba4 100644
--- a/mm/damon/dbgfs-test.h
+++ b/mm/damon/dbgfs-test.h
@@ -2,7 +2,7 @@
/*
* DAMON Debugfs Interface Unit Tests
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index dc0ea1fc30ca..7dac24e69e3b 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -2,7 +2,7 @@
/*
* DAMON Debugfs Interface
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#define pr_fmt(fmt) "damon-dbgfs: " fmt
diff --git a/mm/damon/modules-common.c b/mm/damon/modules-common.c
index b2381a8466ec..7cf96574cde7 100644
--- a/mm/damon/modules-common.c
+++ b/mm/damon/modules-common.c
@@ -2,7 +2,7 @@
/*
* Common Primitives for DAMON Modules
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#include <linux/damon.h>
diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h
index 5ff081226e28..4c37a166eb81 100644
--- a/mm/damon/sysfs-common.h
+++ b/mm/damon/sysfs-common.h
@@ -56,3 +56,6 @@ int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx);
int damon_sysfs_schemes_clear_regions(
struct damon_sysfs_schemes *sysfs_schemes,
struct damon_ctx *ctx);
+
+void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
+ struct damon_ctx *ctx);
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index fe0fe2562000..8dbaac6e5c2d 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -821,6 +821,203 @@ static const struct kobj_type damon_sysfs_watermarks_ktype = {
};
/*
+ * quota goal directory
+ */
+
+struct damos_sysfs_quota_goal {
+ struct kobject kobj;
+ unsigned long target_value;
+ unsigned long current_value;
+};
+
+static struct damos_sysfs_quota_goal *damos_sysfs_quota_goal_alloc(void)
+{
+ return kzalloc(sizeof(struct damos_sysfs_quota_goal), GFP_KERNEL);
+}
+
+static ssize_t target_value_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
+ damos_sysfs_quota_goal, kobj);
+
+ return sysfs_emit(buf, "%lu\n", goal->target_value);
+}
+
+static ssize_t target_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
+ damos_sysfs_quota_goal, kobj);
+ int err = kstrtoul(buf, 0, &goal->target_value);
+
+ return err ? err : count;
+}
+
+static ssize_t current_value_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
+ damos_sysfs_quota_goal, kobj);
+
+ return sysfs_emit(buf, "%lu\n", goal->current_value);
+}
+
+static ssize_t current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damos_sysfs_quota_goal *goal = container_of(kobj, struct
+ damos_sysfs_quota_goal, kobj);
+ int err = kstrtoul(buf, 0, &goal->current_value);
+
+ /* feed callback should check existence of this file and read value */
+ return err ? err : count;
+}
+
+static void damos_sysfs_quota_goal_release(struct kobject *kobj)
+{
+ /* or, notify this release to the feed callback */
+ kfree(container_of(kobj, struct damos_sysfs_quota_goal, kobj));
+}
+
+static struct kobj_attribute damos_sysfs_quota_goal_target_value_attr =
+ __ATTR_RW_MODE(target_value, 0600);
+
+static struct kobj_attribute damos_sysfs_quota_goal_current_value_attr =
+ __ATTR_RW_MODE(current_value, 0600);
+
+static struct attribute *damos_sysfs_quota_goal_attrs[] = {
+ &damos_sysfs_quota_goal_target_value_attr.attr,
+ &damos_sysfs_quota_goal_current_value_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(damos_sysfs_quota_goal);
+
+static const struct kobj_type damos_sysfs_quota_goal_ktype = {
+ .release = damos_sysfs_quota_goal_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = damos_sysfs_quota_goal_groups,
+};
+
+/*
+ * quota goals directory
+ */
+
+struct damos_sysfs_quota_goals {
+ struct kobject kobj;
+ struct damos_sysfs_quota_goal **goals_arr; /* counted by nr */
+ int nr;
+};
+
+static struct damos_sysfs_quota_goals *damos_sysfs_quota_goals_alloc(void)
+{
+ return kzalloc(sizeof(struct damos_sysfs_quota_goals), GFP_KERNEL);
+}
+
+static void damos_sysfs_quota_goals_rm_dirs(
+ struct damos_sysfs_quota_goals *goals)
+{
+ struct damos_sysfs_quota_goal **goals_arr = goals->goals_arr;
+ int i;
+
+ for (i = 0; i < goals->nr; i++)
+ kobject_put(&goals_arr[i]->kobj);
+ goals->nr = 0;
+ kfree(goals_arr);
+ goals->goals_arr = NULL;
+}
+
+static int damos_sysfs_quota_goals_add_dirs(
+ struct damos_sysfs_quota_goals *goals, int nr_goals)
+{
+ struct damos_sysfs_quota_goal **goals_arr, *goal;
+ int err, i;
+
+ damos_sysfs_quota_goals_rm_dirs(goals);
+ if (!nr_goals)
+ return 0;
+
+ goals_arr = kmalloc_array(nr_goals, sizeof(*goals_arr),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!goals_arr)
+ return -ENOMEM;
+ goals->goals_arr = goals_arr;
+
+ for (i = 0; i < nr_goals; i++) {
+ goal = damos_sysfs_quota_goal_alloc();
+ if (!goal) {
+ damos_sysfs_quota_goals_rm_dirs(goals);
+ return -ENOMEM;
+ }
+
+ err = kobject_init_and_add(&goal->kobj,
+ &damos_sysfs_quota_goal_ktype, &goals->kobj,
+ "%d", i);
+ if (err) {
+ kobject_put(&goal->kobj);
+ damos_sysfs_quota_goals_rm_dirs(goals);
+ return err;
+ }
+
+ goals_arr[i] = goal;
+ goals->nr++;
+ }
+ return 0;
+}
+
+static ssize_t nr_goals_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damos_sysfs_quota_goals *goals = container_of(kobj,
+ struct damos_sysfs_quota_goals, kobj);
+
+ return sysfs_emit(buf, "%d\n", goals->nr);
+}
+
+static ssize_t nr_goals_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damos_sysfs_quota_goals *goals;
+ int nr, err = kstrtoint(buf, 0, &nr);
+
+ if (err)
+ return err;
+ if (nr < 0)
+ return -EINVAL;
+
+ goals = container_of(kobj, struct damos_sysfs_quota_goals, kobj);
+
+ if (!mutex_trylock(&damon_sysfs_lock))
+ return -EBUSY;
+ err = damos_sysfs_quota_goals_add_dirs(goals, nr);
+ mutex_unlock(&damon_sysfs_lock);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static void damos_sysfs_quota_goals_release(struct kobject *kobj)
+{
+ kfree(container_of(kobj, struct damos_sysfs_quota_goals, kobj));
+}
+
+static struct kobj_attribute damos_sysfs_quota_goals_nr_attr =
+ __ATTR_RW_MODE(nr_goals, 0600);
+
+static struct attribute *damos_sysfs_quota_goals_attrs[] = {
+ &damos_sysfs_quota_goals_nr_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(damos_sysfs_quota_goals);
+
+static const struct kobj_type damos_sysfs_quota_goals_ktype = {
+ .release = damos_sysfs_quota_goals_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = damos_sysfs_quota_goals_groups,
+};
+
+/*
* scheme/weights directory
*/
@@ -938,6 +1135,7 @@ static const struct kobj_type damon_sysfs_weights_ktype = {
struct damon_sysfs_quotas {
struct kobject kobj;
struct damon_sysfs_weights *weights;
+ struct damos_sysfs_quota_goals *goals;
unsigned long ms;
unsigned long sz;
unsigned long reset_interval_ms;
@@ -951,6 +1149,7 @@ static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
{
struct damon_sysfs_weights *weights;
+ struct damos_sysfs_quota_goals *goals;
int err;
weights = damon_sysfs_weights_alloc(0, 0, 0);
@@ -959,16 +1158,35 @@ static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
&quotas->kobj, "weights");
- if (err)
+ if (err) {
kobject_put(&weights->kobj);
- else
- quotas->weights = weights;
+ return err;
+ }
+ quotas->weights = weights;
+
+ goals = damos_sysfs_quota_goals_alloc();
+ if (!goals) {
+ kobject_put(&weights->kobj);
+ return -ENOMEM;
+ }
+ err = kobject_init_and_add(&goals->kobj,
+ &damos_sysfs_quota_goals_ktype, &quotas->kobj,
+ "goals");
+ if (err) {
+ kobject_put(&weights->kobj);
+ kobject_put(&goals->kobj);
+ } else {
+ quotas->goals = goals;
+ }
+
return err;
}
static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
{
kobject_put(&quotas->weights->kobj);
+ damos_sysfs_quota_goals_rm_dirs(quotas->goals);
+ kobject_put(&quotas->goals->kobj);
}
static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1650,6 +1868,50 @@ static int damon_sysfs_set_scheme_filters(struct damos *scheme,
return 0;
}
+static unsigned long damos_sysfs_get_quota_score(void *arg)
+{
+ return (unsigned long)arg;
+}
+
+static void damos_sysfs_set_quota_score(
+ struct damos_sysfs_quota_goals *sysfs_goals,
+ struct damos_quota *quota)
+{
+ struct damos_sysfs_quota_goal *sysfs_goal;
+ int i;
+
+ quota->get_score = NULL;
+ quota->get_score_arg = (void *)0;
+ for (i = 0; i < sysfs_goals->nr; i++) {
+ sysfs_goal = sysfs_goals->goals_arr[i];
+ if (!sysfs_goal->target_value)
+ continue;
+
+ /* Higher score makes scheme less aggressive */
+ quota->get_score_arg = (void *)max(
+ (unsigned long)quota->get_score_arg,
+ sysfs_goal->current_value * 10000 /
+ sysfs_goal->target_value);
+ quota->get_score = damos_sysfs_get_quota_score;
+ }
+}
+
+void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
+ struct damon_ctx *ctx)
+{
+ struct damos *scheme;
+ int i = 0;
+
+ damon_for_each_scheme(scheme, ctx) {
+ struct damon_sysfs_scheme *sysfs_scheme;
+
+ sysfs_scheme = sysfs_schemes->schemes_arr[i];
+ damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
+ &scheme->quota);
+ i++;
+ }
+}
+
static struct damos *damon_sysfs_mk_scheme(
struct damon_sysfs_scheme *sysfs_scheme)
{
@@ -1687,6 +1949,8 @@ static struct damos *damon_sysfs_mk_scheme(
.low = sysfs_wmarks->low,
};
+ damos_sysfs_set_quota_score(sysfs_quotas->goals, &quota);
+
scheme = damon_new_scheme(&pattern, sysfs_scheme->action,
sysfs_scheme->apply_interval_us, &quota, &wmarks);
if (!scheme)
@@ -1727,6 +1991,8 @@ static void damon_sysfs_update_scheme(struct damos *scheme,
scheme->quota.weight_nr_accesses = sysfs_weights->nr_accesses;
scheme->quota.weight_age = sysfs_weights->age;
+ damos_sysfs_set_quota_score(sysfs_quotas->goals, &scheme->quota);
+
scheme->wmarks.metric = sysfs_wmarks->metric;
scheme->wmarks.interval = sysfs_wmarks->interval_us;
scheme->wmarks.high = sysfs_wmarks->high;
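
A quick illustration of the quota-goal scoring above (not part of the patch; the struct and helper below are invented for the example): damos_sysfs_set_quota_score() turns each goal into current_value scaled to basis points of target_value and keeps the maximum, which is the score later handed to damon_feed_loop_next_input(), so the most over-achieved goal is the one that throttles the scheme.

#include <stdio.h>

struct goal {
	unsigned long target_value;
	unsigned long current_value;
};

static unsigned long pick_score(const struct goal *goals, int nr)
{
	unsigned long score = 0, s;
	int i;

	for (i = 0; i < nr; i++) {
		if (!goals[i].target_value)
			continue;	/* goals with no target are ignored */
		s = goals[i].current_value * 10000 / goals[i].target_value;
		if (s > score)
			score = s;	/* the most over-achieved goal wins */
	}
	return score;
}

int main(void)
{
	struct goal goals[] = {
		{ .target_value = 8192, .current_value = 4096 },	/* 5000: under target */
		{ .target_value =  100, .current_value =  150 },	/* 15000: over target */
	};

	/* Prints 15000; a score above 10000 makes the feed loop shrink the quota. */
	printf("%lu\n", pick_score(goals, 2));
	return 0;
}
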
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 7472404456aa..1f891e18b4ee 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -995,6 +995,11 @@ enum damon_sysfs_cmd {
/* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
DAMON_SYSFS_CMD_COMMIT,
/*
+ * @DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: Commit the quota goals
+ * to DAMON.
+ */
+ DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS,
+ /*
* @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
* files.
*/
@@ -1025,6 +1030,7 @@ static const char * const damon_sysfs_cmd_strs[] = {
"on",
"off",
"commit",
+ "commit_schemes_quota_goals",
"update_schemes_stats",
"update_schemes_tried_bytes",
"update_schemes_tried_regions",
@@ -1351,6 +1357,24 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
kdamond->contexts->contexts_arr[0]);
}
+static int damon_sysfs_commit_schemes_quota_goals(
+ struct damon_sysfs_kdamond *sysfs_kdamond)
+{
+ struct damon_ctx *ctx;
+ struct damon_sysfs_context *sysfs_ctx;
+
+ if (!damon_sysfs_kdamond_running(sysfs_kdamond))
+ return -EINVAL;
+ /* TODO: Support multiple contexts per kdamond */
+ if (sysfs_kdamond->contexts->nr != 1)
+ return -EINVAL;
+
+ ctx = sysfs_kdamond->damon_ctx;
+ sysfs_ctx = sysfs_kdamond->contexts->contexts_arr[0];
+ damos_sysfs_set_quota_scores(sysfs_ctx->schemes, ctx);
+ return 0;
+}
+
/*
* damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
* @c: The DAMON context of the callback.
@@ -1379,6 +1403,9 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active)
case DAMON_SYSFS_CMD_COMMIT:
err = damon_sysfs_commit_input(kdamond);
break;
+ case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
+ err = damon_sysfs_commit_schemes_quota_goals(kdamond);
+ break;
case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES:
total_bytes_only = true;
fallthrough;
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index dcf1ca6b31cc..83626483f82b 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -4,7 +4,7 @@
*
* Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index a4d1f63c5b23..381559e4a1fa 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -2,14 +2,14 @@
/*
* DAMON Primitives for Virtual Address Spaces
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#define pr_fmt(fmt) "damon-va: " fmt
-#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
+#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
diff --git a/mm/debug_page_alloc.c b/mm/debug_page_alloc.c
index f9d145730fd1..6755f0c9d4a3 100644
--- a/mm/debug_page_alloc.c
+++ b/mm/debug_page_alloc.c
@@ -22,7 +22,7 @@ static int __init debug_guardpage_minorder_setup(char *buf)
{
unsigned long res;
- if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
+ if (kstrtoul(buf, 10, &res) < 0 || res > MAX_PAGE_ORDER / 2) {
pr_err("Bad debug_guardpage_minorder value\n");
return 0;
}
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index e651500e597a..5662e29fe253 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -1091,7 +1091,7 @@ debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
struct page *page = NULL;
#ifdef CONFIG_CONTIG_ALLOC
- if (order > MAX_ORDER) {
+ if (order > MAX_PAGE_ORDER) {
page = alloc_contig_pages((1 << order), GFP_KERNEL,
first_online_node, NULL);
if (page) {
@@ -1101,7 +1101,7 @@ debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
}
#endif
- if (order <= MAX_ORDER)
+ if (order <= MAX_PAGE_ORDER)
page = alloc_pages(GFP_KERNEL, order);
return page;
diff --git a/mm/filemap.c b/mm/filemap.c
index ad5b4aa049a3..c8dafe70d4cc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -113,11 +113,11 @@
* ->i_pages lock (try_to_unmap_one)
* ->lruvec->lru_lock (follow_page->mark_page_accessed)
* ->lruvec->lru_lock (check_pte_range->isolate_lru_page)
- * ->private_lock (page_remove_rmap->set_page_dirty)
- * ->i_pages lock (page_remove_rmap->set_page_dirty)
- * bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
- * ->inode->i_lock (page_remove_rmap->set_page_dirty)
- * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock)
+ * ->private_lock (folio_remove_rmap_pte->set_page_dirty)
+ * ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
+ * bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
+ * ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty)
+ * ->memcg->move_lock (folio_remove_rmap_pte->folio_memcg_lock)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->block_dirty_folio)
@@ -1623,7 +1623,7 @@ EXPORT_SYMBOL_GPL(__folio_lock_killable);
static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
{
struct wait_queue_head *q = folio_waitqueue(folio);
- int ret = 0;
+ int ret;
wait->folio = folio;
wait->bit_nr = PG_locked;
@@ -2173,7 +2173,7 @@ update_start:
if (nr) {
folio = fbatch->folios[nr - 1];
- *start = folio->index + folio_nr_pages(folio);
+ *start = folio_next_index(folio);
}
out:
rcu_read_unlock();
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 10c3247542cb..50412014f16f 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -46,9 +46,9 @@ void mark_page_accessed(struct page *page)
}
EXPORT_SYMBOL(mark_page_accessed);
-bool set_page_writeback(struct page *page)
+void set_page_writeback(struct page *page)
{
- return folio_start_writeback(page_folio(page));
+ folio_start_writeback(page_folio(page));
}
EXPORT_SYMBOL(set_page_writeback);
@@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
}
EXPORT_SYMBOL(redirty_page_for_writepage);
-void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma)
-{
- folio_add_lru_vma(page_folio(page), vma);
-}
-
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
@@ -122,13 +116,3 @@ void putback_lru_page(struct page *page)
{
folio_putback_lru(page_folio(page));
}
-
-#ifdef CONFIG_MMU
-void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
- unsigned long address)
-{
- VM_BUG_ON_PAGE(PageTail(page), page);
-
- return folio_add_new_anon_rmap((struct folio *)page, vma, address);
-}
-#endif
diff --git a/mm/gup.c b/mm/gup.c
index 231711efa390..df83182ec72d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -177,7 +177,7 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
/*
* Adjust the pincount before re-checking the PTE for changes.
* This is essentially a smp_mb() and is paired with a memory
- * barrier in page_try_share_anon_rmap().
+ * barrier in folio_try_share_anon_rmap_*().
*/
smp_mb__after_atomic();
@@ -710,6 +710,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
spin_unlock(ptl);
if (page)
return page;
+ return no_page_table(vma, flags);
}
if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
@@ -758,6 +759,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
spin_unlock(ptl);
if (page)
return page;
+ return no_page_table(vma, flags);
}
if (unlikely(pud_bad(*pud)))
return no_page_table(vma, flags);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86ee29b5c39c..94ef5c02b459 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -74,12 +74,23 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
+unsigned long huge_anon_orders_always __read_mostly;
+unsigned long huge_anon_orders_madvise __read_mostly;
+unsigned long huge_anon_orders_inherit __read_mostly;
+
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ /* Check the intersection of requested and supported orders. */
+ orders &= vma_is_anonymous(vma) ?
+ THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
+ if (!orders)
+ return 0;
-bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
- bool smaps, bool in_pf, bool enforce_sysfs)
-{
if (!vma->vm_mm) /* vdso */
- return false;
+ return 0;
/*
* Explicitly disabled through madvise or prctl, or some
@@ -88,16 +99,16 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
* */
if ((vm_flags & VM_NOHUGEPAGE) ||
test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- return false;
+ return 0;
/*
* If the hardware/firmware marked hugepage support disabled.
*/
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
- return false;
+ return 0;
/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
if (vma_is_dax(vma))
- return in_pf;
+ return in_pf ? orders : 0;
/*
* khugepaged special VMA and hugetlb VMA.
@@ -105,17 +116,29 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
* VM_MIXEDMAP set.
*/
if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
- return false;
+ return 0;
/*
- * Check alignment for file vma and size for both file and anon vma.
+ * Check alignment for file vma and size for both file and anon vma by
+ * filtering out the unsuitable orders.
*
* Skip the check for page fault. Huge fault does the check in fault
- * handlers. And this check is not suitable for huge PUD fault.
+ * handlers.
*/
- if (!in_pf &&
- !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
- return false;
+ if (!in_pf) {
+ int order = highest_order(orders);
+ unsigned long addr;
+
+ while (orders) {
+ addr = vma->vm_end - (PAGE_SIZE << order);
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ if (!orders)
+ return 0;
+ }
/*
* Enabled via shmem mount options or sysfs settings.
@@ -124,29 +147,33 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
*/
if (!in_pf && shmem_file(vma->vm_file))
return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
- !enforce_sysfs, vma->vm_mm, vm_flags);
-
- /* Enforce sysfs THP requirements as necessary */
- if (enforce_sysfs &&
- (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
- !hugepage_flags_always())))
- return false;
+ !enforce_sysfs, vma->vm_mm, vm_flags)
+ ? orders : 0;
if (!vma_is_anonymous(vma)) {
/*
+ * Enforce sysfs THP requirements as necessary. Anonymous vmas
+ * were already handled in thp_vma_allowable_orders().
+ */
+ if (enforce_sysfs &&
+ (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
+ !hugepage_global_always())))
+ return 0;
+
+ /*
* Trust that ->huge_fault() handlers know what they are doing
* in fault path.
*/
if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
- return true;
+ return orders;
/* Only regular file is valid in collapse path */
if (((!in_pf || smaps)) && file_thp_enabled(vma))
- return true;
- return false;
+ return orders;
+ return 0;
}
if (vma_is_temporary_stack(vma))
- return false;
+ return 0;
/*
* THPeligible bit of smaps should show 1 for proper VMAs even
@@ -156,9 +183,9 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
* the first page fault.
*/
if (!vma->anon_vma)
- return (smaps || in_pf);
+ return (smaps || in_pf) ? orders : 0;
- return true;
+ return orders;
}
static bool get_huge_zero_page(void)
@@ -412,9 +439,136 @@ static const struct attribute_group hugepage_attr_group = {
.attrs = hugepage_attr,
};
+static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
+static void thpsize_release(struct kobject *kobj);
+static DEFINE_SPINLOCK(huge_anon_orders_lock);
+static LIST_HEAD(thpsize_list);
+
+struct thpsize {
+ struct kobject kobj;
+ struct list_head node;
+ int order;
+};
+
+#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
+
+static ssize_t thpsize_enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int order = to_thpsize(kobj)->order;
+ const char *output;
+
+ if (test_bit(order, &huge_anon_orders_always))
+ output = "[always] inherit madvise never";
+ else if (test_bit(order, &huge_anon_orders_inherit))
+ output = "always [inherit] madvise never";
+ else if (test_bit(order, &huge_anon_orders_madvise))
+ output = "always inherit [madvise] never";
+ else
+ output = "always inherit madvise [never]";
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t thpsize_enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int order = to_thpsize(kobj)->order;
+ ssize_t ret = count;
+
+ if (sysfs_streq(buf, "always")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_inherit);
+ clear_bit(order, &huge_anon_orders_madvise);
+ set_bit(order, &huge_anon_orders_always);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "inherit")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_madvise);
+ set_bit(order, &huge_anon_orders_inherit);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "madvise")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_inherit);
+ set_bit(order, &huge_anon_orders_madvise);
+ spin_unlock(&huge_anon_orders_lock);
+ } else if (sysfs_streq(buf, "never")) {
+ spin_lock(&huge_anon_orders_lock);
+ clear_bit(order, &huge_anon_orders_always);
+ clear_bit(order, &huge_anon_orders_inherit);
+ clear_bit(order, &huge_anon_orders_madvise);
+ spin_unlock(&huge_anon_orders_lock);
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static struct kobj_attribute thpsize_enabled_attr =
+ __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
+
+static struct attribute *thpsize_attrs[] = {
+ &thpsize_enabled_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group thpsize_attr_group = {
+ .attrs = thpsize_attrs,
+};
+
+static const struct kobj_type thpsize_ktype = {
+ .release = &thpsize_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static struct thpsize *thpsize_create(int order, struct kobject *parent)
+{
+ unsigned long size = (PAGE_SIZE << order) / SZ_1K;
+ struct thpsize *thpsize;
+ int ret;
+
+ thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
+ if (!thpsize)
+ return ERR_PTR(-ENOMEM);
+
+ ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
+ "hugepages-%lukB", size);
+ if (ret) {
+ kfree(thpsize);
+ return ERR_PTR(ret);
+ }
+
+ ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
+ if (ret) {
+ kobject_put(&thpsize->kobj);
+ return ERR_PTR(ret);
+ }
+
+ thpsize->order = order;
+ return thpsize;
+}
+
+static void thpsize_release(struct kobject *kobj)
+{
+ kfree(to_thpsize(kobj));
+}
+
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
int err;
+ struct thpsize *thpsize;
+ unsigned long orders;
+ int order;
+
+ /*
+ * Default to setting PMD-sized THP to inherit the global setting and
+ * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
+ * constant so we have to do this here.
+ */
+ huge_anon_orders_inherit = BIT(PMD_ORDER);
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!*hugepage_kobj)) {
@@ -434,8 +588,24 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
goto remove_hp_group;
}
+ orders = THP_ORDERS_ALL_ANON;
+ order = highest_order(orders);
+ while (orders) {
+ thpsize = thpsize_create(order, *hugepage_kobj);
+ if (IS_ERR(thpsize)) {
+ pr_err("failed to create thpsize for order %d\n", order);
+ err = PTR_ERR(thpsize);
+ goto remove_all;
+ }
+ list_add(&thpsize->node, &thpsize_list);
+ order = next_order(&orders, order);
+ }
+
return 0;
+remove_all:
+ hugepage_exit_sysfs(*hugepage_kobj);
+ return err;
remove_hp_group:
sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
@@ -445,6 +615,13 @@ delete_obj:
static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
+ struct thpsize *thpsize, *tmp;
+
+ list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
+ list_del(&thpsize->node);
+ kobject_put(&thpsize->kobj);
+ }
+
sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
kobject_put(hugepage_kobj);
@@ -505,7 +682,7 @@ static int __init hugepage_init(void)
/*
* hugepages can't be allocated by the buddy allocator
*/
- MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
+ MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
/*
* we use page->mapping and page->index in second tail page
* as list_head: assuming THP order >= 2
@@ -811,7 +988,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
struct folio *folio;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
- if (!transhuge_vma_suitable(vma, haddr))
+ if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return VM_FAULT_FALLBACK;
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
@@ -1098,6 +1275,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
{
spinlock_t *dst_ptl, *src_ptl;
struct page *src_page;
+ struct folio *src_folio;
pmd_t pmd;
pgtable_t pgtable = NULL;
int ret = -ENOMEM;
@@ -1164,11 +1342,12 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
src_page = pmd_page(pmd);
VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+ src_folio = page_folio(src_page);
- get_page(src_page);
- if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
+ folio_get(src_folio);
+ if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
/* Page maybe pinned: split and retry the fault on PTEs. */
- put_page(src_page);
+ folio_put(src_folio);
pte_free(dst_mm, pgtable);
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
@@ -1277,8 +1456,8 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
/*
- * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
- * and split if duplicating fails.
+ * TODO: once we support anonymous pages, use
+ * folio_try_dup_anon_rmap_*() and split if duplicating fails.
*/
pudp_set_wrprotect(src_mm, addr, src_pud);
pud = pud_mkold(pud_wrprotect(pud));
@@ -1721,7 +1900,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (pmd_present(orig_pmd)) {
page = pmd_page(orig_pmd);
- page_remove_rmap(page, vma, true);
+ folio_remove_rmap_pmd(page_folio(page), page, vma);
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(!PageHead(page), page);
} else if (thp_migration_supported()) {
@@ -1964,6 +2143,128 @@ unlock:
return ret;
}
+#ifdef CONFIG_USERFAULTFD
+/*
+ * The PT lock for src_pmd and the mmap_lock for reading are held by
+ * the caller, but it must return after releasing the page_table_lock.
+ * Just move the page from src_pmd to dst_pmd if possible.
+ * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
+ * repeated by the caller, or other errors in case of failure.
+ */
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr)
+{
+ pmd_t _dst_pmd, src_pmdval;
+ struct page *src_page;
+ struct folio *src_folio;
+ struct anon_vma *src_anon_vma;
+ spinlock_t *src_ptl, *dst_ptl;
+ pgtable_t src_pgtable;
+ struct mmu_notifier_range range;
+ int err = 0;
+
+ src_pmdval = *src_pmd;
+ src_ptl = pmd_lockptr(mm, src_pmd);
+
+ lockdep_assert_held(src_ptl);
+ mmap_assert_locked(mm);
+
+ /* Sanity checks before the operation */
+ if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
+ WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
+ spin_unlock(src_ptl);
+ return -EINVAL;
+ }
+
+ if (!pmd_trans_huge(src_pmdval)) {
+ spin_unlock(src_ptl);
+ if (is_pmd_migration_entry(src_pmdval)) {
+ pmd_migration_entry_wait(mm, &src_pmdval);
+ return -EAGAIN;
+ }
+ return -ENOENT;
+ }
+
+ src_page = pmd_page(src_pmdval);
+ if (unlikely(!PageAnonExclusive(src_page))) {
+ spin_unlock(src_ptl);
+ return -EBUSY;
+ }
+
+ src_folio = page_folio(src_page);
+ folio_get(src_folio);
+ spin_unlock(src_ptl);
+
+ flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
+ src_addr + HPAGE_PMD_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+
+ folio_lock(src_folio);
+
+ /*
+ * split_huge_page walks the anon_vma chain without the page
+ * lock. Serialize against it with the anon_vma lock, the page
+ * lock is not enough.
+ */
+ src_anon_vma = folio_get_anon_vma(src_folio);
+ if (!src_anon_vma) {
+ err = -EAGAIN;
+ goto unlock_folio;
+ }
+ anon_vma_lock_write(src_anon_vma);
+
+ dst_ptl = pmd_lockptr(mm, dst_pmd);
+ double_pt_lock(src_ptl, dst_ptl);
+ if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
+ !pmd_same(*dst_pmd, dst_pmdval))) {
+ err = -EAGAIN;
+ goto unlock_ptls;
+ }
+ if (folio_maybe_dma_pinned(src_folio) ||
+ !PageAnonExclusive(&src_folio->page)) {
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
+
+ if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
+ WARN_ON_ONCE(!folio_test_anon(src_folio))) {
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
+
+ folio_move_anon_rmap(src_folio, dst_vma);
+ WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
+ src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+ /* Folio got pinned from under us. Put it back and fail the move. */
+ if (folio_maybe_dma_pinned(src_folio)) {
+ set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
+ err = -EBUSY;
+ goto unlock_ptls;
+ }
+
+ _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
+ /* Follow mremap() behavior and treat the entry dirty after the move */
+ _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+ set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
+
+ src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
+ pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
+unlock_ptls:
+ double_pt_unlock(src_ptl, dst_ptl);
+ anon_vma_unlock_write(src_anon_vma);
+ put_anon_vma(src_anon_vma);
+unlock_folio:
+ /* unblock rmap walks */
+ folio_unlock(src_folio);
+ mmu_notifier_invalidate_range_end(&range);
+ folio_put(src_folio);
+ return err;
+}
+#endif /* CONFIG_USERFAULTFD */
+
/*
* Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
*
@@ -2099,6 +2400,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long haddr, bool freeze)
{
struct mm_struct *mm = vma->vm_mm;
+ struct folio *folio;
struct page *page;
pgtable_t pgtable;
pmd_t old_pmd, _pmd;
@@ -2133,12 +2435,13 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
page = pfn_swap_entry_to_page(entry);
} else {
page = pmd_page(old_pmd);
- if (!PageDirty(page) && pmd_dirty(old_pmd))
- set_page_dirty(page);
- if (!PageReferenced(page) && pmd_young(old_pmd))
- SetPageReferenced(page);
- page_remove_rmap(page, vma, true);
- put_page(page);
+ folio = page_folio(page);
+ if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
+ folio_set_dirty(folio);
+ if (!folio_test_referenced(folio) && pmd_young(old_pmd))
+ folio_set_referenced(folio);
+ folio_remove_rmap_pmd(folio, page, vma);
+ folio_put(folio);
}
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
@@ -2194,16 +2497,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
uffd_wp = pmd_swp_uffd_wp(old_pmd);
} else {
page = pmd_page(old_pmd);
+ folio = page_folio(page);
if (pmd_dirty(old_pmd)) {
dirty = true;
- SetPageDirty(page);
+ folio_set_dirty(folio);
}
write = pmd_write(old_pmd);
young = pmd_young(old_pmd);
soft_dirty = pmd_soft_dirty(old_pmd);
uffd_wp = pmd_uffd_wp(old_pmd);
- VM_BUG_ON_PAGE(!page_count(page), page);
+ VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
/*
* Without "freeze", we'll simply split the PMD, propagating the
@@ -2218,13 +2523,21 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
* In case we cannot clear PageAnonExclusive(), split the PMD
* only and let try_to_migrate_one() fail later.
*
- * See page_try_share_anon_rmap(): invalidate PMD first.
+ * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
*/
- anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
- if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
+ anon_exclusive = PageAnonExclusive(page);
+ if (freeze && anon_exclusive &&
+ folio_try_share_anon_rmap_pmd(folio, page))
freeze = false;
- if (!freeze)
- page_ref_add(page, HPAGE_PMD_NR - 1);
+ if (!freeze) {
+ rmap_t rmap_flags = RMAP_NONE;
+
+ folio_ref_add(folio, HPAGE_PMD_NR - 1);
+ if (anon_exclusive)
+ rmap_flags |= RMAP_EXCLUSIVE;
+ folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
+ vma, haddr, rmap_flags);
+ }
}
/*
@@ -2267,8 +2580,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
if (write)
entry = pte_mkwrite(entry, vma);
- if (anon_exclusive)
- SetPageAnonExclusive(page + i);
if (!young)
entry = pte_mkold(entry);
/* NOTE: this may set soft-dirty too on some archs */
@@ -2278,7 +2589,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
entry = pte_mksoft_dirty(entry);
if (uffd_wp)
entry = pte_mkuffd_wp(entry);
- page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
}
VM_BUG_ON(!pte_none(ptep_get(pte)));
set_pte_at(mm, addr, pte, entry);
@@ -2287,7 +2597,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pte_unmap(pte - 1);
if (!pmd_migration)
- page_remove_rmap(page, vma, true);
+ folio_remove_rmap_pmd(folio, page, vma);
if (freeze)
put_page(page);
@@ -2379,7 +2689,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_folio(struct folio *folio)
{
enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
- TTU_SYNC;
+ TTU_SYNC | TTU_BATCH_FLUSH;
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
@@ -2392,6 +2702,8 @@ static void unmap_folio(struct folio *folio)
try_to_migrate(folio, ttu_flags);
else
try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
+
+ try_to_unmap_flush();
}
static void remap_page(struct folio *folio, unsigned long nr)
@@ -2507,13 +2819,13 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
clear_compound_head(page_tail);
/* Finally unfreeze refcount. Additional reference from page cache. */
- page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
- PageSwapCache(head)));
+ page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
+ folio_test_swapcache(folio)));
- if (page_is_young(head))
- set_page_young(page_tail);
- if (page_is_idle(head))
- set_page_idle(page_tail);
+ if (folio_test_young(folio))
+ folio_set_young(new_folio);
+ if (folio_test_idle(folio))
+ folio_set_idle(new_folio);
folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
@@ -3228,6 +3540,7 @@ late_initcall(split_huge_pages_debugfs);
int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
+ struct folio *folio = page_folio(page);
struct vm_area_struct *vma = pvmw->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long address = pvmw->address;
@@ -3242,15 +3555,15 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
- /* See page_try_share_anon_rmap(): invalidate PMD first. */
- anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
- if (anon_exclusive && page_try_share_anon_rmap(page)) {
+ /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
+ anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
+ if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
set_pmd_at(mm, address, pvmw->pmd, pmdval);
return -EBUSY;
}
if (pmd_dirty(pmdval))
- set_page_dirty(page);
+ folio_set_dirty(folio);
if (pmd_write(pmdval))
entry = make_writable_migration_entry(page_to_pfn(page));
else if (anon_exclusive)
@@ -3267,8 +3580,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
if (pmd_uffd_wp(pmdval))
pmdswp = pmd_swp_mkuffd_wp(pmdswp);
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
- page_remove_rmap(page, vma, true);
- put_page(page);
+ folio_remove_rmap_pmd(folio, page, vma);
+ folio_put(folio);
trace_set_migration_pmd(address, pmd_val(pmdswp));
return 0;
@@ -3276,6 +3589,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
+ struct folio *folio = page_folio(new);
struct vm_area_struct *vma = pvmw->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long address = pvmw->address;
@@ -3287,7 +3601,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
return;
entry = pmd_to_swp_entry(*pvmw->pmd);
- get_page(new);
+ folio_get(folio);
pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
@@ -3298,20 +3612,20 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
if (!is_migration_entry_young(entry))
pmde = pmd_mkold(pmde);
/* NOTE: this may contain setting soft-dirty on some archs */
- if (PageDirty(new) && is_migration_entry_dirty(entry))
+ if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
pmde = pmd_mkdirty(pmde);
- if (PageAnon(new)) {
- rmap_t rmap_flags = RMAP_COMPOUND;
+ if (folio_test_anon(folio)) {
+ rmap_t rmap_flags = RMAP_NONE;
if (!is_readable_migration_entry(entry))
rmap_flags |= RMAP_EXCLUSIVE;
- page_add_anon_rmap(new, vma, haddr, rmap_flags);
+ folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
} else {
- page_add_file_rmap(new, vma, true);
+ folio_add_file_rmap_pmd(folio, new, vma);
}
- VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
+ VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
set_pmd_at(mm, haddr, pvmw->pmd, pmde);
/* No need to invalidate - it was non-present before */
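
A stand-alone sketch of the order-bitmask walk used by __thp_vma_allowable_orders() above (an illustration only, not part of the patch; the kernel's highest_order()/next_order() helpers are assumed to behave like these stand-ins, which rely on a compiler builtin and 64-bit unsigned long):

#include <stdio.h>

/* Editor's stand-ins for the helpers named in the hunk above. */
static int highest_order(unsigned long orders)
{
	return 63 - __builtin_clzl(orders);	/* index of the highest set bit */
}

static int next_order(unsigned long *orders, int prev)
{
	*orders &= ~(1UL << prev);		/* drop the order just examined */
	return *orders ? highest_order(*orders) : 0;
}

int main(void)
{
	/* e.g. PMD order 9 plus two smaller anonymous THP orders */
	unsigned long orders = (1UL << 9) | (1UL << 4) | (1UL << 2);
	int order = highest_order(orders);

	while (orders) {
		printf("considering order %d\n", order);	/* 9, then 4, then 2 */
		order = next_order(&orders, order);
	}
	return 0;
}
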
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c466551e2fd9..ed1581b670d4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3410,7 +3410,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
/*
* Put bootmem huge pages into the standard lists after mem_map is up.
- * Note: This only applies to gigantic (order > MAX_ORDER) pages.
+ * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
*/
static void __init gather_bootmem_prealloc(void)
{
@@ -4790,7 +4790,7 @@ static int __init default_hugepagesz_setup(char *s)
* The number of default huge pages (for this size) could have been
* specified as the first hugetlb parameter: hugepages=X. If so,
* then default_hstate_max_huge_pages is set. If the default huge
- * page size is gigantic (> MAX_ORDER), then the pages must be
+ * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
* allocated here from bootmem allocator.
*/
if (default_hstate_max_huge_pages) {
@@ -5285,7 +5285,7 @@ hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long add
pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
__folio_mark_uptodate(new_folio);
- hugepage_add_new_anon_rmap(new_folio, vma, addr);
+ hugetlb_add_new_anon_rmap(new_folio, vma, addr);
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
newpte = huge_pte_mkuffd_wp(newpte);
set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
@@ -5408,9 +5408,8 @@ again:
* sleep during the process.
*/
if (!folio_test_anon(pte_folio)) {
- page_dup_file_rmap(&pte_folio->page, true);
- } else if (page_try_dup_anon_rmap(&pte_folio->page,
- true, src_vma)) {
+ hugetlb_add_file_rmap(pte_folio);
+ } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
pte_t src_pte_old = entry;
struct folio *new_folio;
@@ -5676,7 +5675,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
make_pte_marker(PTE_MARKER_UFFD_WP),
sz);
hugetlb_count_sub(pages_per_huge_page(h), mm);
- page_remove_rmap(page, vma, true);
+ hugetlb_remove_rmap(page_folio(page));
spin_unlock(ptl);
tlb_remove_page_size(tlb, page, huge_page_size(h));
@@ -5987,8 +5986,8 @@ retry_avoidcopy:
/* Break COW or unshare */
huge_ptep_clear_flush(vma, haddr, ptep);
- page_remove_rmap(&old_folio->page, vma, true);
- hugepage_add_new_anon_rmap(new_folio, vma, haddr);
+ hugetlb_remove_rmap(old_folio);
+ hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
if (huge_pte_uffd_wp(pte))
newpte = huge_pte_mkuffd_wp(newpte);
set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
@@ -6277,9 +6276,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
goto backout;
if (anon_rmap)
- hugepage_add_new_anon_rmap(folio, vma, haddr);
+ hugetlb_add_new_anon_rmap(folio, vma, haddr);
else
- page_dup_file_rmap(&folio->page, true);
+ hugetlb_add_file_rmap(folio);
new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
/*
@@ -6730,9 +6729,9 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
goto out_release_unlock;
if (folio_in_pagecache)
- page_dup_file_rmap(&folio->page, true);
+ hugetlb_add_file_rmap(folio);
else
- hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr);
+ hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
/*
* For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
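Editorial aside, not part of the patch: the hugetlb.c hunks above are a mechanical conversion to the hugetlb-specific folio rmap helpers. The one-to-one mapping, as it appears in these hunks, is:

/*
 * page_dup_file_rmap(&folio->page, true)           -> hugetlb_add_file_rmap(folio)
 * page_try_dup_anon_rmap(&folio->page, true, vma)  -> hugetlb_try_dup_anon_rmap(folio, vma)
 * hugepage_add_new_anon_rmap(folio, vma, addr)     -> hugetlb_add_new_anon_rmap(folio, vma, addr)
 * page_remove_rmap(page, vma, true)                -> hugetlb_remove_rmap(page_folio(page))
 */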
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 87818ee7f01d..da177e49d956 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -14,6 +14,7 @@
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <linux/mmdebug.h>
+#include <linux/pagewalk.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"
@@ -45,21 +46,14 @@ struct vmemmap_remap_walk {
unsigned long flags;
};
-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start, bool flush)
+static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
+ struct vmemmap_remap_walk *walk)
{
pmd_t __pmd;
int i;
unsigned long addr = start;
- struct page *head;
pte_t *pgtable;
- spin_lock(&init_mm.page_table_lock);
- head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
- spin_unlock(&init_mm.page_table_lock);
-
- if (!head)
- return 0;
-
pgtable = pte_alloc_one_kernel(&init_mm);
if (!pgtable)
return -ENOMEM;
@@ -88,7 +82,7 @@ static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start, bool flush)
/* Make pte visible before pmd. See comment in pmd_install(). */
smp_wmb();
pmd_populate_kernel(&init_mm, pmd, pgtable);
- if (flush)
+ if (!(walk->flags & VMEMMAP_SPLIT_NO_TLB_FLUSH))
flush_tlb_kernel_range(start, start + PMD_SIZE);
} else {
pte_free_kernel(&init_mm, pgtable);
@@ -98,123 +92,83 @@ static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start, bool flush)
return 0;
}
-static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
+static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
- pte_t *pte = pte_offset_kernel(pmd, addr);
+ int ret = 0;
+ struct page *head;
+ struct vmemmap_remap_walk *vmemmap_walk = walk->private;
+
+ /* Only splitting, not remapping the vmemmap pages. */
+ if (!vmemmap_walk->remap_pte)
+ walk->action = ACTION_CONTINUE;
+ spin_lock(&init_mm.page_table_lock);
+ head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
/*
- * The reuse_page is found 'first' in table walk before we start
- * remapping (which is calling @walk->remap_pte).
+ * Due to HugeTLB alignment requirements, and because the vmemmap
+ * pages sit at the start of the hotplugged memory region in the
+ * memory_hotplug.memmap_on_memory case, it is sufficient to check
+ * whether the vmemmap page backing the first vmemmap page is
+ * self-hosted.
+ *
+ * [                hotplugged memory                 ]
+ * [     section     ][...][     section     ]
+ * [  vmemmap  ][           usable memory            ]
+ *   ^      |      ^                                 |
+ *   +------+      |                                 |
+ *                 +---------------------------------+
*/
- if (!walk->reuse_page) {
- walk->reuse_page = pte_page(ptep_get(pte));
- /*
- * Because the reuse address is part of the range that we are
- * walking, skip the reuse address range.
- */
- addr += PAGE_SIZE;
- pte++;
- walk->nr_walked++;
- }
+ if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && unlikely(!vmemmap_walk->nr_walked)) {
+ struct page *page = head ? head + pte_index(addr) :
+ pte_page(ptep_get(pte_offset_kernel(pmd, addr)));
- for (; addr != end; addr += PAGE_SIZE, pte++) {
- walk->remap_pte(pte, addr, walk);
- walk->nr_walked++;
+ if (PageVmemmapSelfHosted(page))
+ ret = -ENOTSUPP;
}
-}
-
-static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_offset(pud, addr);
- do {
- int ret;
-
- ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK,
- !(walk->flags & VMEMMAP_SPLIT_NO_TLB_FLUSH));
- if (ret)
- return ret;
-
- next = pmd_addr_end(addr, end);
-
- /*
- * We are only splitting, not remapping the hugetlb vmemmap
- * pages.
- */
- if (!walk->remap_pte)
- continue;
-
- vmemmap_pte_range(pmd, addr, next, walk);
- } while (pmd++, addr = next, addr != end);
+ spin_unlock(&init_mm.page_table_lock);
+ if (!head || ret)
+ return ret;
- return 0;
+ return vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk);
}
-static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
+static int vmemmap_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
- pud_t *pud;
- unsigned long next;
+ struct vmemmap_remap_walk *vmemmap_walk = walk->private;
- pud = pud_offset(p4d, addr);
- do {
- int ret;
-
- next = pud_addr_end(addr, end);
- ret = vmemmap_pmd_range(pud, addr, next, walk);
- if (ret)
- return ret;
- } while (pud++, addr = next, addr != end);
+ /*
+ * The reuse_page is found 'first' in page table walking before
+ * starting remapping.
+ */
+ if (!vmemmap_walk->reuse_page)
+ vmemmap_walk->reuse_page = pte_page(ptep_get(pte));
+ else
+ vmemmap_walk->remap_pte(pte, addr, vmemmap_walk);
+ vmemmap_walk->nr_walked++;
return 0;
}
-static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end,
- struct vmemmap_remap_walk *walk)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_offset(pgd, addr);
- do {
- int ret;
-
- next = p4d_addr_end(addr, end);
- ret = vmemmap_pud_range(p4d, addr, next, walk);
- if (ret)
- return ret;
- } while (p4d++, addr = next, addr != end);
-
- return 0;
-}
+static const struct mm_walk_ops vmemmap_remap_ops = {
+ .pmd_entry = vmemmap_pmd_entry,
+ .pte_entry = vmemmap_pte_entry,
+};
static int vmemmap_remap_range(unsigned long start, unsigned long end,
struct vmemmap_remap_walk *walk)
{
- unsigned long addr = start;
- unsigned long next;
- pgd_t *pgd;
-
- VM_BUG_ON(!PAGE_ALIGNED(start));
- VM_BUG_ON(!PAGE_ALIGNED(end));
+ int ret;
- pgd = pgd_offset_k(addr);
- do {
- int ret;
+ VM_BUG_ON(!PAGE_ALIGNED(start | end));
- next = pgd_addr_end(addr, end);
- ret = vmemmap_p4d_range(pgd, addr, next, walk);
- if (ret)
- return ret;
- } while (pgd++, addr = next, addr != end);
+ mmap_read_lock(&init_mm);
+ ret = walk_page_range_novma(&init_mm, start, end, &vmemmap_remap_ops,
+ NULL, walk);
+ mmap_read_unlock(&init_mm);
+ if (ret)
+ return ret;
if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
flush_tlb_kernel_range(start, end);
@@ -328,9 +282,8 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
* Return: %0 on success, negative error code otherwise.
*/
static int vmemmap_remap_split(unsigned long start, unsigned long end,
- unsigned long reuse)
+ unsigned long reuse)
{
- int ret;
struct vmemmap_remap_walk walk = {
.remap_pte = NULL,
.flags = VMEMMAP_SPLIT_NO_TLB_FLUSH,
@@ -339,11 +292,7 @@ static int vmemmap_remap_split(unsigned long start, unsigned long end,
/* See the comment in the vmemmap_remap_free(). */
BUG_ON(start - reuse != PAGE_SIZE);
- mmap_read_lock(&init_mm);
- ret = vmemmap_remap_range(reuse, end, &walk);
- mmap_read_unlock(&init_mm);
-
- return ret;
+ return vmemmap_remap_range(reuse, end, &walk);
}
/**
@@ -406,7 +355,6 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
*/
BUG_ON(start - reuse != PAGE_SIZE);
- mmap_read_lock(&init_mm);
ret = vmemmap_remap_range(reuse, end, &walk);
if (ret && walk.nr_walked) {
end = reuse + walk.nr_walked * PAGE_SIZE;
@@ -425,7 +373,6 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
vmemmap_remap_range(reuse, end, &walk);
}
- mmap_read_unlock(&init_mm);
return ret;
}
@@ -482,11 +429,7 @@ static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
return -ENOMEM;
- mmap_read_lock(&init_mm);
- vmemmap_remap_range(reuse, end, &walk);
- mmap_read_unlock(&init_mm);
-
- return 0;
+ return vmemmap_remap_range(reuse, end, &walk);
}
DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
@@ -495,14 +438,14 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
-static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags)
+static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
+ struct folio *folio, unsigned long flags)
{
int ret;
- struct page *head = &folio->page;
- unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
- VM_WARN_ON_ONCE(!PageHuge(head));
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
if (!folio_test_hugetlb_vmemmap_optimized(folio))
return 0;
@@ -565,7 +508,7 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
if (folio_test_hugetlb_vmemmap_optimized(folio)) {
ret = __hugetlb_vmemmap_restore_folio(h, folio,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ VMEMMAP_REMAP_NO_TLB_FLUSH);
if (ret)
break;
restored++;
@@ -583,9 +526,9 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
}
/* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
-static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
+static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
{
- if (HPageVmemmapOptimized((struct page *)head))
+ if (folio_test_hugetlb_vmemmap_optimized(folio))
return false;
if (!READ_ONCE(vmemmap_optimize_enabled))
@@ -594,65 +537,20 @@ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *h
if (!hugetlb_vmemmap_optimizable(h))
return false;
- if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
- pmd_t *pmdp, pmd;
- struct page *vmemmap_page;
- unsigned long vaddr = (unsigned long)head;
-
- /*
- * Only the vmemmap page's vmemmap page can be self-hosted.
- * Walking the page tables to find the backing page of the
- * vmemmap page.
- */
- pmdp = pmd_off_k(vaddr);
- /*
- * The READ_ONCE() is used to stabilize *pmdp in a register or
- * on the stack so that it will stop changing under the code.
- * The only concurrent operation where it can be changed is
- * split_vmemmap_huge_pmd() (*pmdp will be stable after this
- * operation).
- */
- pmd = READ_ONCE(*pmdp);
- if (pmd_leaf(pmd))
- vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
- else
- vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
- /*
- * Due to HugeTLB alignment requirements and the vmemmap pages
- * being at the start of the hotplugged memory region in
- * memory_hotplug.memmap_on_memory case. Checking any vmemmap
- * page's vmemmap page if it is marked as VmemmapSelfHosted is
- * sufficient.
- *
- * [ hotplugged memory ]
- * [ section ][...][ section ]
- * [ vmemmap ][ usable memory ]
- * ^ | | |
- * +---+ | |
- * ^ | |
- * +-------+ |
- * ^ |
- * +-------------------------------------------+
- */
- if (PageVmemmapSelfHosted(vmemmap_page))
- return false;
- }
-
return true;
}
static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
- struct folio *folio,
- struct list_head *vmemmap_pages,
- unsigned long flags)
+ struct folio *folio,
+ struct list_head *vmemmap_pages,
+ unsigned long flags)
{
int ret = 0;
- struct page *head = &folio->page;
- unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
- VM_WARN_ON_ONCE(!PageHuge(head));
- if (!vmemmap_should_optimize(h, head))
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+ if (!vmemmap_should_optimize_folio(h, folio))
return ret;
static_branch_inc(&hugetlb_optimize_vmemmap_key);
@@ -680,7 +578,7 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
* the caller.
*/
ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse,
- vmemmap_pages, flags);
+ vmemmap_pages, flags);
if (ret) {
static_branch_dec(&hugetlb_optimize_vmemmap_key);
folio_clear_hugetlb_vmemmap_optimized(folio);
@@ -707,12 +605,12 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
free_vmemmap_page_list(&vmemmap_pages);
}
-static int hugetlb_vmemmap_split(const struct hstate *h, struct page *head)
+static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
{
- unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
unsigned long vmemmap_reuse;
- if (!vmemmap_should_optimize(h, head))
+ if (!vmemmap_should_optimize_folio(h, folio))
return 0;
vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
@@ -732,7 +630,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
LIST_HEAD(vmemmap_pages);
list_for_each_entry(folio, folio_list, lru) {
- int ret = hugetlb_vmemmap_split(h, &folio->page);
+ int ret = hugetlb_vmemmap_split_folio(h, folio);
/*
* Splitting the PMD requires allocating a page, thus let's fail
@@ -747,9 +645,10 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
list_for_each_entry(folio, folio_list, lru) {
- int ret = __hugetlb_vmemmap_optimize_folio(h, folio,
- &vmemmap_pages,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ int ret;
+
+ ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+ VMEMMAP_REMAP_NO_TLB_FLUSH);
/*
* Pages to be freed may have been accumulated. If we
@@ -763,9 +662,8 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
INIT_LIST_HEAD(&vmemmap_pages);
- __hugetlb_vmemmap_optimize_folio(h, folio,
- &vmemmap_pages,
- VMEMMAP_REMAP_NO_TLB_FLUSH);
+ __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+ VMEMMAP_REMAP_NO_TLB_FLUSH);
}
}
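For context, a minimal sketch (not part of this patch) of the page-walk pattern hugetlb_vmemmap.c switches to above, using only the walk_page_range_novma() call and struct mm_walk_ops callback signatures visible in the hunks; the sketch_* names are hypothetical.

static int sketch_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct vmemmap_remap_walk *priv = walk->private;

	/* Per-PTE work goes here; walk->private carries the remap state. */
	priv->nr_walked++;
	return 0;
}

static const struct mm_walk_ops sketch_ops = {
	.pte_entry = sketch_pte_entry,
};

static int sketch_walk_kernel_range(unsigned long start, unsigned long end,
				    struct vmemmap_remap_walk *priv)
{
	int ret;

	/* init_mm is walked under its mmap lock, as in the hunk above. */
	mmap_read_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &sketch_ops,
				    NULL, priv);
	mmap_read_unlock(&init_mm);
	return ret;
}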
diff --git a/mm/internal.h b/mm/internal.h
index b61034bd50f5..f309a010d50f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -54,12 +54,12 @@ void page_writeback_init(void);
/*
* If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
- * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
+ * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
* above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
* leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
*/
-#define COMPOUND_MAPPED 0x800000
-#define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1)
+#define ENTIRELY_MAPPED 0x800000
+#define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
/*
* Flags passed to __show_mem() and show_free_areas() to suppress output in
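A quick check of the arithmetic in the ENTIRELY_MAPPED comment above (editorial, not part of the patch): a 16GB folio mapped by 4kB PTEs has 2^34 / 2^12 = 2^22 = 0x400000 pages, so nr_pages_mapped can reach 0x400000; ENTIRELY_MAPPED = 0x800000 = 2^23 is the first bit above that range, and FOLIO_PAGES_MAPPED = 0x7fffff masks out the per-page count.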
@@ -138,7 +138,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
loff_t end);
-long invalidate_inode_page(struct page *page);
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_failed);
@@ -335,7 +335,7 @@ static inline bool page_is_buddy(struct page *page, struct page *buddy,
* satisfies the following equation:
* P = B & ~(1 << O)
*
- * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
+ * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
*/
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
@@ -616,7 +616,7 @@ folio_within_range(struct folio *folio, struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
pgoff_t pgoff, addr;
- unsigned long vma_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long vma_pglen = vma_pages(vma);
VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
if (start > end)
@@ -650,8 +650,8 @@ folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
* should be called with vma's mmap_lock held for read or write,
* under page table lock for the pte/pmd being added or removed.
*
- * mlock is usually called at the end of page_add_*_rmap(), munlock at
- * the end of page_remove_rmap(); but new anon folios are managed by
+ * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
+ * the end of folio_remove_rmap_*(); but new anon folios are managed by
* folio_add_lru_vma() calling mlock_new_folio().
*/
void mlock_folio(struct folio *folio);
@@ -1047,7 +1047,7 @@ enum {
* * Ordinary GUP: Using the PT lock
* * GUP-fast and fork(): mm->write_protect_seq
* * GUP-fast and KSM or temporary unmapping (swap, migration): see
- * page_try_share_anon_rmap()
+ * folio_try_share_anon_rmap_*()
*
* Must be called with the (sub)page that's actually referenced via the
* page table entry, which might not necessarily be the head page for a
@@ -1090,7 +1090,7 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma,
return is_cow_mapping(vma->vm_flags);
}
- /* Paired with a memory barrier in page_try_share_anon_rmap(). */
+ /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
smp_rmb();
@@ -1135,8 +1135,6 @@ static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
static inline void vma_iter_config(struct vma_iterator *vmi,
unsigned long index, unsigned long last)
{
- MAS_BUG_ON(&vmi->mas, vmi->mas.node != MAS_START &&
- (vmi->mas.index > index || vmi->mas.last < index));
__mas_set_range(&vmi->mas, index, last - 1);
}
@@ -1154,17 +1152,6 @@ static inline void vma_iter_clear(struct vma_iterator *vmi)
mas_store_prealloc(&vmi->mas, NULL);
}
-static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
- unsigned long start, unsigned long end, gfp_t gfp)
-{
- __mas_set_range(&vmi->mas, start, end - 1);
- mas_store_gfp(&vmi->mas, NULL, gfp);
- if (unlikely(mas_is_err(&vmi->mas)))
- return -ENOMEM;
-
- return 0;
-}
-
static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
return mas_walk(&vmi->mas);
@@ -1176,13 +1163,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
- if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.index > vma->vm_start)) {
pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
vmi->mas.index, vma->vm_start, vma->vm_start,
vma->vm_end, vmi->mas.index, vmi->mas.last);
}
- if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
vmi->mas.last < vma->vm_start)) {
pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
@@ -1190,7 +1177,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
}
#endif
- if (vmi->mas.node != MAS_START &&
+ if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
@@ -1201,7 +1188,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
struct vm_area_struct *vma, gfp_t gfp)
{
- if (vmi->mas.node != MAS_START &&
+ if (vmi->mas.status != ma_start &&
((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
vma_iter_invalidate(vmi);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 5d95219e69d7..610efae91220 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -20,8 +20,10 @@
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
+#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -37,19 +39,35 @@ struct slab *kasan_addr_to_slab(const void *addr)
return NULL;
}
-depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
+depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
unsigned long entries[KASAN_STACK_DEPTH];
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
- return __stack_depot_save(entries, nr_entries, flags, can_alloc);
+ return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}
-void kasan_set_track(struct kasan_track *track, gfp_t flags)
+void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
+#ifdef CONFIG_KASAN_EXTRA_INFO
+ u32 cpu = raw_smp_processor_id();
+ u64 ts_nsec = local_clock();
+
+ track->cpu = cpu;
+ track->timestamp = ts_nsec >> 3;
+#endif /* CONFIG_KASAN_EXTRA_INFO */
track->pid = current->pid;
- track->stack = kasan_save_stack(flags, true);
+ track->stack = stack;
+}
+
+void kasan_save_track(struct kasan_track *track, gfp_t flags)
+{
+ depot_stack_handle_t stack;
+
+ stack = kasan_save_stack(flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+ kasan_set_track(track, stack);
}
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
@@ -69,6 +87,9 @@ EXPORT_SYMBOL(kasan_disable_current);
void __kasan_unpoison_range(const void *address, size_t size)
{
+ if (is_kfence_address(address))
+ return;
+
kasan_unpoison(address, size, false);
}
@@ -133,12 +154,12 @@ void __kasan_poison_slab(struct slab *slab)
KASAN_SLAB_REDZONE, false);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
kasan_unpoison(object, cache->object_size, false);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_SLAB_REDZONE, false);
@@ -188,8 +209,8 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
return (void *)object;
}
-static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
- unsigned long ip, bool quarantine, bool init)
+static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
+ unsigned long ip, bool init)
{
void *tagged_object;
@@ -199,16 +220,12 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
tagged_object = object;
object = kasan_reset_tag(object);
- if (is_kfence_address(object))
- return false;
-
- if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
- object)) {
+ if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
return true;
}
- /* RCU slabs could be legally used after free within the RCU period */
+ /* RCU slabs could be legally used after free within the RCU period. */
if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
return false;
@@ -220,22 +237,45 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_SLAB_FREE, init);
- if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
- return false;
-
if (kasan_stack_collection_enabled())
kasan_save_free_info(cache, tagged_object);
- return kasan_quarantine_put(cache, object);
+ return false;
}
bool __kasan_slab_free(struct kmem_cache *cache, void *object,
unsigned long ip, bool init)
{
- return ____kasan_slab_free(cache, object, ip, true, init);
+ if (is_kfence_address(object))
+ return false;
+
+ /*
+ * If the object is buggy, do not let slab put the object onto the
+ * freelist. The object will thus never be allocated again and its
+ * metadata will never get released.
+ */
+ if (poison_slab_object(cache, object, ip, init))
+ return true;
+
+ /*
+ * If the object is put into quarantine, do not let slab put the object
+ * onto the freelist for now. The object's metadata is kept until the
+ * object gets evicted from quarantine.
+ */
+ if (kasan_quarantine_put(cache, object))
+ return true;
+
+ /*
+ * If the object is not put into quarantine, it will likely be quickly
+ * reallocated. Thus, release its metadata now.
+ */
+ kasan_release_object_meta(cache, object);
+
+ /* Let slab put the object onto the freelist. */
+ return false;
}
-static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
+static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
if (!kasan_arch_is_ready())
return false;
@@ -250,40 +290,28 @@ static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
return true;
}
- /*
- * The object will be poisoned by kasan_poison_pages() or
- * kasan_slab_free_mempool().
- */
-
return false;
}
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
- ____kasan_kfree_large(ptr, ip);
+ check_page_allocation(ptr, ip);
+
+ /* The object will be poisoned by kasan_poison_pages(). */
}
-void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
+ gfp_t flags, bool init)
{
- struct folio *folio;
-
- folio = virt_to_folio(ptr);
-
/*
- * Even though this function is only called for kmem_cache_alloc and
- * kmalloc backed mempool allocations, those allocations can still be
- * !PageSlab() when the size provided to kmalloc is larger than
- * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+ * Unpoison the whole object. For kmalloc() allocations,
+ * poison_kmalloc_redzone() will do precise poisoning.
*/
- if (unlikely(!folio_test_slab(folio))) {
- if (____kasan_kfree_large(ptr, ip))
- return;
- kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
- } else {
- struct slab *slab = folio_slab(folio);
+ kasan_unpoison(object, cache->object_size, init);
- ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
- }
+ /* Save alloc info (if possible) for non-kmalloc() allocations. */
+ if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
+ kasan_save_alloc_info(cache, object, flags);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
@@ -308,39 +336,18 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
tag = assign_tag(cache, object, false);
tagged_object = set_tag(object, tag);
- /*
- * Unpoison the whole object.
- * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
- */
- kasan_unpoison(tagged_object, cache->object_size, init);
-
- /* Save alloc info (if possible) for non-kmalloc() allocations. */
- if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
- kasan_save_alloc_info(cache, tagged_object, flags);
+ /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+ unpoison_slab_object(cache, tagged_object, flags, init);
return tagged_object;
}
-static inline void *____kasan_kmalloc(struct kmem_cache *cache,
+static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
const void *object, size_t size, gfp_t flags)
{
unsigned long redzone_start;
unsigned long redzone_end;
- if (gfpflags_allow_blocking(flags))
- kasan_quarantine_reduce();
-
- if (unlikely(object == NULL))
- return NULL;
-
- if (is_kfence_address(kasan_reset_tag(object)))
- return (void *)object;
-
- /*
- * The object has already been unpoisoned by kasan_slab_alloc() for
- * kmalloc() or by kasan_krealloc() for krealloc().
- */
-
/*
* The redzone has byte-level precision for the generic mode.
* Partially poison the last object granule to cover the unaligned
@@ -364,34 +371,34 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, (void *)object, flags);
- /* Keep the tag that was set by kasan_slab_alloc(). */
- return (void *)object;
}
void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
size_t size, gfp_t flags)
{
- return ____kasan_kmalloc(cache, object, size, flags);
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
+ if (unlikely(object == NULL))
+ return NULL;
+
+ if (is_kfence_address(object))
+ return (void *)object;
+
+ /* The object has already been unpoisoned by kasan_slab_alloc(). */
+ poison_kmalloc_redzone(cache, object, size, flags);
+
+ /* Keep the tag that was set by kasan_slab_alloc(). */
+ return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);
-void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
gfp_t flags)
{
unsigned long redzone_start;
unsigned long redzone_end;
- if (gfpflags_allow_blocking(flags))
- kasan_quarantine_reduce();
-
- if (unlikely(ptr == NULL))
- return NULL;
-
- /*
- * The object has already been unpoisoned by kasan_unpoison_pages() for
- * alloc_pages() or by kasan_krealloc() for krealloc().
- */
-
/*
* The redzone has byte-level precision for the generic mode.
* Partially poison the last object granule to cover the unaligned
@@ -401,12 +408,25 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
kasan_poison_last_granule(ptr, size);
/* Poison the aligned part of the redzone. */
- redzone_start = round_up((unsigned long)(ptr + size),
- KASAN_GRANULE_SIZE);
+ redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
kasan_poison((void *)redzone_start, redzone_end - redzone_start,
KASAN_PAGE_REDZONE, false);
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
+ if (unlikely(ptr == NULL))
+ return NULL;
+ /* The object has already been unpoisoned by kasan_unpoison_pages(). */
+ poison_kmalloc_large_redzone(ptr, size, flags);
+
+ /* Keep the tag that was set by alloc_pages(). */
return (void *)ptr;
}
@@ -414,9 +434,15 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
{
struct slab *slab;
+ if (gfpflags_allow_blocking(flags))
+ kasan_quarantine_reduce();
+
if (unlikely(object == ZERO_SIZE_PTR))
return (void *)object;
+ if (is_kfence_address(object))
+ return (void *)object;
+
/*
* Unpoison the object's data.
* Part of it might already have been unpoisoned, but it's unknown
@@ -428,9 +454,91 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
if (unlikely(!slab))
- return __kasan_kmalloc_large(object, size, flags);
+ poison_kmalloc_large_redzone(object, size, flags);
else
- return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
+ poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
+
+ return (void *)object;
+}
+
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip)
+{
+ unsigned long *ptr;
+
+ if (unlikely(PageHighMem(page)))
+ return true;
+
+ /* Bail out if allocation was excluded due to sampling. */
+ if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ page_kasan_tag(page) == KASAN_TAG_KERNEL)
+ return true;
+
+ ptr = page_address(page);
+
+ if (check_page_allocation(ptr, ip))
+ return false;
+
+ kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);
+
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip)
+{
+ __kasan_unpoison_pages(page, order, false);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
+{
+ struct folio *folio = virt_to_folio(ptr);
+ struct slab *slab;
+
+ /*
+ * This function can be called for large kmalloc allocations that get
+ * their memory from page_alloc. Thus, the folio might not be a slab.
+ */
+ if (unlikely(!folio_test_slab(folio))) {
+ if (check_page_allocation(ptr, ip))
+ return false;
+ kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
+ return true;
+ }
+
+ if (is_kfence_address(ptr))
+ return false;
+
+ slab = folio_slab(folio);
+ return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
+{
+ struct slab *slab;
+ gfp_t flags = 0; /* Might be executing under a lock. */
+
+ slab = virt_to_slab(ptr);
+
+ /*
+ * This function can be called for large kmalloc allocations that get
+ * their memory from page_alloc.
+ */
+ if (unlikely(!slab)) {
+ kasan_unpoison(ptr, size, false);
+ poison_kmalloc_large_redzone(ptr, size, flags);
+ return;
+ }
+
+ if (is_kfence_address(ptr))
+ return;
+
+ /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
+ unpoison_slab_object(slab->slab_cache, ptr, size, flags);
+
+ /* Poison the redzone and save alloc info for kmalloc() allocations. */
+ if (is_kmalloc_cache(slab->slab_cache))
+ poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}
bool __kasan_check_byte(const void *address, unsigned long ip)
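For illustration (not part of this patch): how a mempool-style element cache could drive the new hooks above, using only the __kasan_mempool_*() signatures added in this file; the element-array layout and helper names are hypothetical, and real callers would normally go through wrappers that pass _RET_IP_ for them.

static bool sketch_cache_element(void **elements, int *nr, void *ptr)
{
	/* Poison on the way in; a double-free/invalid-free is reported here. */
	if (!__kasan_mempool_poison_object(ptr, _RET_IP_))
		return false;	/* Buggy pointer: do not keep it around. */

	elements[(*nr)++] = ptr;
	return true;
}

static void *sketch_reuse_element(void **elements, int *nr, size_t size)
{
	void *ptr = elements[--(*nr)];

	/* Unpoison and refresh alloc metadata before handing it back out. */
	__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
	return ptr;
}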
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 4d837ab83f08..24c13dfb1e94 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -25,6 +25,8 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -361,6 +363,8 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
{
unsigned int ok_size;
unsigned int optimal_size;
+ unsigned int rem_free_meta_size;
+ unsigned int orig_alloc_meta_offset;
if (!kasan_requires_meta())
return;
@@ -378,49 +382,77 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
ok_size = *size;
- /* Add alloc meta into redzone. */
+ /* Add alloc meta into the redzone. */
cache->kasan_info.alloc_meta_offset = *size;
*size += sizeof(struct kasan_alloc_meta);
- /*
- * If alloc meta doesn't fit, don't add it.
- * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
- * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
- * larger sizes.
- */
+ /* If alloc meta doesn't fit, don't add it. */
if (*size > KMALLOC_MAX_SIZE) {
cache->kasan_info.alloc_meta_offset = 0;
*size = ok_size;
/* Continue, since free meta might still fit. */
}
+ ok_size = *size;
+ orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;
+
/*
- * Add free meta into redzone when it's not possible to store
+ * Store free meta in the redzone when it's not possible to store
* it in the object. This is the case when:
* 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
* be touched after it was freed, or
* 2. Object has a constructor, which means it's expected to
- * retain its content until the next allocation, or
- * 3. Object is too small.
- * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
+ * retain its content until the next allocation.
*/
- if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
- cache->object_size < sizeof(struct kasan_free_meta)) {
- ok_size = *size;
-
+ if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
cache->kasan_info.free_meta_offset = *size;
*size += sizeof(struct kasan_free_meta);
+ goto free_meta_added;
+ }
- /* If free meta doesn't fit, don't add it. */
- if (*size > KMALLOC_MAX_SIZE) {
- cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
- *size = ok_size;
- }
+ /*
+ * Otherwise, if the object is large enough to contain free meta,
+ * store it within the object.
+ */
+ if (sizeof(struct kasan_free_meta) <= cache->object_size) {
+ /* cache->kasan_info.free_meta_offset = 0 is implied. */
+ goto free_meta_added;
+ }
+
+ /*
+ * For smaller objects, store the beginning of free meta within the
+ * object and the end in the redzone, shifting the location of
+ * alloc meta to free up space for free meta.
+ * This is only possible when slub_debug is disabled, as otherwise
+ * the end of free meta will overlap with slub_debug metadata.
+ */
+ if (!__slub_debug_enabled()) {
+ rem_free_meta_size = sizeof(struct kasan_free_meta) -
+ cache->object_size;
+ *size += rem_free_meta_size;
+ if (cache->kasan_info.alloc_meta_offset != 0)
+ cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
+ goto free_meta_added;
+ }
+
+ /*
+ * If the object is small and slub_debug is enabled, store free meta
+ * in the redzone after alloc meta.
+ */
+ cache->kasan_info.free_meta_offset = *size;
+ *size += sizeof(struct kasan_free_meta);
+
+free_meta_added:
+ /* If free meta doesn't fit, don't add it. */
+ if (*size > KMALLOC_MAX_SIZE) {
+ cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+ cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
+ *size = ok_size;
}
/* Calculate size with optimal redzone. */
optimal_size = cache->object_size + optimal_redzone(cache->object_size);
- /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
+ /* Limit it with KMALLOC_MAX_SIZE. */
if (optimal_size > KMALLOC_MAX_SIZE)
optimal_size = KMALLOC_MAX_SIZE;
/* Use optimal size if the size with added metas is not large enough. */
@@ -450,8 +482,63 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
struct kasan_alloc_meta *alloc_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
+ if (alloc_meta) {
+ /* Zero out alloc meta to mark it as invalid. */
__memset(alloc_meta, 0, sizeof(*alloc_meta));
+
+ /*
+ * Temporarily disable KASAN bug reporting to allow instrumented
+ * raw_spin_lock_init to access aux_lock, which resides inside
+ * of a redzone.
+ */
+ kasan_disable_current();
+ raw_spin_lock_init(&alloc_meta->aux_lock);
+ kasan_enable_current();
+ }
+
+ /*
+ * Explicitly marking free meta as invalid is not required: the shadow
+ * value for the first 8 bytes of a newly allocated object is not
+ * KASAN_SLAB_FREE_META.
+ */
+}
+
+static void release_alloc_meta(struct kasan_alloc_meta *meta)
+{
+ /* Evict the stack traces from stack depot. */
+ stack_depot_put(meta->alloc_track.stack);
+ stack_depot_put(meta->aux_stack[0]);
+ stack_depot_put(meta->aux_stack[1]);
+
+ /* Zero out alloc meta to mark it as invalid. */
+ __memset(meta, 0, sizeof(*meta));
+}
+
+static void release_free_meta(const void *object, struct kasan_free_meta *meta)
+{
+ /* Check if free meta is valid. */
+ if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
+ return;
+
+ /* Evict the stack trace from the stack depot. */
+ stack_depot_put(meta->free_track.stack);
+
+ /* Mark free meta as invalid. */
+ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
+}
+
+void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
+{
+ struct kasan_alloc_meta *alloc_meta;
+ struct kasan_free_meta *free_meta;
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (alloc_meta)
+ release_alloc_meta(alloc_meta);
+
+ free_meta = kasan_get_free_meta(cache, object);
+ if (free_meta)
+ release_free_meta(object, free_meta);
}
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
@@ -472,12 +559,14 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
sizeof(struct kasan_free_meta) : 0);
}
-static void __kasan_record_aux_stack(void *addr, bool can_alloc)
+static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
struct slab *slab = kasan_addr_to_slab(addr);
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_meta;
void *object;
+ depot_stack_handle_t new_handle, old_handle;
+ unsigned long flags;
if (is_kfence_address(addr) || !slab)
return;
@@ -488,18 +577,33 @@ static void __kasan_record_aux_stack(void *addr, bool can_alloc)
if (!alloc_meta)
return;
+ new_handle = kasan_save_stack(0, depot_flags);
+
+ /*
+ * Temporarily disable KASAN bug reporting to allow instrumented
+ * spinlock functions to access aux_lock, which resides inside of a
+ * redzone.
+ */
+ kasan_disable_current();
+ raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
+ old_handle = alloc_meta->aux_stack[1];
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = kasan_save_stack(0, can_alloc);
+ alloc_meta->aux_stack[0] = new_handle;
+ raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
+ kasan_enable_current();
+
+ stack_depot_put(old_handle);
}
void kasan_record_aux_stack(void *addr)
{
- return __kasan_record_aux_stack(addr, true);
+ return __kasan_record_aux_stack(addr,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
}
void kasan_record_aux_stack_noalloc(void *addr)
{
- return __kasan_record_aux_stack(addr, false);
+ return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -507,8 +611,13 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
struct kasan_alloc_meta *alloc_meta;
alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- kasan_set_track(&alloc_meta->alloc_track, flags);
+ if (!alloc_meta)
+ return;
+
+ /* Evict previous stack traces (might exist for krealloc or mempool). */
+ release_alloc_meta(alloc_meta);
+
+ kasan_save_track(&alloc_meta->alloc_track, flags);
}
void kasan_save_free_info(struct kmem_cache *cache, void *object)
@@ -519,7 +628,11 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
if (!free_meta)
return;
- kasan_set_track(&free_meta->free_track, 0);
- /* The object was freed and has free track set. */
- *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
+ /* Evict previous stack trace (might exist for mempool). */
+ release_free_meta(object, free_meta);
+
+ kasan_save_track(&free_meta->free_track, 0);
+
+ /* Mark free meta as valid. */
+ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}
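Editorial sketch (not part of this patch) of the reference-counting discipline the generic.c hunks above follow: every stack handle saved with STACK_DEPOT_FLAG_GET is later dropped with stack_depot_put() when the metadata owning it is released or overwritten. The helper below is hypothetical; the real code additionally serializes aux_stack updates with aux_lock.

static void sketch_swap_track(struct kasan_track *track, gfp_t flags)
{
	depot_stack_handle_t new_handle, old_handle = track->stack;

	new_handle = kasan_save_stack(flags,
			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
	track->stack = new_handle;
	/* Drop the reference pinned for the trace being replaced. */
	stack_depot_put(old_handle);
}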
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 06141bbc1e51..2b994092a2d4 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -57,7 +57,12 @@ enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_mode);
/* Whether to enable vmalloc tagging. */
+#ifdef CONFIG_KASAN_VMALLOC
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
+#else
+DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc);
+#endif
+EXPORT_SYMBOL_GPL(kasan_flag_vmalloc);
#define PAGE_ALLOC_SAMPLE_DEFAULT 1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3
@@ -119,6 +124,9 @@ static int __init early_kasan_flag_vmalloc(char *arg)
if (!arg)
return -EINVAL;
+ if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ return 0;
+
if (!strcmp(arg, "off"))
kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF;
else if (!strcmp(arg, "on"))
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index eef50233640a..d0f172f2b978 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -6,6 +6,7 @@
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
+#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -48,6 +49,7 @@ DECLARE_PER_CPU(long, kasan_page_alloc_skip);
static inline bool kasan_vmalloc_enabled(void)
{
+ /* Static branch is never enabled with CONFIG_KASAN_VMALLOC disabled. */
return static_branch_likely(&kasan_flag_vmalloc);
}
@@ -81,6 +83,11 @@ static inline bool kasan_sample_page_alloc(unsigned int order)
#else /* CONFIG_KASAN_HW_TAGS */
+static inline bool kasan_vmalloc_enabled(void)
+{
+ return IS_ENABLED(CONFIG_KASAN_VMALLOC);
+}
+
static inline bool kasan_async_fault_possible(void)
{
return false;
@@ -100,21 +107,21 @@ static inline bool kasan_sample_page_alloc(unsigned int order)
#ifdef CONFIG_KASAN_GENERIC
-/* Generic KASAN uses per-object metadata to store stack traces. */
+/*
+ * Generic KASAN uses per-object metadata to store alloc and free stack traces
+ * and the quarantine link.
+ */
static inline bool kasan_requires_meta(void)
{
- /*
- * Technically, Generic KASAN always collects stack traces right now.
- * However, let's use kasan_stack_collection_enabled() in case the
- * kasan.stacktrace command-line argument is changed to affect
- * Generic KASAN.
- */
- return kasan_stack_collection_enabled();
+ return true;
}
#else /* CONFIG_KASAN_GENERIC */
-/* Tag-based KASAN modes do not use per-object metadata. */
+/*
+ * Tag-based KASAN modes do not use per-object metadata: they use the stack
+ * ring to store alloc and free stack traces and do not use quarantine.
+ */
static inline bool kasan_requires_meta(void)
{
return false;
@@ -149,7 +156,7 @@ static inline bool kasan_requires_meta(void)
#ifdef CONFIG_KASAN_GENERIC
-#define KASAN_SLAB_FREETRACK 0xFA /* freed slab object with free track */
+#define KASAN_SLAB_FREE_META 0xFA /* freed slab object with free meta */
#define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */
/* Stack redzone shadow values. Compiler ABI, do not change. */
@@ -187,6 +194,10 @@ static inline bool kasan_requires_meta(void)
struct kasan_track {
u32 pid;
depot_stack_handle_t stack;
+#ifdef CONFIG_KASAN_EXTRA_INFO
+ u64 cpu:20;
+ u64 timestamp:44;
+#endif /* CONFIG_KASAN_EXTRA_INFO */
};
enum kasan_report_type {
@@ -242,9 +253,25 @@ struct kasan_global {
#ifdef CONFIG_KASAN_GENERIC
+/*
+ * Alloc meta contains the allocation-related information about a slab object.
+ * Alloc meta is saved when an object is allocated and is kept until either the
+ * object returns to the slab freelist (leaves quarantine for quarantined
+ * objects or gets freed for the non-quarantined ones) or reallocated via
+ * krealloc or through a mempool.
+ * Alloc meta is stored inside of the object's redzone.
+ * Alloc meta is considered valid whenever it contains non-zero data.
+ */
struct kasan_alloc_meta {
struct kasan_track alloc_track;
/* Free track is stored in kasan_free_meta. */
+ /*
+ * aux_lock protects aux_stack from accesses from concurrent
+ * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
+ * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
+ * non-sleepable contexts.
+ */
+ raw_spinlock_t aux_lock;
depot_stack_handle_t aux_stack[2];
};
@@ -260,8 +287,12 @@ struct qlist_node {
#define KASAN_NO_FREE_META INT_MAX
/*
- * Free meta is only used by Generic mode while the object is in quarantine.
- * After that, slab allocator stores the freelist pointer in the object.
+ * Free meta contains the freeing-related information about a slab object.
+ * Free meta is only kept for quarantined objects and for mempool objects until
+ * the object gets allocated again.
+ * Free meta is stored within the object's memory.
+ * Free meta is considered valid whenever the value of the shadow byte that
+ * corresponds to the first 8 bytes of the object is KASAN_SLAB_FREE_META.
*/
struct kasan_free_meta {
struct qlist_node quarantine_link;
@@ -275,8 +306,7 @@ struct kasan_free_meta {
struct kasan_stack_ring_entry {
void *ptr;
size_t size;
- u32 pid;
- depot_stack_handle_t stack;
+ struct kasan_track track;
bool is_free;
};
@@ -291,6 +321,12 @@ struct kasan_stack_ring {
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+static __always_inline bool addr_in_shadow(const void *addr)
+{
+ return addr >= (void *)KASAN_SHADOW_START &&
+ addr < (void *)KASAN_SHADOW_END;
+}
+
#ifndef kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
@@ -357,19 +393,20 @@ void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report
struct slab *kasan_addr_to_slab(const void *addr);
#ifdef CONFIG_KASAN_GENERIC
-void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size);
-void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
const void *object);
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object);
+void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
+void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
#else
-static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { }
static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
+static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
#endif
-depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
-void kasan_set_track(struct kasan_track *track, gfp_t flags);
+depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
+void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack);
+void kasan_save_track(struct kasan_track *track, gfp_t flags);
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
void kasan_save_free_info(struct kmem_cache *cache, void *object);
@@ -443,35 +480,23 @@ static inline u8 kasan_random_tag(void) { return 0; }
static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
- addr = kasan_reset_tag(addr);
-
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
if (WARN_ON(size & KASAN_GRANULE_MASK))
return;
- hw_set_mem_tag_range((void *)addr, size, value, init);
+ hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
}
static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
u8 tag = get_tag(addr);
- addr = kasan_reset_tag(addr);
-
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
size = round_up(size, KASAN_GRANULE_SIZE);
- hw_set_mem_tag_range((void *)addr, size, tag, init);
+ hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init);
}
static inline bool kasan_byte_accessible(const void *addr)
@@ -490,8 +515,6 @@ static inline bool kasan_byte_accessible(const void *addr)
* @size - range size, must be aligned to KASAN_GRANULE_SIZE
* @value - value that's written to metadata for the range
* @init - whether to initialize the memory range (only for hardware tag-based)
- *
- * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
*/
void kasan_poison(const void *addr, size_t size, u8 value, bool init);
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 34515a106ca5..971cfff4ca0b 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
+#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
@@ -213,17 +214,32 @@ static void kmalloc_node_oob_right(struct kunit *test)
}
/*
- * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
- * fit into a slab cache and therefore is allocated via the page allocator
- * fallback. Since this kind of fallback is only implemented for SLUB, these
- * tests are limited to that allocator.
+ * Check that KASAN detects an out-of-bounds access for a big object allocated
+ * via kmalloc(), but not so big as to trigger the page_alloc fallback.
*/
-static void kmalloc_pagealloc_oob_right(struct kunit *test)
+static void kmalloc_big_oob_right(struct kunit *test)
{
char *ptr;
- size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+ size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ OPTIMIZER_HIDE_VAR(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ kfree(ptr);
+}
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
+/*
+ * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
+ * that does not fit into the largest slab cache and therefore is allocated via
+ * the page_alloc fallback.
+ */
+
+static void kmalloc_large_oob_right(struct kunit *test)
+{
+ char *ptr;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -234,13 +250,11 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test)
kfree(ptr);
}
-static void kmalloc_pagealloc_uaf(struct kunit *test)
+static void kmalloc_large_uaf(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
@@ -248,20 +262,18 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
-static void kmalloc_pagealloc_invalid_free(struct kunit *test)
+static void kmalloc_large_invalid_free(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
-static void pagealloc_oob_right(struct kunit *test)
+static void page_alloc_oob_right(struct kunit *test)
{
char *ptr;
struct page *pages;
@@ -283,7 +295,7 @@ static void pagealloc_oob_right(struct kunit *test)
free_pages((unsigned long)ptr, order);
}
-static void pagealloc_uaf(struct kunit *test)
+static void page_alloc_uaf(struct kunit *test)
{
char *ptr;
struct page *pages;
@@ -297,23 +309,6 @@ static void pagealloc_uaf(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
-static void kmalloc_large_oob_right(struct kunit *test)
-{
- char *ptr;
- size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
-
- /*
- * Allocate a chunk that is large enough, but still fits into a slab
- * and does not trigger the page allocator fallback in SLUB.
- */
- ptr = kmalloc(size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
- OPTIMIZER_HIDE_VAR(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
- kfree(ptr);
-}
-
static void krealloc_more_oob_helper(struct kunit *test,
size_t size1, size_t size2)
{
@@ -403,20 +398,14 @@ static void krealloc_less_oob(struct kunit *test)
krealloc_less_oob_helper(test, 235, 201);
}
-static void krealloc_pagealloc_more_oob(struct kunit *test)
+static void krealloc_large_more_oob(struct kunit *test)
{
- /* page_alloc fallback in only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
KMALLOC_MAX_CACHE_SIZE + 235);
}
-static void krealloc_pagealloc_less_oob(struct kunit *test)
+static void krealloc_large_less_oob(struct kunit *test)
{
- /* page_alloc fallback in only implemented for SLUB. */
- KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
-
krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
KMALLOC_MAX_CACHE_SIZE + 201);
}
@@ -708,6 +697,126 @@ static void kmalloc_uaf3(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}
+static void kmalloc_double_kzfree(struct kunit *test)
+{
+ char *ptr;
+ size_t size = 16;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ kfree_sensitive(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
+}
+
+/* Check that ksize() does NOT unpoison the whole object. */
+static void ksize_unpoisons_memory(struct kunit *test)
+{
+ char *ptr;
+ size_t size = 128 - KASAN_GRANULE_SIZE - 5;
+ size_t real_size;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ real_size = ksize(ptr);
+ KUNIT_EXPECT_GT(test, real_size, size);
+
+ OPTIMIZER_HIDE_VAR(ptr);
+
+ /* These accesses shouldn't trigger a KASAN report. */
+ ptr[0] = 'x';
+ ptr[size - 1] = 'x';
+
+ /* These must trigger a KASAN report. */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
+
+ kfree(ptr);
+}
+
+/*
+ * Check that a use-after-free is detected by ksize() and via normal accesses
+ * after it.
+ */
+static void ksize_uaf(struct kunit *test)
+{
+ char *ptr;
+ int size = 128 - KASAN_GRANULE_SIZE;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ kfree(ptr);
+
+ OPTIMIZER_HIDE_VAR(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
+}
+
+/*
+ * The two tests below check that Generic KASAN prints auxiliary stack traces
+ * for RCU callbacks and workqueues. The reports need to be inspected manually.
+ *
+ * These tests are still enabled for other KASAN modes to make sure that all
+ * modes report bad accesses in tested scenarios.
+ */
+
+static struct kasan_rcu_info {
+ int i;
+ struct rcu_head rcu;
+} *global_rcu_ptr;
+
+static void rcu_uaf_reclaim(struct rcu_head *rp)
+{
+ struct kasan_rcu_info *fp =
+ container_of(rp, struct kasan_rcu_info, rcu);
+
+ kfree(fp);
+ ((volatile struct kasan_rcu_info *)fp)->i;
+}
+
+static void rcu_uaf(struct kunit *test)
+{
+ struct kasan_rcu_info *ptr;
+
+ ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ global_rcu_ptr = rcu_dereference_protected(
+ (struct kasan_rcu_info __rcu *)ptr, NULL);
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
+ rcu_barrier());
+}
+
+static void workqueue_uaf_work(struct work_struct *work)
+{
+ kfree(work);
+}
+
+static void workqueue_uaf(struct kunit *test)
+{
+ struct workqueue_struct *workqueue;
+ struct work_struct *work;
+
+ workqueue = create_workqueue("kasan_workqueue_test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
+
+ work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
+
+ INIT_WORK(work, workqueue_uaf_work);
+ queue_work(workqueue, work);
+ destroy_workqueue(workqueue);
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ((volatile struct work_struct *)work)->data);
+}
+
static void kfree_via_page(struct kunit *test)
{
char *ptr;
@@ -758,6 +867,69 @@ static void kmem_cache_oob(struct kunit *test)
kmem_cache_destroy(cache);
}
+static void kmem_cache_double_free(struct kunit *test)
+{
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+ kmem_cache_free(cache, p);
+ KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
+ kmem_cache_destroy(cache);
+}
+
+static void kmem_cache_invalid_free(struct kunit *test)
+{
+ char *p;
+ size_t size = 200;
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p) {
+ kunit_err(test, "Allocation failed: %s\n", __func__);
+ kmem_cache_destroy(cache);
+ return;
+ }
+
+	/* Trigger an invalid free; the object doesn't get freed. */
+ KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
+
+ /*
+ * Properly free the object to prevent the "Objects remaining in
+ * test_cache on __kmem_cache_shutdown" BUG failure.
+ */
+ kmem_cache_free(cache, p);
+
+ kmem_cache_destroy(cache);
+}
+
+static void empty_cache_ctor(void *object) { }
+
+static void kmem_cache_double_destroy(struct kunit *test)
+{
+ struct kmem_cache *cache;
+
+ /* Provide a constructor to prevent cache merging. */
+ cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+ kmem_cache_destroy(cache);
+ KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
+}
+
static void kmem_cache_accounted(struct kunit *test)
{
int i;
@@ -810,6 +982,303 @@ static void kmem_cache_bulk(struct kunit *test)
kmem_cache_destroy(cache);
}
+static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
+{
+ int pool_size = 4;
+ int ret;
+ void *elem;
+
+ memset(pool, 0, sizeof(*pool));
+ ret = mempool_init_kmalloc_pool(pool, pool_size, size);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /*
+ * Allocate one element to prevent mempool from freeing elements to the
+ * underlying allocator and instead make it add them to the element
+ * list when the tests trigger double-free and invalid-free bugs.
+ * This allows testing KASAN annotations in add_element().
+ */
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ return elem;
+}
+
+static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
+{
+ struct kmem_cache *cache;
+ int pool_size = 4;
+ int ret;
+
+ cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+ memset(pool, 0, sizeof(*pool));
+ ret = mempool_init_slab_pool(pool, pool_size, cache);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /*
+ * Do not allocate one preallocated element, as we skip the double-free
+ * and invalid-free tests for slab mempool for simplicity.
+ */
+
+ return cache;
+}
+
+static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
+{
+ int pool_size = 4;
+ int ret;
+ void *elem;
+
+ memset(pool, 0, sizeof(*pool));
+ ret = mempool_init_page_pool(pool, pool_size, order);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ return elem;
+}
+
+static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
+{
+ char *elem;
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ OPTIMIZER_HIDE_VAR(elem);
+
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ((volatile char *)&elem[size])[0]);
+ else
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ ((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
+
+ mempool_free(elem, pool);
+}
+
+static void mempool_kmalloc_oob_right(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 128 - KASAN_GRANULE_SIZE - 5;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_oob_right_helper(test, &pool, size);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_large_oob_right(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_oob_right_helper(test, &pool, size);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_slab_oob_right(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 123;
+ struct kmem_cache *cache;
+
+ cache = mempool_prepare_slab(test, &pool, size);
+
+ mempool_oob_right_helper(test, &pool, size);
+
+ mempool_exit(&pool);
+ kmem_cache_destroy(cache);
+}
+
+/*
+ * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
+ * allocations have no redzones, and thus the out-of-bounds detection is not
+ * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
+ * the tag-based KASAN modes, the neighboring allocation might have the same
+ * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
+ */
+
+static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
+{
+ char *elem, *ptr;
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ mempool_free(elem, pool);
+
+ ptr = page ? page_address((struct page *)elem) : elem;
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+}
+
+static void mempool_kmalloc_uaf(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 128;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_uaf_helper(test, &pool, false);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_large_uaf(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_uaf_helper(test, &pool, false);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_slab_uaf(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 123;
+ struct kmem_cache *cache;
+
+ cache = mempool_prepare_slab(test, &pool, size);
+
+ mempool_uaf_helper(test, &pool, false);
+
+ mempool_exit(&pool);
+ kmem_cache_destroy(cache);
+}
+
+static void mempool_page_alloc_uaf(struct kunit *test)
+{
+ mempool_t pool;
+ int order = 2;
+ void *extra_elem;
+
+ extra_elem = mempool_prepare_page(test, &pool, order);
+
+ mempool_uaf_helper(test, &pool, true);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
+{
+ char *elem;
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ mempool_free(elem, pool);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
+}
+
+static void mempool_kmalloc_double_free(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 128;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_double_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_large_double_free(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_double_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_page_alloc_double_free(struct kunit *test)
+{
+ mempool_t pool;
+ int order = 2;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_page(test, &pool, order);
+
+ mempool_double_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
+{
+ char *elem;
+
+ elem = mempool_alloc_preallocated(pool);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));
+
+ mempool_free(elem, pool);
+}
+
+static void mempool_kmalloc_invalid_free(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = 128;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_kmalloc_invalid_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+static void mempool_kmalloc_large_invalid_free(struct kunit *test)
+{
+ mempool_t pool;
+ size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
+ char *extra_elem;
+
+ extra_elem = mempool_prepare_kmalloc(test, &pool, size);
+
+ mempool_kmalloc_invalid_free_helper(test, &pool);
+
+ mempool_free(extra_elem, &pool);
+ mempool_exit(&pool);
+}
+
+/*
+ * Skip the invalid-free test for page mempool. The invalid-free detection only
+ * works for compound pages and mempool preallocates all page elements without
+ * the __GFP_COMP flag.
+ */
+
static char global_array[10];
static void kasan_global_oob_right(struct kunit *test)
@@ -849,53 +1318,6 @@ static void kasan_global_oob_left(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-/* Check that ksize() does NOT unpoison whole object. */
-static void ksize_unpoisons_memory(struct kunit *test)
-{
- char *ptr;
- size_t size = 128 - KASAN_GRANULE_SIZE - 5;
- size_t real_size;
-
- ptr = kmalloc(size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
- real_size = ksize(ptr);
- KUNIT_EXPECT_GT(test, real_size, size);
-
- OPTIMIZER_HIDE_VAR(ptr);
-
- /* These accesses shouldn't trigger a KASAN report. */
- ptr[0] = 'x';
- ptr[size - 1] = 'x';
-
- /* These must trigger a KASAN report. */
- if (IS_ENABLED(CONFIG_KASAN_GENERIC))
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
-
- kfree(ptr);
-}
-
-/*
- * Check that a use-after-free is detected by ksize() and via normal accesses
- * after it.
- */
-static void ksize_uaf(struct kunit *test)
-{
- char *ptr;
- int size = 128 - KASAN_GRANULE_SIZE;
-
- ptr = kmalloc(size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- kfree(ptr);
-
- OPTIMIZER_HIDE_VAR(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
- KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
-}
-
static void kasan_stack_oob(struct kunit *test)
{
char stack_array[10];
@@ -938,69 +1360,6 @@ static void kasan_alloca_oob_right(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
-static void kmem_cache_double_free(struct kunit *test)
-{
- char *p;
- size_t size = 200;
- struct kmem_cache *cache;
-
- cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
-
- p = kmem_cache_alloc(cache, GFP_KERNEL);
- if (!p) {
- kunit_err(test, "Allocation failed: %s\n", __func__);
- kmem_cache_destroy(cache);
- return;
- }
-
- kmem_cache_free(cache, p);
- KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
- kmem_cache_destroy(cache);
-}
-
-static void kmem_cache_invalid_free(struct kunit *test)
-{
- char *p;
- size_t size = 200;
- struct kmem_cache *cache;
-
- cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
- NULL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
-
- p = kmem_cache_alloc(cache, GFP_KERNEL);
- if (!p) {
- kunit_err(test, "Allocation failed: %s\n", __func__);
- kmem_cache_destroy(cache);
- return;
- }
-
- /* Trigger invalid free, the object doesn't get freed. */
- KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
-
- /*
- * Properly free the object to prevent the "Objects remaining in
- * test_cache on __kmem_cache_shutdown" BUG failure.
- */
- kmem_cache_free(cache, p);
-
- kmem_cache_destroy(cache);
-}
-
-static void empty_cache_ctor(void *object) { }
-
-static void kmem_cache_double_destroy(struct kunit *test)
-{
- struct kmem_cache *cache;
-
- /* Provide a constructor to prevent cache merging. */
- cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
- kmem_cache_destroy(cache);
- KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
-}
-
static void kasan_memchr(struct kunit *test)
{
char *ptr;
@@ -1162,79 +1521,6 @@ static void kasan_bitops_tags(struct kunit *test)
kfree(bits);
}
-static void kmalloc_double_kzfree(struct kunit *test)
-{
- char *ptr;
- size_t size = 16;
-
- ptr = kmalloc(size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
- kfree_sensitive(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
-}
-
-/*
- * The two tests below check that Generic KASAN prints auxiliary stack traces
- * for RCU callbacks and workqueues. The reports need to be inspected manually.
- *
- * These tests are still enabled for other KASAN modes to make sure that all
- * modes report bad accesses in tested scenarios.
- */
-
-static struct kasan_rcu_info {
- int i;
- struct rcu_head rcu;
-} *global_rcu_ptr;
-
-static void rcu_uaf_reclaim(struct rcu_head *rp)
-{
- struct kasan_rcu_info *fp =
- container_of(rp, struct kasan_rcu_info, rcu);
-
- kfree(fp);
- ((volatile struct kasan_rcu_info *)fp)->i;
-}
-
-static void rcu_uaf(struct kunit *test)
-{
- struct kasan_rcu_info *ptr;
-
- ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
-
- global_rcu_ptr = rcu_dereference_protected(
- (struct kasan_rcu_info __rcu *)ptr, NULL);
-
- KUNIT_EXPECT_KASAN_FAIL(test,
- call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
- rcu_barrier());
-}
-
-static void workqueue_uaf_work(struct work_struct *work)
-{
- kfree(work);
-}
-
-static void workqueue_uaf(struct kunit *test)
-{
- struct workqueue_struct *workqueue;
- struct work_struct *work;
-
- workqueue = create_workqueue("kasan_workqueue_test");
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
-
- work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
-
- INIT_WORK(work, workqueue_uaf_work);
- queue_work(workqueue, work);
- destroy_workqueue(workqueue);
-
- KUNIT_EXPECT_KASAN_FAIL(test,
- ((volatile struct work_struct *)work)->data);
-}
-
static void vmalloc_helpers_tags(struct kunit *test)
{
void *ptr;
@@ -1244,6 +1530,9 @@ static void vmalloc_helpers_tags(struct kunit *test)
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+ if (!kasan_vmalloc_enabled())
+ kunit_skip(test, "Test requires kasan.vmalloc=on");
+
ptr = vmalloc(PAGE_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -1278,6 +1567,9 @@ static void vmalloc_oob(struct kunit *test)
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+ if (!kasan_vmalloc_enabled())
+ kunit_skip(test, "Test requires kasan.vmalloc=on");
+
v_ptr = vmalloc(size);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
@@ -1331,6 +1623,9 @@ static void vmap_tags(struct kunit *test)
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
+ if (!kasan_vmalloc_enabled())
+ kunit_skip(test, "Test requires kasan.vmalloc=on");
+
p_page = alloc_pages(GFP_KERNEL, 1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
p_ptr = page_address(p_page);
@@ -1449,7 +1744,7 @@ static void match_all_not_assigned(struct kunit *test)
free_pages((unsigned long)ptr, order);
}
- if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ if (!kasan_vmalloc_enabled())
return;
for (i = 0; i < 256; i++) {
@@ -1502,6 +1797,14 @@ static void match_all_mem_tag(struct kunit *test)
/* For each possible tag value not matching the pointer tag. */
for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
+ /*
+ * For Software Tag-Based KASAN, skip the majority of tag
+ * values to avoid the test printing too many reports.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
+ tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
+ continue;
+
if (tag == get_tag(ptr))
continue;
@@ -1521,16 +1824,16 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
KUNIT_CASE(kmalloc_node_oob_right),
- KUNIT_CASE(kmalloc_pagealloc_oob_right),
- KUNIT_CASE(kmalloc_pagealloc_uaf),
- KUNIT_CASE(kmalloc_pagealloc_invalid_free),
- KUNIT_CASE(pagealloc_oob_right),
- KUNIT_CASE(pagealloc_uaf),
+ KUNIT_CASE(kmalloc_big_oob_right),
KUNIT_CASE(kmalloc_large_oob_right),
+ KUNIT_CASE(kmalloc_large_uaf),
+ KUNIT_CASE(kmalloc_large_invalid_free),
+ KUNIT_CASE(page_alloc_oob_right),
+ KUNIT_CASE(page_alloc_uaf),
KUNIT_CASE(krealloc_more_oob),
KUNIT_CASE(krealloc_less_oob),
- KUNIT_CASE(krealloc_pagealloc_more_oob),
- KUNIT_CASE(krealloc_pagealloc_less_oob),
+ KUNIT_CASE(krealloc_large_more_oob),
+ KUNIT_CASE(krealloc_large_less_oob),
KUNIT_CASE(krealloc_uaf),
KUNIT_CASE(kmalloc_oob_16),
KUNIT_CASE(kmalloc_uaf_16),
@@ -1545,29 +1848,41 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_uaf_memset),
KUNIT_CASE(kmalloc_uaf2),
KUNIT_CASE(kmalloc_uaf3),
+ KUNIT_CASE(kmalloc_double_kzfree),
+ KUNIT_CASE(ksize_unpoisons_memory),
+ KUNIT_CASE(ksize_uaf),
+ KUNIT_CASE(rcu_uaf),
+ KUNIT_CASE(workqueue_uaf),
KUNIT_CASE(kfree_via_page),
KUNIT_CASE(kfree_via_phys),
KUNIT_CASE(kmem_cache_oob),
+ KUNIT_CASE(kmem_cache_double_free),
+ KUNIT_CASE(kmem_cache_invalid_free),
+ KUNIT_CASE(kmem_cache_double_destroy),
KUNIT_CASE(kmem_cache_accounted),
KUNIT_CASE(kmem_cache_bulk),
+ KUNIT_CASE(mempool_kmalloc_oob_right),
+ KUNIT_CASE(mempool_kmalloc_large_oob_right),
+ KUNIT_CASE(mempool_slab_oob_right),
+ KUNIT_CASE(mempool_kmalloc_uaf),
+ KUNIT_CASE(mempool_kmalloc_large_uaf),
+ KUNIT_CASE(mempool_slab_uaf),
+ KUNIT_CASE(mempool_page_alloc_uaf),
+ KUNIT_CASE(mempool_kmalloc_double_free),
+ KUNIT_CASE(mempool_kmalloc_large_double_free),
+ KUNIT_CASE(mempool_page_alloc_double_free),
+ KUNIT_CASE(mempool_kmalloc_invalid_free),
+ KUNIT_CASE(mempool_kmalloc_large_invalid_free),
KUNIT_CASE(kasan_global_oob_right),
KUNIT_CASE(kasan_global_oob_left),
KUNIT_CASE(kasan_stack_oob),
KUNIT_CASE(kasan_alloca_oob_left),
KUNIT_CASE(kasan_alloca_oob_right),
- KUNIT_CASE(ksize_unpoisons_memory),
- KUNIT_CASE(ksize_uaf),
- KUNIT_CASE(kmem_cache_double_free),
- KUNIT_CASE(kmem_cache_invalid_free),
- KUNIT_CASE(kmem_cache_double_destroy),
KUNIT_CASE(kasan_memchr),
KUNIT_CASE(kasan_memcmp),
KUNIT_CASE(kasan_strings),
KUNIT_CASE(kasan_bitops_generic),
KUNIT_CASE(kasan_bitops_tags),
- KUNIT_CASE(kmalloc_double_kzfree),
- KUNIT_CASE(rcu_uaf),
- KUNIT_CASE(workqueue_uaf),
KUNIT_CASE(vmalloc_helpers_tags),
KUNIT_CASE(vmalloc_oob),
KUNIT_CASE(vmap_tags),
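For reference, the offset arithmetic behind mempool_oob_right_helper() above can be sketched in plain userspace C: Generic KASAN places a redzone immediately after the requested size, while the tag-based modes can only catch accesses past the size rounded up to the KASAN granule. The granule value of 16 below is an assumption made for illustration only; the real KASAN_GRANULE_SIZE depends on the KASAN mode and architecture.

#include <stdio.h>
#include <stddef.h>

#define GRANULE 16	/* assumed granule, for illustration */

static size_t round_up_to(size_t x, size_t g)
{
	return (x + g - 1) & ~(g - 1);	/* g must be a power of two */
}

int main(void)
{
	size_t size = 128 - GRANULE - 5;	/* mirrors "128 - KASAN_GRANULE_SIZE - 5" */

	printf("generic KASAN can detect an access at offset %zu\n", size);
	printf("tag-based modes can detect one from offset %zu\n",
	       round_up_to(size, GRANULE));
	return 0;
}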
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 138c57b836f2..3ba02efb952a 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -143,7 +143,9 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
void *object = qlink_to_object(qlink, cache);
- struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
+ struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
+
+ kasan_release_object_meta(cache, object);
/*
* If init_on_free is enabled and KASAN's free metadata is stored in
@@ -153,13 +155,7 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
*/
if (slab_want_init_on_free(cache) &&
cache->kasan_info.free_meta_offset == 0)
- memzero_explicit(meta, sizeof(*meta));
-
- /*
- * As the object now gets freed from the quarantine, assume that its
- * free track is no longer valid.
- */
- *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
+ memzero_explicit(free_meta, sizeof(*free_meta));
___cache_free(cache, object, _THIS_IP_);
}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 011f727bfaff..7afa4feb03e1 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -263,7 +263,19 @@ static void print_error_description(struct kasan_report_info *info)
static void print_track(struct kasan_track *track, const char *prefix)
{
+#ifdef CONFIG_KASAN_EXTRA_INFO
+ u64 ts_nsec = track->timestamp;
+ unsigned long rem_usec;
+
+ ts_nsec <<= 3;
+ rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;
+
+ pr_err("%s by task %u on cpu %d at %lu.%06lus:\n",
+ prefix, track->pid, track->cpu,
+ (unsigned long)ts_nsec, rem_usec);
+#else
pr_err("%s by task %u:\n", prefix, track->pid);
+#endif /* CONFIG_KASAN_EXTRA_INFO */
if (track->stack)
stack_depot_print(track->stack);
else
@@ -624,37 +636,43 @@ void kasan_report_async(void)
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
- * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
- * canonical half of the address space) cause out-of-bounds shadow memory reads
- * before the actual access. For addresses in the low canonical half of the
- * address space, as well as most non-canonical addresses, that out-of-bounds
- * shadow memory access lands in the non-canonical part of the address space.
- * Help the user figure out what the original bogus pointer was.
+ * With compiler-based KASAN modes, accesses to bogus pointers (outside of the
+ * mapped kernel address space regions) cause faults when KASAN tries to check
+ * the shadow memory before the actual memory access. This results in cryptic
+ * GPF reports, which are hard for users to interpret. This hook helps users to
+ * figure out what the original bogus pointer was.
*/
void kasan_non_canonical_hook(unsigned long addr)
{
unsigned long orig_addr;
const char *bug_type;
+ /*
+ * All addresses that came as a result of the memory-to-shadow mapping
+ * (even for bogus pointers) must be >= KASAN_SHADOW_OFFSET.
+ */
if (addr < KASAN_SHADOW_OFFSET)
return;
- orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
+ orig_addr = (unsigned long)kasan_shadow_to_mem((void *)addr);
+
/*
* For faults near the shadow address for NULL, we can be fairly certain
* that this is a KASAN shadow memory access.
- * For faults that correspond to shadow for low canonical addresses, we
- * can still be pretty sure - that shadow region is a fairly narrow
- * chunk of the non-canonical address space.
- * But faults that look like shadow for non-canonical addresses are a
- * really large chunk of the address space. In that case, we still
- * print the decoded address, but make it clear that this is not
- * necessarily what's actually going on.
+ * For faults that correspond to the shadow for low or high canonical
+ * addresses, we can still be pretty sure: these shadow regions are a
+ * fairly narrow chunk of the address space.
+ * But the shadow for non-canonical addresses is a really large chunk
+ * of the address space. For this case, we still print the decoded
+ * address, but make it clear that this is not necessarily what's
+ * actually going on.
*/
if (orig_addr < PAGE_SIZE)
bug_type = "null-ptr-deref";
else if (orig_addr < TASK_SIZE)
bug_type = "probably user-memory-access";
+ else if (addr_in_shadow((void *)addr))
+ bug_type = "probably wild-memory-access";
else
bug_type = "maybe wild-memory-access";
pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
index 99cbcd73cff7..f5b8e37b3805 100644
--- a/mm/kasan/report_generic.c
+++ b/mm/kasan/report_generic.c
@@ -110,7 +110,7 @@ static const char *get_shadow_bug_type(struct kasan_report_info *info)
bug_type = "use-after-free";
break;
case KASAN_SLAB_FREE:
- case KASAN_SLAB_FREETRACK:
+ case KASAN_SLAB_FREE_META:
bug_type = "slab-use-after-free";
break;
case KASAN_ALLOCA_LEFT:
@@ -173,8 +173,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
memcpy(&info->alloc_track, &alloc_meta->alloc_track,
sizeof(info->alloc_track));
- if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREETRACK) {
- /* Free meta must be present with KASAN_SLAB_FREETRACK. */
+ if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREE_META) {
+ /* Free meta must be present with KASAN_SLAB_FREE_META. */
free_meta = kasan_get_free_meta(info->cache, info->object);
memcpy(&info->free_track, &free_meta->free_track,
sizeof(info->free_track));
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
index 8b8bfdb3cfdb..d15f8f580e2c 100644
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include "kasan.h"
+#include "../slab.h"
extern struct kasan_stack_ring stack_ring;
@@ -31,10 +32,6 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
unsigned long flags;
u64 pos;
struct kasan_stack_ring_entry *entry;
- void *ptr;
- u32 pid;
- depot_stack_handle_t stack;
- bool is_free;
bool alloc_found = false, free_found = false;
if ((!info->cache || !info->object) && !info->bug_type) {
@@ -61,18 +58,12 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
entry = &stack_ring.entries[i % stack_ring.size];
- /* Paired with smp_store_release() in save_stack_info(). */
- ptr = (void *)smp_load_acquire(&entry->ptr);
-
- if (kasan_reset_tag(ptr) != info->object ||
- get_tag(ptr) != get_tag(info->access_addr))
+ if (kasan_reset_tag(entry->ptr) != info->object ||
+ get_tag(entry->ptr) != get_tag(info->access_addr) ||
+ info->cache->object_size != entry->size)
continue;
- pid = READ_ONCE(entry->pid);
- stack = READ_ONCE(entry->stack);
- is_free = READ_ONCE(entry->is_free);
-
- if (is_free) {
+ if (entry->is_free) {
/*
* Second free of the same object.
* Give up on trying to find the alloc entry.
@@ -80,8 +71,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
if (free_found)
break;
- info->free_track.pid = pid;
- info->free_track.stack = stack;
+ memcpy(&info->free_track, &entry->track,
+ sizeof(info->free_track));
free_found = true;
/*
@@ -95,8 +86,8 @@ void kasan_complete_mode_report_info(struct kasan_report_info *info)
if (alloc_found)
break;
- info->alloc_track.pid = pid;
- info->alloc_track.stack = stack;
+ memcpy(&info->alloc_track, &entry->track,
+ sizeof(info->alloc_track));
alloc_found = true;
/*
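The matching loop above walks the stack ring backwards from the current write position and keeps the most recent free and alloc entries recorded for the faulting object. A userspace sketch of that backwards scan over a fixed-size ring, with the entry layout simplified and all names illustrative:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 8

struct entry {
	uintptr_t ptr;
	bool is_free;
	int track;	/* stands in for the pid/stack/timestamp metadata */
};

static struct entry ring[RING_SIZE];
static uint64_t pos;	/* next slot to write, monotonically increasing */

static void record(uintptr_t ptr, bool is_free, int track)
{
	ring[pos++ % RING_SIZE] = (struct entry){ ptr, is_free, track };
}

int main(void)
{
	uintptr_t object = 0x1000;	/* hypothetical faulting object */
	int alloc_track = -1, free_track = -1;

	record(0x1000, false, 1);	/* alloc */
	record(0x2000, false, 2);	/* unrelated object */
	record(0x1000, true, 3);	/* free */

	/* Scan backwards so the most recent matching entries win. */
	for (uint64_t i = pos; i > 0 && i + RING_SIZE > pos; i--) {
		struct entry *e = &ring[(i - 1) % RING_SIZE];

		if (e->ptr != object)
			continue;
		if (e->is_free && free_track < 0)
			free_track = e->track;
		else if (!e->is_free && alloc_track < 0)
			alloc_track = e->track;
	}

	printf("alloc track %d, free track %d\n", alloc_track, free_track);
	return 0;
}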
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d687f09a7ae3..9ef84f31833f 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -130,15 +130,11 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
/*
* Perform shadow offset calculation based on untagged address, as
- * some of the callers (e.g. kasan_poison_object_data) pass tagged
+ * some of the callers (e.g. kasan_poison_new_object) pass tagged
* addresses to this function.
*/
addr = kasan_reset_tag(addr);
- /* Skip KFENCE memory if called explicitly outside of sl*b. */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
if (WARN_ON(size & KASAN_GRANULE_MASK))
@@ -149,7 +145,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
__memset(shadow_start, value, shadow_end - shadow_start);
}
-EXPORT_SYMBOL(kasan_poison);
+EXPORT_SYMBOL_GPL(kasan_poison);
#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
@@ -170,19 +166,11 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
/*
* Perform shadow offset calculation based on untagged address, as
- * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+ * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
* addresses to this function.
*/
addr = kasan_reset_tag(addr);
- /*
- * Skip KFENCE memory if called explicitly outside of sl*b. Also note
- * that calls to ksize(), where size is not a multiple of machine-word
- * size, would otherwise poison the invalid portion of the word.
- */
- if (is_kfence_address(addr))
- return;
-
if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
return;
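kasan_poison() and kasan_unpoison() above derive a shadow byte range from the untagged address. For Generic KASAN that mapping is linear: shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, and its inverse is what the reworked kasan_non_canonical_hook() relies on via kasan_shadow_to_mem(). A userspace sketch of the mapping and its inverse; the offset below is the usual x86-64 Generic KASAN value and is used purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT 3			/* 8-byte granules */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000ULL	/* illustrative x86-64 value */

static uint64_t mem_to_shadow(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

static uint64_t shadow_to_mem(uint64_t shadow)
{
	return (shadow - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
}

int main(void)
{
	uint64_t addr = 0xffff888012345678ULL;	/* hypothetical kernel pointer */
	uint64_t shadow = mem_to_shadow(addr);

	printf("shadow(%#llx) = %#llx\n",
	       (unsigned long long)addr, (unsigned long long)shadow);
	/* The round trip drops the sub-granule bits of the address. */
	printf("shadow_to_mem() gives back %#llx\n",
	       (unsigned long long)shadow_to_mem(shadow));
	return 0;
}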
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 7dcfe341d48e..d65d48b85f90 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -13,6 +13,8 @@
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
+#include <linux/sched/clock.h>
+#include <linux/stackdepot.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -96,12 +98,13 @@ static void save_stack_info(struct kmem_cache *cache, void *object,
gfp_t gfp_flags, bool is_free)
{
unsigned long flags;
- depot_stack_handle_t stack;
+ depot_stack_handle_t stack, old_stack;
u64 pos;
struct kasan_stack_ring_entry *entry;
void *old_ptr;
- stack = kasan_save_stack(gfp_flags, true);
+ stack = kasan_save_stack(gfp_flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
/*
* Prevent save_stack_info() from modifying stack ring
@@ -120,17 +123,18 @@ next:
if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
goto next; /* Busy slot. */
- WRITE_ONCE(entry->size, cache->object_size);
- WRITE_ONCE(entry->pid, current->pid);
- WRITE_ONCE(entry->stack, stack);
- WRITE_ONCE(entry->is_free, is_free);
+ old_stack = entry->track.stack;
- /*
- * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
- */
- smp_store_release(&entry->ptr, (s64)object);
+ entry->size = cache->object_size;
+ kasan_set_track(&entry->track, stack);
+ entry->is_free = is_free;
+
+ entry->ptr = object;
read_unlock_irqrestore(&stack_ring.lock, flags);
+
+ if (old_stack)
+ stack_depot_put(old_stack);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
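save_stack_info() above now takes a reference on the saved stack (STACK_DEPOT_FLAG_GET) and drops the reference held by the ring entry it overwrites, but only after the slot has been rewritten. A minimal userspace sketch of that evict-and-release pattern, with strdup()/free() standing in for getting and putting depot handles; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RING_SIZE 4

struct entry {
	char *stack;	/* stands in for a reference-counted depot_stack_handle_t */
};

static struct entry ring[RING_SIZE];
static unsigned long pos;

static void save_entry(const char *stack)
{
	struct entry *e = &ring[pos++ % RING_SIZE];
	char *old_stack = e->stack;	/* handle being evicted, if any */

	e->stack = strdup(stack);	/* publish the new entry */

	if (old_stack)
		free(old_stack);	/* "stack_depot_put()" on the evicted handle */
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		char buf[32];

		snprintf(buf, sizeof(buf), "stack-%d", i);
		save_entry(buf);
	}
	for (int i = 0; i < RING_SIZE; i++)
		printf("slot %d: %s\n", i, ring[i].stack);
	return 0;
}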
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 064654717843..3defe6713ef1 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -446,7 +446,8 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
hugepage_flags_enabled()) {
- if (hugepage_vma_check(vma, vm_flags, false, false, true))
+ if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
+ PMD_ORDER))
__khugepaged_enter(vma->vm_mm);
}
}
@@ -493,11 +494,6 @@ static void release_pte_folio(struct folio *folio)
folio_putback_lru(folio);
}
-static void release_pte_page(struct page *page)
-{
- release_pte_folio(page_folio(page));
-}
-
static void release_pte_pages(pte_t *pte, pte_t *_pte,
struct list_head *compound_pagelist)
{
@@ -686,6 +682,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
spinlock_t *ptl,
struct list_head *compound_pagelist)
{
+ struct folio *src_folio;
struct page *src_page;
struct page *tmp;
pte_t *_pte;
@@ -707,16 +704,17 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
}
} else {
src_page = pte_page(pteval);
- if (!PageCompound(src_page))
- release_pte_page(src_page);
+ src_folio = page_folio(src_page);
+ if (!folio_test_large(src_folio))
+ release_pte_folio(src_folio);
/*
* ptl mostly unnecessary, but preempt has to
* be disabled to update the per-cpu stats
- * inside page_remove_rmap().
+ * inside folio_remove_rmap_pte().
*/
spin_lock(ptl);
ptep_clear(vma->vm_mm, address, _pte);
- page_remove_rmap(src_page, vma, false);
+ folio_remove_rmap_pte(src_folio, src_page, vma);
spin_unlock(ptl);
free_page_and_swap_cache(src_page);
}
@@ -922,16 +920,16 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!vma)
return SCAN_VMA_NULL;
- if (!transhuge_vma_suitable(vma, address))
+ if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
return SCAN_ADDRESS_RANGE;
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
- cc->is_khugepaged))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
+ cc->is_khugepaged, PMD_ORDER))
return SCAN_VMA_CHECK;
/*
* Anon VMA expected, the address may be unmapped then
 * remapped to file after khugepaged reacquired the mmap_lock.
*
- * hugepage_vma_check may return true for qualified file
+ * thp_vma_allowable_order may return true for qualified file
* vmas.
*/
if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
@@ -1089,6 +1087,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, _pmd;
pte_t *pte;
pgtable_t pgtable;
+ struct folio *folio;
struct page *hpage;
spinlock_t *pmd_ptl, *pte_ptl;
int result = SCAN_FAIL;
@@ -1139,6 +1138,9 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* Prevent all access to pagetables with the exception of
* gup_fast later handled by the ptep_clear_flush and the VM
* handled by the anon_vma lock + PG_lock.
+ *
+	 * UFFDIO_MOVE is also prevented from racing thanks to the
+	 * mmap_lock.
*/
mmap_write_lock(mm);
result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
@@ -1208,13 +1210,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
if (unlikely(result != SCAN_SUCCEED))
goto out_up_write;
+ folio = page_folio(hpage);
/*
- * spin_lock() below is not the equivalent of smp_wmb(), but
- * the smp_wmb() inside __SetPageUptodate() can be reused to
- * avoid the copy_huge_page writes to become visible after
- * the set_pmd_at() write.
+ * The smp_wmb() inside __folio_mark_uptodate() ensures the
+ * copy_huge_page writes become visible before the set_pmd_at()
+ * write.
*/
- __SetPageUptodate(hpage);
+ __folio_mark_uptodate(folio);
pgtable = pmd_pgtable(_pmd);
_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
@@ -1222,8 +1224,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
spin_lock(pmd_ptl);
BUG_ON(!pmd_none(*pmd));
- page_add_new_anon_rmap(hpage, vma, address);
- lru_cache_add_inactive_or_unevictable(hpage, vma);
+ folio_add_new_anon_rmap(folio, vma, address);
+ folio_add_lru_vma(folio, vma);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
@@ -1503,7 +1505,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* and map it by a PMD, regardless of sysfs THP settings. As such, let's
* analogously elide sysfs THP settings here.
*/
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
+ PMD_ORDER))
return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -1619,7 +1622,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* PTE dirty? Shmem page is already dirty; file is read-only.
*/
ptep_clear(mm, addr, pte);
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(folio, page, vma);
nr_ptes++;
}
@@ -2119,23 +2122,23 @@ immap_locked:
xas_lock_irq(&xas);
}
- nr = thp_nr_pages(hpage);
+ folio = page_folio(hpage);
+ nr = folio_nr_pages(folio);
if (is_shmem)
- __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+ __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
else
- __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+ __lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr);
if (nr_none) {
- __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+ __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_none);
/* nr_none is always 0 for non-shmem. */
- __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
+ __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_none);
}
/*
* Mark hpage as uptodate before inserting it into the page cache so
 * that it isn't mistaken for a fallocated but unwritten page.
*/
- folio = page_folio(hpage);
folio_mark_uptodate(folio);
folio_ref_add(folio, HPAGE_PMD_NR - 1);
@@ -2145,7 +2148,7 @@ immap_locked:
/* Join all the small entries into a single multi-index entry. */
xas_set_order(&xas, start, HPAGE_PMD_ORDER);
- xas_store(&xas, hpage);
+ xas_store(&xas, folio);
WARN_ON_ONCE(xas_error(&xas));
xas_unlock_irq(&xas);
@@ -2156,7 +2159,7 @@ immap_locked:
retract_page_tables(mapping, start);
if (cc && !cc->is_khugepaged)
result = SCAN_PTE_MAPPED_HUGEPAGE;
- unlock_page(hpage);
+ folio_unlock(folio);
/*
* The collapse has succeeded, so free the old pages.
@@ -2368,7 +2371,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
+ true, PMD_ORDER)) {
skip:
progress++;
continue;
@@ -2492,7 +2496,7 @@ static void khugepaged_do_scan(struct collapse_control *cc)
while (true) {
cond_resched();
- if (unlikely(kthread_should_stop() || try_to_freeze()))
+ if (unlikely(kthread_should_stop()))
break;
spin_lock(&khugepaged_mm_lock);
@@ -2705,7 +2709,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
*prev = vma;
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
+ PMD_ORDER))
return -EINVAL;
cc = kmalloc(sizeof(*cc), GFP_KERNEL);
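The khugepaged hunks above replace hugepage_vma_check() with thp_vma_allowable_order(..., PMD_ORDER), so each caller now names the specific page order it is asking about. A minimal sketch of checking one order against a mask of enabled orders; the mask-based layering is an assumption made for illustration, and a PMD_ORDER of 9 assumes 4 KiB base pages with 2 MiB PMD mappings:

#include <stdio.h>

#define PMD_ORDER 9	/* 2 MiB huge pages on 4 KiB base pages */

static unsigned long allowable_orders(unsigned long requested)
{
	/* Pretend policy: only PMD-sized mappings are enabled. */
	unsigned long enabled = 1UL << PMD_ORDER;

	return requested & enabled;
}

static int allowable_order(int order)
{
	return allowable_orders(1UL << order) != 0;
}

int main(void)
{
	printf("order %d allowed: %d\n", PMD_ORDER, allowable_order(PMD_ORDER));
	printf("order 4 allowed: %d\n", allowable_order(4));
	return 0;
}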
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5501363d6b31..6a540c2b27c5 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -14,17 +14,15 @@
* The following locks and mutexes are used by kmemleak:
*
* - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
- * del_state modifications and accesses to the object_tree_root (or
- * object_phys_tree_root). The object_list is the main list holding the
- * metadata (struct kmemleak_object) for the allocated memory blocks.
- * The object_tree_root and object_phys_tree_root are red
- * black trees used to look-up metadata based on a pointer to the
- * corresponding memory block. The object_phys_tree_root is for objects
- * allocated with physical address. The kmemleak_object structures are
- * added to the object_list and object_tree_root (or object_phys_tree_root)
- * in the create_object() function called from the kmemleak_alloc() (or
- * kmemleak_alloc_phys()) callback and removed in delete_object() called from
- * the kmemleak_free() callback
+ * del_state modifications and accesses to the object trees
+ * (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
+ * object_list is the main list holding the metadata (struct
+ * kmemleak_object) for the allocated memory blocks. The object trees are
+ * red black trees used to look-up metadata based on a pointer to the
+ * corresponding memory block. The kmemleak_object structures are added to
+ * the object_list and the object tree root in the create_object() function
+ * called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
+ * delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
* - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
* Accesses to the metadata (e.g. count) are protected by this lock. Note
* that some members of this structure may be protected by other means
@@ -178,6 +176,8 @@ struct kmemleak_object {
#define OBJECT_FULL_SCAN (1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS (1 << 4)
+/* flag set for per-CPU pointers */
+#define OBJECT_PERCPU (1 << 5)
/* set when __remove_object() called */
#define DELSTATE_REMOVED (1 << 0)
@@ -206,6 +206,8 @@ static LIST_HEAD(mem_pool_free_list);
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
+/* search tree for object (with OBJECT_PERCPU flag) boundaries */
+static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);
@@ -298,7 +300,7 @@ static void hex_dump_object(struct seq_file *seq,
const u8 *ptr = (const u8 *)object->pointer;
size_t len;
- if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+ if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
return;
/* limit the number of lines to HEX_MAX_LINES */
@@ -355,16 +357,14 @@ static void print_unreferenced(struct seq_file *seq,
int i;
unsigned long *entries;
unsigned int nr_entries;
- unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
nr_entries = stack_depot_fetch(object->trace_handle, &entries);
warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
- warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
- object->comm, object->pid, object->jiffies,
- msecs_age / 1000, msecs_age % 1000);
+ warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
+ object->comm, object->pid, object->jiffies);
hex_dump_object(seq, object);
- warn_or_seq_printf(seq, " backtrace:\n");
+ warn_or_seq_printf(seq, " backtrace (crc %x):\n", object->checksum);
for (i = 0; i < nr_entries; i++) {
void *ptr = (void *)entries[i];
@@ -392,6 +392,15 @@ static void dump_object_info(struct kmemleak_object *object)
stack_depot_print(object->trace_handle);
}
+static struct rb_root *object_tree(unsigned long objflags)
+{
+ if (objflags & OBJECT_PHYS)
+ return &object_phys_tree_root;
+ if (objflags & OBJECT_PERCPU)
+ return &object_percpu_tree_root;
+ return &object_tree_root;
+}
+
/*
* Look-up a memory block metadata (kmemleak_object) in the object search
* tree based on a pointer value. If alias is 0, only values pointing to the
@@ -399,10 +408,9 @@ static void dump_object_info(struct kmemleak_object *object)
* when calling this function.
*/
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
- bool is_phys)
+ unsigned int objflags)
{
- struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
- object_tree_root.rb_node;
+ struct rb_node *rb = object_tree(objflags)->rb_node;
unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
while (rb) {
@@ -431,7 +439,7 @@ static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
/* Look-up a kmemleak object which allocated with virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
- return __lookup_object(ptr, alias, false);
+ return __lookup_object(ptr, alias, 0);
}
/*
@@ -544,14 +552,14 @@ static void put_object(struct kmemleak_object *object)
* Look up an object in the object search tree and increase its use_count.
*/
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
- bool is_phys)
+ unsigned int objflags)
{
unsigned long flags;
struct kmemleak_object *object;
rcu_read_lock();
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- object = __lookup_object(ptr, alias, is_phys);
+ object = __lookup_object(ptr, alias, objflags);
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
/* check whether the object is still available */
@@ -565,19 +573,16 @@ static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alia
/* Look up and get an object which allocated with virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
- return __find_and_get_object(ptr, alias, false);
+ return __find_and_get_object(ptr, alias, 0);
}
/*
- * Remove an object from the object_tree_root (or object_phys_tree_root)
- * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
- * is still enabled.
+ * Remove an object from its object tree and object_list. Must be called with
+ * the kmemleak_lock held _if_ kmemleak is still enabled.
*/
static void __remove_object(struct kmemleak_object *object)
{
- rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
- &object_phys_tree_root :
- &object_tree_root);
+ rb_erase(&object->rb_node, object_tree(object->flags));
if (!(object->del_state & DELSTATE_NO_DELETE))
list_del_rcu(&object->object_list);
object->del_state |= DELSTATE_REMOVED;
@@ -585,11 +590,11 @@ static void __remove_object(struct kmemleak_object *object)
static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
int alias,
- bool is_phys)
+ unsigned int objflags)
{
struct kmemleak_object *object;
- object = __lookup_object(ptr, alias, is_phys);
+ object = __lookup_object(ptr, alias, objflags);
if (object)
__remove_object(object);
@@ -597,19 +602,18 @@ static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
}
/*
- * Look up an object in the object search tree and remove it from both
- * object_tree_root (or object_phys_tree_root) and object_list. The
- * returned object's use_count should be at least 1, as initially set
- * by create_object().
+ * Look up an object in the object search tree and remove it from both object
+ * tree root and object_list. The returned object's use_count should be at
+ * least 1, as initially set by create_object().
*/
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
- bool is_phys)
+ unsigned int objflags)
{
unsigned long flags;
struct kmemleak_object *object;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- object = __find_and_remove_object(ptr, alias, is_phys);
+ object = __find_and_remove_object(ptr, alias, objflags);
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
return object;
@@ -680,7 +684,7 @@ static struct kmemleak_object *__alloc_object(gfp_t gfp)
}
static int __link_object(struct kmemleak_object *object, unsigned long ptr,
- size_t size, int min_count, bool is_phys)
+ size_t size, int min_count, unsigned int objflags)
{
struct kmemleak_object *parent;
@@ -688,7 +692,7 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
unsigned long untagged_ptr;
unsigned long untagged_objp;
- object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
+ object->flags = OBJECT_ALLOCATED | objflags;
object->pointer = ptr;
object->size = kfence_ksize((void *)ptr) ?: size;
object->min_count = min_count;
@@ -699,12 +703,11 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
* Only update min_addr and max_addr with object
* storing virtual address.
*/
- if (!is_phys) {
+ if (!(objflags & (OBJECT_PHYS | OBJECT_PERCPU))) {
min_addr = min(min_addr, untagged_ptr);
max_addr = max(max_addr, untagged_ptr + size);
}
- link = is_phys ? &object_phys_tree_root.rb_node :
- &object_tree_root.rb_node;
+ link = &object_tree(objflags)->rb_node;
rb_parent = NULL;
while (*link) {
rb_parent = *link;
@@ -726,8 +729,7 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
}
}
rb_link_node(&object->rb_node, rb_parent, link);
- rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
- &object_tree_root);
+ rb_insert_color(&object->rb_node, object_tree(objflags));
list_add_tail_rcu(&object->object_list, &object_list);
return 0;
@@ -735,11 +737,10 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
/*
* Create the metadata (struct kmemleak_object) corresponding to an allocated
- * memory block and add it to the object_list and object_tree_root (or
- * object_phys_tree_root).
+ * memory block and add it to the object_list and object tree.
*/
static void __create_object(unsigned long ptr, size_t size,
- int min_count, gfp_t gfp, bool is_phys)
+ int min_count, gfp_t gfp, unsigned int objflags)
{
struct kmemleak_object *object;
unsigned long flags;
@@ -750,7 +751,7 @@ static void __create_object(unsigned long ptr, size_t size,
return;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- ret = __link_object(object, ptr, size, min_count, is_phys);
+ ret = __link_object(object, ptr, size, min_count, objflags);
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
if (ret)
mem_pool_free(object);
@@ -760,14 +761,21 @@ static void __create_object(unsigned long ptr, size_t size,
static void create_object(unsigned long ptr, size_t size,
int min_count, gfp_t gfp)
{
- __create_object(ptr, size, min_count, gfp, false);
+ __create_object(ptr, size, min_count, gfp, 0);
}
/* Create kmemleak object which allocated with physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
int min_count, gfp_t gfp)
{
- __create_object(ptr, size, min_count, gfp, true);
+ __create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
+}
+
+/* Create kmemleak object corresponding to a per-CPU allocation. */
+static void create_object_percpu(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp)
+{
+ __create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}
/*
@@ -794,11 +802,11 @@ static void __delete_object(struct kmemleak_object *object)
* Look up the metadata (struct kmemleak_object) corresponding to ptr and
* delete it.
*/
-static void delete_object_full(unsigned long ptr)
+static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
struct kmemleak_object *object;
- object = find_and_remove_object(ptr, 0, false);
+ object = find_and_remove_object(ptr, 0, objflags);
if (!object) {
#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
@@ -814,7 +822,8 @@ static void delete_object_full(unsigned long ptr)
* delete it. If the memory block is partially freed, the function may create
* additional metadata for the remaining parts of the block.
*/
-static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
+static void delete_object_part(unsigned long ptr, size_t size,
+ unsigned int objflags)
{
struct kmemleak_object *object, *object_l, *object_r;
unsigned long start, end, flags;
@@ -828,7 +837,7 @@ static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
goto out;
raw_spin_lock_irqsave(&kmemleak_lock, flags);
- object = __find_and_remove_object(ptr, 1, is_phys);
+ object = __find_and_remove_object(ptr, 1, objflags);
if (!object) {
#ifdef DEBUG
kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
@@ -846,11 +855,11 @@ static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
end = object->pointer + object->size;
if ((ptr > start) &&
!__link_object(object_l, start, ptr - start,
- object->min_count, is_phys))
+ object->min_count, objflags))
object_l = NULL;
if ((ptr + size < end) &&
!__link_object(object_r, ptr + size, end - ptr - size,
- object->min_count, is_phys))
+ object->min_count, objflags))
object_r = NULL;
unlock:
@@ -881,11 +890,11 @@ static void paint_it(struct kmemleak_object *object, int color)
raw_spin_unlock_irqrestore(&object->lock, flags);
}
-static void paint_ptr(unsigned long ptr, int color, bool is_phys)
+static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
struct kmemleak_object *object;
- object = __find_and_get_object(ptr, 0, is_phys);
+ object = __find_and_get_object(ptr, 0, objflags);
if (!object) {
kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
ptr,
@@ -903,16 +912,16 @@ static void paint_ptr(unsigned long ptr, int color, bool is_phys)
*/
static void make_gray_object(unsigned long ptr)
{
- paint_ptr(ptr, KMEMLEAK_GREY, false);
+ paint_ptr(ptr, KMEMLEAK_GREY, 0);
}
/*
* Mark the object as black-colored so that it is ignored from scans and
* reporting.
*/
-static void make_black_object(unsigned long ptr, bool is_phys)
+static void make_black_object(unsigned long ptr, unsigned int objflags)
{
- paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
+ paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}
/*
@@ -1048,8 +1057,6 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
gfp_t gfp)
{
- unsigned int cpu;
-
pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
/*
@@ -1057,9 +1064,7 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
* (min_count is set to 0).
*/
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- for_each_possible_cpu(cpu)
- create_object((unsigned long)per_cpu_ptr(ptr, cpu),
- size, 0, gfp);
+ create_object_percpu((unsigned long)ptr, size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -1100,7 +1105,7 @@ void __ref kmemleak_free(const void *ptr)
pr_debug("%s(0x%px)\n", __func__, ptr);
if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
- delete_object_full((unsigned long)ptr);
+ delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -1118,7 +1123,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
pr_debug("%s(0x%px)\n", __func__, ptr);
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- delete_object_part((unsigned long)ptr, size, false);
+ delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -1131,14 +1136,10 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part);
*/
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
- unsigned int cpu;
-
pr_debug("%s(0x%px)\n", __func__, ptr);
if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
- for_each_possible_cpu(cpu)
- delete_object_full((unsigned long)per_cpu_ptr(ptr,
- cpu));
+ delete_object_full((unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ -1208,7 +1209,7 @@ void __ref kmemleak_ignore(const void *ptr)
pr_debug("%s(0x%px)\n", __func__, ptr);
if (kmemleak_enabled && ptr && !IS_ERR(ptr))
- make_black_object((unsigned long)ptr, false);
+ make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
@@ -1282,7 +1283,7 @@ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
pr_debug("%s(0x%px)\n", __func__, &phys);
if (kmemleak_enabled)
- delete_object_part((unsigned long)phys, size, true);
+ delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1296,7 +1297,7 @@ void __ref kmemleak_ignore_phys(phys_addr_t phys)
pr_debug("%s(0x%px)\n", __func__, &phys);
if (kmemleak_enabled)
- make_black_object((unsigned long)phys, true);
+ make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
@@ -1307,7 +1308,7 @@ static bool update_checksum(struct kmemleak_object *object)
{
u32 old_csum = object->checksum;
- if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
+ if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
return false;
kasan_disable_current();
@@ -1463,7 +1464,6 @@ static void scan_object(struct kmemleak_object *object)
{
struct kmemleak_scan_area *area;
unsigned long flags;
- void *obj_ptr;
/*
* Once the object->lock is acquired, the corresponding memory block
@@ -1476,14 +1476,27 @@ static void scan_object(struct kmemleak_object *object)
/* already freed object */
goto out;
- obj_ptr = object->flags & OBJECT_PHYS ?
- __va((phys_addr_t)object->pointer) :
- (void *)object->pointer;
+ if (object->flags & OBJECT_PERCPU) {
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
+ void *end = start + object->size;
- if (hlist_empty(&object->area_list) ||
+ scan_block(start, end, object);
+
+ raw_spin_unlock_irqrestore(&object->lock, flags);
+ cond_resched();
+ raw_spin_lock_irqsave(&object->lock, flags);
+ if (!(object->flags & OBJECT_ALLOCATED))
+ break;
+ }
+ } else if (hlist_empty(&object->area_list) ||
object->flags & OBJECT_FULL_SCAN) {
- void *start = obj_ptr;
- void *end = obj_ptr + object->size;
+ void *start = object->flags & OBJECT_PHYS ?
+ __va((phys_addr_t)object->pointer) :
+ (void *)object->pointer;
+ void *end = start + object->size;
void *next;
do {
@@ -1498,11 +1511,12 @@ static void scan_object(struct kmemleak_object *object)
cond_resched();
raw_spin_lock_irqsave(&object->lock, flags);
} while (object->flags & OBJECT_ALLOCATED);
- } else
+ } else {
hlist_for_each_entry(area, &object->area_list, node)
scan_block((void *)area->start,
(void *)(area->start + area->size),
object);
+ }
out:
raw_spin_unlock_irqrestore(&object->lock, flags);
}
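The kmemleak changes above fold the old is_phys boolean into an objflags word and route every tree access through object_tree(), so per-CPU objects get their own search tree next to the virtual and physical ones. A compile-able userspace restatement of that dispatch, with rb_root reduced to a stub:

#include <stdio.h>

#define OBJECT_PHYS   (1 << 4)
#define OBJECT_PERCPU (1 << 5)

struct rb_root { void *rb_node; };	/* stub, just to make the dispatch concrete */

static struct rb_root object_tree_root;
static struct rb_root object_phys_tree_root;
static struct rb_root object_percpu_tree_root;

static struct rb_root *object_tree(unsigned int objflags)
{
	if (objflags & OBJECT_PHYS)
		return &object_phys_tree_root;
	if (objflags & OBJECT_PERCPU)
		return &object_percpu_tree_root;
	return &object_tree_root;
}

int main(void)
{
	printf("percpu flag selects the percpu tree: %d\n",
	       object_tree(OBJECT_PERCPU) == &object_percpu_tree_root);
	printf("no flags select the virtual-address tree: %d\n",
	       object_tree(0) == &object_tree_root);
	return 0;
}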
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index c19f47af0424..cf2d70e9c9a5 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -76,7 +76,7 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
/* Don't sleep. */
flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM);
- handle = __stack_depot_save(entries, nr_entries, flags, true);
+ handle = stack_depot_save(entries, nr_entries, flags);
return stack_depot_set_extra_bits(handle, extra);
}
@@ -185,11 +185,10 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
/*
* @entries is a local var in non-instrumented code, so KMSAN does not
* know it is initialized. Explicitly unpoison it to avoid false
- * positives when __stack_depot_save() passes it to instrumented code.
+ * positives when stack_depot_save() passes it to instrumented code.
*/
kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
- handle = __stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH,
- true);
+ handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
return stack_depot_set_extra_bits(handle, extra_bits);
}
diff --git a/mm/kmsan/init.c b/mm/kmsan/init.c
index ffedf4dbc49d..3ac3b8921d36 100644
--- a/mm/kmsan/init.c
+++ b/mm/kmsan/init.c
@@ -96,7 +96,7 @@ void __init kmsan_init_shadow(void)
struct metadata_page_pair {
struct page *shadow, *origin;
};
-static struct metadata_page_pair held_back[MAX_ORDER + 1] __initdata;
+static struct metadata_page_pair held_back[NR_PAGE_ORDERS] __initdata;
/*
* Eager metadata allocation. When the memblock allocator is freeing pages to
@@ -141,7 +141,7 @@ struct smallstack {
static struct smallstack collect = {
.index = 0,
- .order = MAX_ORDER,
+ .order = MAX_PAGE_ORDER,
};
static void smallstack_push(struct smallstack *stack, struct page *pages)
@@ -211,8 +211,8 @@ static void kmsan_memblock_discard(void)
* order=N-1,
* - repeat.
*/
- collect.order = MAX_ORDER;
- for (int i = MAX_ORDER; i >= 0; i--) {
+ collect.order = MAX_PAGE_ORDER;
+ for (int i = MAX_PAGE_ORDER; i >= 0; i--) {
if (held_back[i].shadow)
smallstack_push(&collect, held_back[i].shadow);
if (held_back[i].origin)
diff --git a/mm/ksm.c b/mm/ksm.c
index 6a831009b4cb..8c001819cf10 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
+#include <linux/sched/cputime.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
@@ -248,6 +249,9 @@ static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;
+/* Default number of pages to scan per batch */
+#define DEFAULT_PAGES_TO_SCAN 100
+
/* The number of pages scanned */
static unsigned long ksm_pages_scanned;
@@ -276,7 +280,7 @@ static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
static int ksm_max_page_sharing = 256;
/* Number of pages ksmd should scan in one batch */
-static unsigned int ksm_thread_pages_to_scan = 100;
+static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;
@@ -297,6 +301,172 @@ unsigned long ksm_zero_pages;
/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;
+/* Don't scan more than max pages per batch. */
+static unsigned long ksm_advisor_max_pages_to_scan = 30000;
+
+/* Min CPU percentage to spend on scanning, per scan */
+#define KSM_ADVISOR_MIN_CPU 10
+
+/* Max CPU percentage to spend on scanning, per scan */
+static unsigned int ksm_advisor_max_cpu = 70;
+
+/* Target scan time in seconds to analyze all KSM candidate pages. */
+static unsigned long ksm_advisor_target_scan_time = 200;
+
+/* Exponentially weighted moving average. */
+#define EWMA_WEIGHT 30
+
+/**
+ * struct advisor_ctx - metadata for KSM advisor
+ * @start_scan: start time of the current scan
+ * @scan_time: scan time of previous scan
+ * @change: change in percent to pages_to_scan parameter
+ * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
+ */
+struct advisor_ctx {
+ ktime_t start_scan;
+ unsigned long scan_time;
+ unsigned long change;
+ unsigned long long cpu_time;
+};
+static struct advisor_ctx advisor_ctx;
+
+/* The different advisor types */
+enum ksm_advisor_type {
+ KSM_ADVISOR_NONE,
+ KSM_ADVISOR_SCAN_TIME,
+};
+static enum ksm_advisor_type ksm_advisor;
+
+#ifdef CONFIG_SYSFS
+/*
+ * Only called through the sysfs control interface:
+ */
+
+/* At least scan this many pages per batch. */
+static unsigned long ksm_advisor_min_pages_to_scan = 500;
+
+static void set_advisor_defaults(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_NONE) {
+ ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
+ } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
+ advisor_ctx = (const struct advisor_ctx){ 0 };
+ ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
+ }
+}
+#endif /* CONFIG_SYSFS */
+
+static inline void advisor_start_scan(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ advisor_ctx.start_scan = ktime_get();
+}
+
+/*
+ * Use previous scan time if available, otherwise use current scan time as an
+ * approximation for the previous scan time.
+ */
+static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
+ unsigned long scan_time)
+{
+ return ctx->scan_time ? ctx->scan_time : scan_time;
+}
+
+/* Calculate exponential weighted moving average */
+static unsigned long ewma(unsigned long prev, unsigned long curr)
+{
+ return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
+}
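For intuition, a minimal stand-alone sketch of the smoothing above (user-space C, illustrative values only, not part of the patch):

#include <assert.h>

/* same formula as ewma() above, with EWMA_WEIGHT fixed at 30 */
static unsigned long ewma_example(unsigned long prev, unsigned long curr)
{
	return ((100 - 30) * prev + 30 * curr) / 100;
}

int main(void)
{
	/* a drop from 1000 to 200 only moves the average 30% of the way */
	assert(ewma_example(1000, 200) == 760);
	return 0;
}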
+
+/*
+ * The scan time advisor is based on the current scan rate and the target
+ * scan rate.
+ *
+ * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
+ *
+ * To avoid perturbations it calculates a change factor based on previous changes.
+ * A new change factor is calculated for each iteration and it uses an
+ * exponentially weighted moving average. The new pages_to_scan value is
+ * multiplied with that change factor:
+ *
+ * new_pages_to_scan *= change factor
+ *
+ * The new_pages_to_scan value is limited by the cpu min and max values. It
+ * calculates the cpu percent for the last scan and calculates the new
+ * estimated cpu percent cost for the next scan. That value is capped by the
+ * cpu min and max setting.
+ *
+ * In addition the new pages_to_scan value is capped by the max and min
+ * limits.
+ */
+static void scan_time_advisor(void)
+{
+ unsigned int cpu_percent;
+ unsigned long cpu_time;
+ unsigned long cpu_time_diff;
+ unsigned long cpu_time_diff_ms;
+ unsigned long pages;
+ unsigned long per_page_cost;
+ unsigned long factor;
+ unsigned long change;
+ unsigned long last_scan_time;
+ unsigned long scan_time;
+
+ /* Convert scan time to seconds */
+ scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
+ MSEC_PER_SEC);
+ scan_time = scan_time ? scan_time : 1;
+
+ /* Calculate CPU consumption of ksmd background thread */
+ cpu_time = task_sched_runtime(current);
+ cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
+ cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
+
+ cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
+ cpu_percent = cpu_percent ? cpu_percent : 1;
+ last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
+
+ /* Calculate the target scan time as a percentage of the actual scan time */
+ factor = ksm_advisor_target_scan_time * 100 / scan_time;
+ factor = factor ? factor : 1;
+
+ /*
+ * Calculate scan time as percentage of last scan time and use
+ * exponentially weighted average to smooth it
+ */
+ change = scan_time * 100 / last_scan_time;
+ change = change ? change : 1;
+ change = ewma(advisor_ctx.change, change);
+
+ /* Calculate new scan rate based on target scan rate. */
+ pages = ksm_thread_pages_to_scan * 100 / factor;
+ /* Update pages_to_scan by weighted change percentage. */
+ pages = pages * change / 100;
+
+ /* Cap new pages_to_scan value */
+ per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
+ per_page_cost = per_page_cost ? per_page_cost : 1;
+
+ pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
+ pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
+ pages = min(pages, ksm_advisor_max_pages_to_scan);
+
+ /* Update advisor context */
+ advisor_ctx.change = change;
+ advisor_ctx.scan_time = scan_time;
+ advisor_ctx.cpu_time = cpu_time;
+
+ ksm_thread_pages_to_scan = pages;
+ trace_ksm_advisor(scan_time, pages, cpu_percent);
+}
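Worked example of the calculation above with made-up numbers (all values illustrative): pages_to_scan = 500, target_scan_time = 200s, measured scan_time = 400s, previous change factor = 100, cpu_percent = 20.

/*
 * Illustrative arithmetic only:
 *   factor        = 200 * 100 / 400            = 50   (scan took 2x the target)
 *   change        = ewma(100, 400 * 100 / 400) = 100
 *   pages         = 500 * 100 / 50             = 1000
 *   pages         = 1000 * 100 / 100           = 1000
 *   per_page_cost = 500 / 20                   = 25
 *   caps: min(1000, 25 * 70), max(..., 25 * 10) and the 30000 limit all
 *         leave pages at 1000
 * so pages_to_scan roughly doubles when a scan takes twice as long as the
 * target scan time.
 */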
+
+static void advisor_stop_scan(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ scan_time_advisor();
+}
+
#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
@@ -1099,9 +1269,9 @@ error:
static u32 calc_checksum(struct page *page)
{
u32 checksum;
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
checksum = xxhash(addr, PAGE_SIZE, 0);
- kunmap_atomic(addr);
+ kunmap_local(addr);
return checksum;
}
@@ -1161,8 +1331,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
goto out_unlock;
}
- /* See page_try_share_anon_rmap(): clear PTE first. */
- if (anon_exclusive && page_try_share_anon_rmap(page)) {
+ /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
+ if (anon_exclusive &&
+ folio_try_share_anon_rmap_pte(page_folio(page), page)) {
set_pte_at(mm, pvmw.address, pvmw.pte, entry);
goto out_unlock;
}
@@ -1199,6 +1370,7 @@ out:
static int replace_page(struct vm_area_struct *vma, struct page *page,
struct page *kpage, pte_t orig_pte)
{
+ struct folio *kfolio = page_folio(kpage);
struct mm_struct *mm = vma->vm_mm;
struct folio *folio;
pmd_t *pmd;
@@ -1238,15 +1410,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
goto out_mn;
}
VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
- VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage);
+ VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
+ kfolio);
/*
* No need to check ksm_use_zero_pages here: we can only have a
* zero_page here if ksm_use_zero_pages was enabled already.
*/
if (!is_zero_pfn(page_to_pfn(kpage))) {
- get_page(kpage);
- page_add_anon_rmap(kpage, vma, addr, RMAP_NONE);
+ folio_get(kfolio);
+ folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
newpte = mk_pte(kpage, vma->vm_page_prot);
} else {
/*
@@ -1277,7 +1450,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
set_pte_at_notify(mm, addr, ptep, newpte);
folio = page_folio(page);
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(folio, page, vma);
if (!folio_mapped(folio))
folio_free_swap(folio);
folio_put(folio);
@@ -2401,6 +2574,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
mm_slot = ksm_scan.mm_slot;
if (mm_slot == &ksm_mm_head) {
+ advisor_start_scan();
trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
/*
@@ -2558,6 +2732,8 @@ no_vmas:
if (mm_slot != &ksm_mm_head)
goto next_mm;
+ advisor_stop_scan();
+
trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
ksm_scan.seqnr++;
return NULL;
@@ -2604,11 +2780,9 @@ static int ksm_scan_thread(void *nothing)
ksm_do_scan(ksm_thread_pages_to_scan);
mutex_unlock(&ksm_thread_mutex);
- try_to_freeze();
-
if (ksmd_should_run()) {
sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
- wait_event_interruptible_timeout(ksm_iter_wait,
+ wait_event_freezable_timeout(ksm_iter_wait,
sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
msecs_to_jiffies(sleep_ms));
} else {
@@ -2875,49 +3049,53 @@ void __ksm_exit(struct mm_struct *mm)
trace_ksm_exit(mm);
}
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- struct folio *folio = page_folio(page);
+ struct page *page = folio_page(folio, 0);
struct anon_vma *anon_vma = folio_anon_vma(folio);
- struct page *new_page;
+ struct folio *new_folio;
- if (PageKsm(page)) {
- if (page_stable_node(page) &&
+ if (folio_test_large(folio))
+ return folio;
+
+ if (folio_test_ksm(folio)) {
+ if (folio_stable_node(folio) &&
!(ksm_run & KSM_RUN_UNMERGE))
- return page; /* no need to copy it */
+ return folio; /* no need to copy it */
} else if (!anon_vma) {
- return page; /* no need to copy it */
- } else if (page->index == linear_page_index(vma, address) &&
+ return folio; /* no need to copy it */
+ } else if (folio->index == linear_page_index(vma, addr) &&
anon_vma->root == vma->anon_vma->root) {
- return page; /* still no need to copy it */
+ return folio; /* still no need to copy it */
}
if (PageHWPoison(page))
return ERR_PTR(-EHWPOISON);
- if (!PageUptodate(page))
- return page; /* let do_swap_page report the error */
-
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (new_page &&
- mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
- put_page(new_page);
- new_page = NULL;
- }
- if (new_page) {
- if (copy_mc_user_highpage(new_page, page, address, vma)) {
- put_page(new_page);
- memory_failure_queue(page_to_pfn(page), 0);
+ if (!folio_test_uptodate(folio))
+ return folio; /* let do_swap_page report the error */
+
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+ if (new_folio &&
+ mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
+ folio_put(new_folio);
+ new_folio = NULL;
+ }
+ if (new_folio) {
+ if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
+ addr, vma)) {
+ folio_put(new_folio);
+ memory_failure_queue(folio_pfn(folio), 0);
return ERR_PTR(-EHWPOISON);
}
- SetPageDirty(new_page);
- __SetPageUptodate(new_page);
- __SetPageLocked(new_page);
+ folio_set_dirty(new_folio);
+ __folio_mark_uptodate(new_folio);
+ __folio_set_locked(new_folio);
#ifdef CONFIG_SWAP
count_vm_event(KSM_SWPIN_COPY);
#endif
}
- return new_page;
+ return new_folio;
}
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
@@ -3244,6 +3422,9 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
unsigned int nr_pages;
int err;
+ if (ksm_advisor != KSM_ADVISOR_NONE)
+ return -EINVAL;
+
err = kstrtouint(buf, 10, &nr_pages);
if (err)
return -EINVAL;
@@ -3563,6 +3744,130 @@ static ssize_t smart_scan_store(struct kobject *kobj,
}
KSM_ATTR(smart_scan);
+static ssize_t advisor_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const char *output;
+
+ if (ksm_advisor == KSM_ADVISOR_NONE)
+ output = "[none] scan-time";
+ else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ output = "none [scan-time]";
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t advisor_mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ enum ksm_advisor_type curr_advisor = ksm_advisor;
+
+ if (sysfs_streq("scan-time", buf))
+ ksm_advisor = KSM_ADVISOR_SCAN_TIME;
+ else if (sysfs_streq("none", buf))
+ ksm_advisor = KSM_ADVISOR_NONE;
+ else
+ return -EINVAL;
+
+ /* Set advisor default values */
+ if (curr_advisor != ksm_advisor)
+ set_advisor_defaults();
+
+ return count;
+}
+KSM_ATTR(advisor_mode);
+
+static ssize_t advisor_max_cpu_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
+}
+
+static ssize_t advisor_max_cpu_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_max_cpu = value;
+ return count;
+}
+KSM_ATTR(advisor_max_cpu);
+
+static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
+}
+
+static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_min_pages_to_scan = value;
+ return count;
+}
+KSM_ATTR(advisor_min_pages_to_scan);
+
+static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
+}
+
+static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_max_pages_to_scan = value;
+ return count;
+}
+KSM_ATTR(advisor_max_pages_to_scan);
+
+static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
+}
+
+static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+ if (value < 1)
+ return -EINVAL;
+
+ ksm_advisor_target_scan_time = value;
+ return count;
+}
+KSM_ATTR(advisor_target_scan_time);
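A minimal user-space sketch of driving the new knobs (assumes the usual /sys/kernel/mm/ksm location; illustrative only):

#include <stdio.h>

/* write a value to a KSM sysfs attribute; the path prefix is an assumption */
static int write_ksm_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* switch to the scan-time advisor and tighten the target to 120s */
	write_ksm_attr("advisor_mode", "scan-time");
	write_ksm_attr("advisor_target_scan_time", "120");
	/* pages_to_scan is now advisor-controlled; writing it fails with EINVAL */
	return 0;
}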
+
static struct attribute *ksm_attrs[] = {
&sleep_millisecs_attr.attr,
&pages_to_scan_attr.attr,
@@ -3585,6 +3890,11 @@ static struct attribute *ksm_attrs[] = {
&use_zero_pages_attr.attr,
&general_profit_attr.attr,
&smart_scan_attr.attr,
+ &advisor_mode_attr.attr,
+ &advisor_max_cpu_attr.attr,
+ &advisor_min_pages_to_scan_attr.attr,
+ &advisor_max_pages_to_scan_attr.attr,
+ &advisor_target_scan_time_attr.attr,
NULL,
};
diff --git a/mm/list_lru.c b/mm/list_lru.c
index a05e5bef3b40..35b0147542a9 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -59,28 +59,6 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
}
return &lru->node[nid].lru;
}
-
-static inline struct list_lru_one *
-list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
- struct mem_cgroup **memcg_ptr)
-{
- struct list_lru_node *nlru = &lru->node[nid];
- struct list_lru_one *l = &nlru->lru;
- struct mem_cgroup *memcg = NULL;
-
- if (!list_lru_memcg_aware(lru))
- goto out;
-
- memcg = mem_cgroup_from_slab_obj(ptr);
- if (!memcg)
- goto out;
-
- l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
-out:
- if (memcg_ptr)
- *memcg_ptr = memcg;
- return l;
-}
#else
static void list_lru_register(struct list_lru *lru)
{
@@ -105,32 +83,21 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
return &lru->node[nid].lru;
}
-
-static inline struct list_lru_one *
-list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
- struct mem_cgroup **memcg_ptr)
-{
- if (memcg_ptr)
- *memcg_ptr = NULL;
- return &lru->node[nid].lru;
-}
#endif /* CONFIG_MEMCG_KMEM */
-bool list_lru_add(struct list_lru *lru, struct list_head *item)
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
{
- int nid = page_to_nid(virt_to_page(item));
struct list_lru_node *nlru = &lru->node[nid];
- struct mem_cgroup *memcg;
struct list_lru_one *l;
spin_lock(&nlru->lock);
if (list_empty(item)) {
- l = list_lru_from_kmem(lru, nid, item, &memcg);
+ l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
list_add_tail(item, &l->list);
/* Set shrinker bit if the first element was added */
if (!l->nr_items++)
- set_shrinker_bit(memcg, nid,
- lru_shrinker_id(lru));
+ set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
@@ -140,15 +107,25 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
}
EXPORT_SYMBOL_GPL(list_lru_add);
-bool list_lru_del(struct list_lru *lru, struct list_head *item)
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
{
int nid = page_to_nid(virt_to_page(item));
+ struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
+ mem_cgroup_from_slab_obj(item) : NULL;
+
+ return list_lru_add(lru, item, nid, memcg);
+}
+EXPORT_SYMBOL_GPL(list_lru_add_obj);
+
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
+{
struct list_lru_node *nlru = &lru->node[nid];
struct list_lru_one *l;
spin_lock(&nlru->lock);
if (!list_empty(item)) {
- l = list_lru_from_kmem(lru, nid, item, NULL);
+ l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
list_del_init(item);
l->nr_items--;
nlru->nr_items--;
@@ -160,6 +137,16 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
}
EXPORT_SYMBOL_GPL(list_lru_del);
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
+{
+ int nid = page_to_nid(virt_to_page(item));
+ struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
+ mem_cgroup_from_slab_obj(item) : NULL;
+
+ return list_lru_del(lru, item, nid, memcg);
+}
+EXPORT_SYMBOL_GPL(list_lru_del_obj);
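A hedged sketch of the reworked API from a caller's perspective; struct my_entry and its fields are hypothetical, not part of this patch:

#include <linux/list_lru.h>

/* hypothetical cache entry whose owner already tracks nid and memcg */
struct my_entry {
	struct list_head lru;
	int nid;
	struct mem_cgroup *memcg;
};

/* explicit variant: no virt_to_page()/slab lookup on the hot path */
static void my_cache_track(struct list_lru *lru, struct my_entry *e)
{
	list_lru_add(lru, &e->lru, e->nid, e->memcg);
}

static void my_cache_untrack(struct list_lru *lru, struct my_entry *e)
{
	list_lru_del(lru, &e->lru, e->nid, e->memcg);
}

/* slab-backed objects keep the old behaviour via the *_obj wrappers */
static void my_slab_track(struct list_lru *lru, struct list_head *item)
{
	list_lru_add_obj(lru, item);
}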
+
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
list_del_init(item);
@@ -175,6 +162,20 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
+void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg)
+{
+ struct list_lru_one *list =
+ list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
+
+ if (list_empty(item)) {
+ list_add_tail(item, &list->list);
+ if (!list->nr_items++)
+ set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
+ }
+}
+EXPORT_SYMBOL_GPL(list_lru_putback);
+
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg)
{
diff --git a/mm/madvise.c b/mm/madvise.c
index 6214a1ab5654..912155a94ed5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -180,7 +180,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
for (addr = start; addr < end; addr += PAGE_SIZE) {
pte_t pte;
swp_entry_t entry;
- struct page *page;
+ struct folio *folio;
if (!ptep++) {
ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -198,10 +198,10 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
pte_unmap_unlock(ptep, ptl);
ptep = NULL;
- page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
+ folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
vma, addr, &splug);
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
}
if (ptep)
@@ -223,17 +223,17 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
{
XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
pgoff_t end_index = linear_page_index(vma, end) - 1;
- struct page *page;
+ struct folio *folio;
struct swap_iocb *splug = NULL;
rcu_read_lock();
- xas_for_each(&xas, page, end_index) {
+ xas_for_each(&xas, folio, end_index) {
unsigned long addr;
swp_entry_t entry;
- if (!xa_is_value(page))
+ if (!xa_is_value(folio))
continue;
- entry = radix_to_swp_entry(page);
+ entry = radix_to_swp_entry(folio);
/* There might be swapin error entries in shmem mapping. */
if (non_swap_entry(entry))
continue;
@@ -243,10 +243,10 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
xas_pause(&xas);
rcu_read_unlock();
- page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
+ folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
vma, addr, &splug);
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
rcu_read_lock();
}
diff --git a/mm/memblock.c b/mm/memblock.c
index 5a88d6d24d79..8c194d8afeec 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -735,6 +735,40 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
}
/**
+ * memblock_validate_numa_coverage - check if amount of memory with
+ * no node ID assigned is less than a threshold
+ * @threshold_bytes: maximal amount of memory (in bytes) that may have an
+ * unassigned node ID
+ *
+ * A buggy firmware may report memory that does not belong to any node.
+ * Check if the amount of such memory is below @threshold_bytes.
+ *
+ * Return: true on success, false on failure.
+ */
+bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
+{
+ unsigned long nr_pages = 0;
+ unsigned long start_pfn, end_pfn, mem_size_mb;
+ int nid, i;
+
+ /* calculate the number of pages without a node ID assigned */
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+ if (nid == NUMA_NO_NODE)
+ nr_pages += end_pfn - start_pfn;
+ }
+
+ if ((nr_pages << PAGE_SHIFT) >= threshold_bytes) {
+ mem_size_mb = memblock_phys_mem_size() >> 20;
+ pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
+ (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+ return false;
+ }
+
+ return true;
+}
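A sketch of the intended call site in arch NUMA init code; the function name and the SZ_1M threshold here are illustrative assumptions, not taken from this patch:

#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

/* hypothetical arch-side NUMA init step */
static int __init example_numa_register_memblks(void)
{
	/* give up on NUMA if more than 1MB of RAM has no node ID assigned */
	if (!memblock_validate_numa_coverage(SZ_1M))
		return -EINVAL;

	/* ... continue registering nodes and distances ... */
	return 0;
}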
+
+/**
* memblock_isolate_range - isolate given range into disjoint memblocks
* @type: memblock type to isolate range for
* @base: base of range to isolate
@@ -2079,12 +2113,13 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
* Free the pages in the largest chunks alignment allows.
*
* __ffs() behaviour is undefined for 0. start == 0 is
- * MAX_ORDER-aligned, set order to MAX_ORDER for the case.
+ * MAX_PAGE_ORDER-aligned, set order to MAX_PAGE_ORDER for
+ * the case.
*/
if (start)
- order = min_t(int, MAX_ORDER, __ffs(start));
+ order = min_t(int, MAX_PAGE_ORDER, __ffs(start));
else
- order = MAX_ORDER;
+ order = MAX_PAGE_ORDER;
while (start + (1UL << order) > end)
order--;
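Worked example for the alignment logic above (PFN values are made up):

/*
 * Illustrative only: start = 0x3500, end = 0x3540 (64 pages to free)
 *   __ffs(0x3500) = 8            -> order = min(MAX_PAGE_ORDER, 8) = 8
 *   0x3500 + (1 << 8) > 0x3540   -> shrink to order 7, then order 6
 *   0x3500 + (1 << 6) == 0x3540  -> free a single order-6 (64 page) chunk
 */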
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d58ec11317c7..e4c8735e7c85 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -574,116 +574,6 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
return mz;
}
-/*
- * memcg and lruvec stats flushing
- *
- * Many codepaths leading to stats update or read are performance sensitive and
- * adding stats flushing in such codepaths is not desirable. So, to optimize the
- * flushing the kernel does:
- *
- * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
- * rstat update tree grow unbounded.
- *
- * 2) Flush the stats synchronously on reader side only when there are more than
- * (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
- * will let stats be out of sync by atmost (MEMCG_CHARGE_BATCH * nr_cpus) but
- * only for 2 seconds due to (1).
- */
-static void flush_memcg_stats_dwork(struct work_struct *w);
-static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static DEFINE_PER_CPU(unsigned int, stats_updates);
-static atomic_t stats_flush_ongoing = ATOMIC_INIT(0);
-static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
-static u64 flush_next_time;
-
-#define FLUSH_TIME (2UL*HZ)
-
-/*
- * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
- * not rely on this as part of an acquired spinlock_t lock. These functions are
- * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion
- * is sufficient.
- */
-static void memcg_stats_lock(void)
-{
- preempt_disable_nested();
- VM_WARN_ON_IRQS_ENABLED();
-}
-
-static void __memcg_stats_lock(void)
-{
- preempt_disable_nested();
-}
-
-static void memcg_stats_unlock(void)
-{
- preempt_enable_nested();
-}
-
-static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
-{
- unsigned int x;
-
- if (!val)
- return;
-
- cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
-
- x = __this_cpu_add_return(stats_updates, abs(val));
- if (x > MEMCG_CHARGE_BATCH) {
- /*
- * If stats_flush_threshold exceeds the threshold
- * (>num_online_cpus()), cgroup stats update will be triggered
- * in __mem_cgroup_flush_stats(). Increasing this var further
- * is redundant and simply adds overhead in atomic update.
- */
- if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
- atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
- __this_cpu_write(stats_updates, 0);
- }
-}
-
-static void do_flush_stats(void)
-{
- /*
- * We always flush the entire tree, so concurrent flushers can just
- * skip. This avoids a thundering herd problem on the rstat global lock
- * from memcg flushers (e.g. reclaim, refault, etc).
- */
- if (atomic_read(&stats_flush_ongoing) ||
- atomic_xchg(&stats_flush_ongoing, 1))
- return;
-
- WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME);
-
- cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
-
- atomic_set(&stats_flush_threshold, 0);
- atomic_set(&stats_flush_ongoing, 0);
-}
-
-void mem_cgroup_flush_stats(void)
-{
- if (atomic_read(&stats_flush_threshold) > num_online_cpus())
- do_flush_stats();
-}
-
-void mem_cgroup_flush_stats_ratelimited(void)
-{
- if (time_after64(jiffies_64, READ_ONCE(flush_next_time)))
- mem_cgroup_flush_stats();
-}
-
-static void flush_memcg_stats_dwork(struct work_struct *w)
-{
- /*
- * Always flush here so that flushing in latency-sensitive paths is
- * as cheap as possible.
- */
- do_flush_stats();
- queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
-}
-
/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
PGPGIN,
@@ -704,6 +594,7 @@ static const unsigned int memcg_vm_event_stat[] = {
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
ZSWPIN,
ZSWPOUT,
+ ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
THP_FAULT_ALLOC,
@@ -741,6 +632,9 @@ struct memcg_vmstats_percpu {
/* Cgroup1: threshold notifications & softlimit tree updates */
unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
+
+ /* Stats updates since the last flush */
+ unsigned int stats_updates;
};
struct memcg_vmstats {
@@ -755,8 +649,134 @@ struct memcg_vmstats {
/* Pending child counts during tree propagation */
long state_pending[MEMCG_NR_STAT];
unsigned long events_pending[NR_MEMCG_EVENTS];
+
+ /* Stats updates since the last flush */
+ atomic64_t stats_updates;
};
+/*
+ * memcg and lruvec stats flushing
+ *
+ * Many codepaths leading to stats update or read are performance sensitive and
+ * adding stats flushing in such codepaths is not desirable. So, to optimize the
+ * flushing the kernel does:
+ *
+ * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
+ * rstat update tree grow unbounded.
+ *
+ * 2) Flush the stats synchronously on the reader side only when there are more
+ * than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can let
+ * the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but only
+ * for 2 seconds due to (1).
+ */
+static void flush_memcg_stats_dwork(struct work_struct *w);
+static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
+static u64 flush_last_time;
+
+#define FLUSH_TIME (2UL*HZ)
+
+/*
+ * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
+ * not rely on this as part of an acquired spinlock_t lock. These functions are
+ * never used in hardirq context on PREEMPT_RT and therefore disabling preemption
+ * is sufficient.
+ */
+static void memcg_stats_lock(void)
+{
+ preempt_disable_nested();
+ VM_WARN_ON_IRQS_ENABLED();
+}
+
+static void __memcg_stats_lock(void)
+{
+ preempt_disable_nested();
+}
+
+static void memcg_stats_unlock(void)
+{
+ preempt_enable_nested();
+}
+
+static bool memcg_should_flush_stats(struct mem_cgroup *memcg)
+{
+ return atomic64_read(&memcg->vmstats->stats_updates) >
+ MEMCG_CHARGE_BATCH * num_online_cpus();
+}
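For scale, a worked example (MEMCG_CHARGE_BATCH is 64 at the time of writing; the CPU count is illustrative):

/*
 * Illustrative only: with MEMCG_CHARGE_BATCH == 64 and 8 online CPUs,
 * a subtree is considered worth flushing once
 *   stats_updates > 64 * 8 = 512
 * updates have accumulated since the last flush.
 */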
+
+static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
+{
+ int cpu = smp_processor_id();
+ unsigned int x;
+
+ if (!val)
+ return;
+
+ cgroup_rstat_updated(memcg->css.cgroup, cpu);
+
+ for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+ x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
+ abs(val));
+
+ if (x < MEMCG_CHARGE_BATCH)
+ continue;
+
+ /*
+ * If @memcg is already flush-able, increasing stats_updates is
+ * redundant. Avoid the overhead of the atomic update.
+ */
+ if (!memcg_should_flush_stats(memcg))
+ atomic64_add(x, &memcg->vmstats->stats_updates);
+ __this_cpu_write(memcg->vmstats_percpu->stats_updates, 0);
+ }
+}
+
+static void do_flush_stats(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_is_root(memcg))
+ WRITE_ONCE(flush_last_time, jiffies_64);
+
+ cgroup_rstat_flush(memcg->css.cgroup);
+}
+
+/*
+ * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
+ * @memcg: root of the subtree to flush
+ *
+ * Flushing is serialized by the underlying global rstat lock. There is also a
+ * minimum amount of work to be done even if there are no stat updates to flush.
+ * Hence, we only flush the stats if the updates delta exceeds a threshold. This
+ * avoids unnecessary work and contention on the underlying lock.
+ */
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled())
+ return;
+
+ if (!memcg)
+ memcg = root_mem_cgroup;
+
+ if (memcg_should_flush_stats(memcg))
+ do_flush_stats(memcg);
+}
+
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
+{
+ /* Only flush if the periodic flusher is one full cycle late */
+ if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
+ mem_cgroup_flush_stats(memcg);
+}
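Timing example for the ratelimited path (FLUSH_TIME is 2*HZ, i.e. two seconds):

/*
 * Illustrative only: the ratelimited variant flushes only when
 *   jiffies_64 > flush_last_time + 2 * FLUSH_TIME   (i.e. > last flush + 4s)
 * meaning the periodic worker has fallen a full cycle behind.
 */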
+
+static void flush_memcg_stats_dwork(struct work_struct *w)
+{
+ /*
+ * Deliberately ignore memcg_should_flush_stats() here so that flushing
+ * in latency-sensitive paths is as cheap as possible.
+ */
+ do_flush_stats(root_mem_cgroup);
+ queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
+}
+
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
long x = READ_ONCE(memcg->vmstats->state[idx]);
@@ -871,16 +891,15 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
__mod_memcg_lruvec_state(lruvec, idx, val);
}
-void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
+void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
int val)
{
- struct page *head = compound_head(page); /* rmap on tail pages */
struct mem_cgroup *memcg;
- pg_data_t *pgdat = page_pgdat(page);
+ pg_data_t *pgdat = folio_pgdat(folio);
struct lruvec *lruvec;
rcu_read_lock();
- memcg = page_memcg(head);
+ memcg = folio_memcg(folio);
/* Untracked pages have no memcg, no lruvec. Update only the node */
if (!memcg) {
rcu_read_unlock();
@@ -892,7 +911,7 @@ void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
__mod_lruvec_state(lruvec, idx, val);
rcu_read_unlock();
}
-EXPORT_SYMBOL(__mod_lruvec_page_state);
+EXPORT_SYMBOL(__lruvec_stat_mod_folio);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
@@ -1628,7 +1647,7 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
*
* Current memory state:
*/
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
u64 size;
@@ -4178,7 +4197,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
int nid;
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
seq_printf(m, "%s=%lu", stat->name,
@@ -4259,7 +4278,7 @@ static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
unsigned long nr;
@@ -4755,7 +4774,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
struct mem_cgroup *parent;
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
@@ -5518,6 +5537,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
memcg->zswap_max = PAGE_COUNTER_MAX;
+ WRITE_ONCE(memcg->zswap_writeback,
+ !parent || READ_ONCE(parent->zswap_writeback));
#endif
page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
if (parent) {
@@ -5614,6 +5635,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
page_counter_set_min(&memcg->memory, 0);
page_counter_set_low(&memcg->memory, 0);
+ zswap_memcg_offline_cleanup(memcg);
+
memcg_offline_kmem(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
@@ -5784,6 +5807,10 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
}
}
}
+ statc->stats_updates = 0;
+ /* We are in a per-cpu loop here, only do the atomic write once */
+ if (atomic64_read(&memcg->vmstats->stats_updates))
+ atomic64_set(&memcg->vmstats->stats_updates, 0);
}
#ifdef CONFIG_MMU
@@ -6783,6 +6810,10 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
return nbytes;
}
+/*
+ * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
+ * if any new events become available.
+ */
static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
{
seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
@@ -6839,7 +6870,7 @@ static int memory_numa_stat_show(struct seq_file *m, void *v)
int i;
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(memcg);
for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
int nid;
@@ -8081,7 +8112,11 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
break;
}
- cgroup_rstat_flush(memcg->css.cgroup);
+ /*
+ * mem_cgroup_flush_stats() ignores small changes. Use
+ * do_flush_stats() directly to get accurate stats for charging.
+ */
+ do_flush_stats(memcg);
pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
if (pages < max)
continue;
@@ -8143,11 +8178,19 @@ void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
rcu_read_unlock();
}
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+ /* if zswap is disabled, do not block pages going to the swapping device */
+ return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
+}
+
static u64 zswap_current_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- cgroup_rstat_flush(css->cgroup);
- return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+ mem_cgroup_flush_stats(memcg);
+ return memcg_page_state(memcg, MEMCG_ZSWAP_B);
}
static int zswap_max_show(struct seq_file *m, void *v)
@@ -8173,6 +8216,31 @@ static ssize_t zswap_max_write(struct kernfs_open_file *of,
return nbytes;
}
+static int zswap_writeback_show(struct seq_file *m, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
+
+ seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
+ return 0;
+}
+
+static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ int zswap_writeback;
+ ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
+
+ if (parse_ret)
+ return parse_ret;
+
+ if (zswap_writeback != 0 && zswap_writeback != 1)
+ return -EINVAL;
+
+ WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
+ return nbytes;
+}
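A user-space sketch of toggling the new knob; the cgroup path is an example, not part of the patch:

#include <stdio.h>

int main(void)
{
	/* cgroup path is an example; adjust to the target cgroup */
	FILE *f = fopen("/sys/fs/cgroup/workload/memory.zswap.writeback", "w");

	if (!f)
		return 1;
	/* 0: keep this cgroup's pages in zswap, never write them back to swap */
	fputs("0", f);
	return fclose(f);
}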
+
static struct cftype zswap_files[] = {
{
.name = "zswap.current",
@@ -8185,6 +8253,11 @@ static struct cftype zswap_files[] = {
.seq_show = zswap_max_show,
.write = zswap_max_write,
},
+ {
+ .name = "zswap.writeback",
+ .seq_show = zswap_writeback_show,
+ .write = zswap_writeback_write,
+ },
{ } /* terminate */
};
#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 455093f73a70..a0d9b4ac7d54 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -901,39 +901,38 @@ static const char * const action_page_types[] = {
* The page count will stop it from being freed by unpoison.
* Stress tests should be aware of this memory leak problem.
*/
-static int delete_from_lru_cache(struct page *p)
+static int delete_from_lru_cache(struct folio *folio)
{
- if (isolate_lru_page(p)) {
+ if (folio_isolate_lru(folio)) {
/*
* Clear sensible page flags, so that the buddy system won't
- * complain when the page is unpoison-and-freed.
+ * complain when the folio is unpoison-and-freed.
*/
- ClearPageActive(p);
- ClearPageUnevictable(p);
+ folio_clear_active(folio);
+ folio_clear_unevictable(folio);
/*
* Poisoned page might never drop its ref count to 0 so we have
* to uncharge it manually from its memcg.
*/
- mem_cgroup_uncharge(page_folio(p));
+ mem_cgroup_uncharge(folio);
/*
- * drop the page count elevated by isolate_lru_page()
+ * drop the refcount elevated by folio_isolate_lru()
*/
- put_page(p);
+ folio_put(folio);
return 0;
}
return -EIO;
}
-static int truncate_error_page(struct page *p, unsigned long pfn,
+static int truncate_error_folio(struct folio *folio, unsigned long pfn,
struct address_space *mapping)
{
int ret = MF_FAILED;
- if (mapping->a_ops->error_remove_page) {
- struct folio *folio = page_folio(p);
- int err = mapping->a_ops->error_remove_page(mapping, p);
+ if (mapping->a_ops->error_remove_folio) {
+ int err = mapping->a_ops->error_remove_folio(mapping, folio);
if (err != 0)
pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
@@ -946,7 +945,7 @@ static int truncate_error_page(struct page *p, unsigned long pfn,
* If the file system doesn't support it just invalidate
* This fails on dirty or anything with private pages
*/
- if (invalidate_inode_page(p))
+ if (mapping_evict_folio(mapping, folio))
ret = MF_RECOVERED;
else
pr_info("%#lx: Failed to invalidate\n", pfn);
@@ -1013,17 +1012,18 @@ static int me_unknown(struct page_state *ps, struct page *p)
*/
static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
+ struct folio *folio = page_folio(p);
int ret;
struct address_space *mapping;
bool extra_pins;
- delete_from_lru_cache(p);
+ delete_from_lru_cache(folio);
/*
- * For anonymous pages we're done the only reference left
+ * For anonymous folios the only reference left
* should be the one m_f() holds.
*/
- if (PageAnon(p)) {
+ if (folio_test_anon(folio)) {
ret = MF_RECOVERED;
goto out;
}
@@ -1035,11 +1035,9 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
* has a reference, because it could be file system metadata
* and that's not safe to truncate.
*/
- mapping = page_mapping(p);
+ mapping = folio_mapping(folio);
if (!mapping) {
- /*
- * Page has been teared down in the meanwhile
- */
+ /* Folio has been torn down in the meantime */
ret = MF_FAILED;
goto out;
}
@@ -1055,12 +1053,12 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
*
* Open: to take i_rwsem or not for this? Right now we don't.
*/
- ret = truncate_error_page(p, page_to_pfn(p), mapping);
+ ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
if (has_extra_refcount(ps, p, extra_pins))
ret = MF_FAILED;
out:
- unlock_page(p);
+ folio_unlock(folio);
return ret;
}
@@ -1138,15 +1136,16 @@ static int me_pagecache_dirty(struct page_state *ps, struct page *p)
*/
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
+ struct folio *folio = page_folio(p);
int ret;
bool extra_pins = false;
- ClearPageDirty(p);
+ folio_clear_dirty(folio);
/* Trigger EIO in shmem: */
- ClearPageUptodate(p);
+ folio_clear_uptodate(folio);
- ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
- unlock_page(p);
+ ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
+ folio_unlock(folio);
if (ret == MF_DELAYED)
extra_pins = true;
@@ -1164,7 +1163,7 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
delete_from_swap_cache(folio);
- ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+ ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
folio_unlock(folio);
if (has_extra_refcount(ps, p, false))
@@ -1181,25 +1180,25 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
*/
static int me_huge_page(struct page_state *ps, struct page *p)
{
+ struct folio *folio = page_folio(p);
int res;
- struct page *hpage = compound_head(p);
struct address_space *mapping;
bool extra_pins = false;
- mapping = page_mapping(hpage);
+ mapping = folio_mapping(folio);
if (mapping) {
- res = truncate_error_page(hpage, page_to_pfn(p), mapping);
+ res = truncate_error_folio(folio, page_to_pfn(p), mapping);
/* The page is kept in page cache. */
extra_pins = true;
- unlock_page(hpage);
+ folio_unlock(folio);
} else {
- unlock_page(hpage);
+ folio_unlock(folio);
/*
* migration entry prevents later access on error hugepage,
* so we can free and dissolve it into buddy to save healthy
* subpages.
*/
- put_page(hpage);
+ folio_put(folio);
if (__page_handle_poison(p) >= 0) {
page_ref_inc(p);
res = MF_RECOVERED;
@@ -2316,8 +2315,8 @@ try_again:
* We use page flags to determine what action should be taken, but
* the flags can be modified by the error containment action. One
* example is an mlocked page, where PG_mlocked is cleared by
- * page_remove_rmap() in try_to_unmap_one(). So to determine page status
- * correctly, we save a copy of the page flags at this time.
+ * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
+ * status correctly, we save a copy of the page flags at this time.
*/
page_flags = p->flags;
@@ -2601,37 +2600,37 @@ unlock_mutex:
}
EXPORT_SYMBOL(unpoison_memory);
-static bool isolate_page(struct page *page, struct list_head *pagelist)
+static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
{
bool isolated = false;
- if (PageHuge(page)) {
- isolated = isolate_hugetlb(page_folio(page), pagelist);
+ if (folio_test_hugetlb(folio)) {
+ isolated = isolate_hugetlb(folio, pagelist);
} else {
- bool lru = !__PageMovable(page);
+ bool lru = !__folio_test_movable(folio);
if (lru)
- isolated = isolate_lru_page(page);
+ isolated = folio_isolate_lru(folio);
else
- isolated = isolate_movable_page(page,
+ isolated = isolate_movable_page(&folio->page,
ISOLATE_UNEVICTABLE);
if (isolated) {
- list_add(&page->lru, pagelist);
+ list_add(&folio->lru, pagelist);
if (lru)
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_lru(page));
+ node_stat_add_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
}
}
/*
- * If we succeed to isolate the page, we grabbed another refcount on
- * the page, so we can safely drop the one we got from get_any_page().
- * If we failed to isolate the page, it means that we cannot go further
+ * If we succeed to isolate the folio, we grabbed another refcount on
+ * the folio, so we can safely drop the one we got from get_any_page().
+ * If we failed to isolate the folio, it means that we cannot go further
* and we will return an error, so drop the reference we got from
* get_any_page() as well.
*/
- put_page(page);
+ folio_put(folio);
return isolated;
}
@@ -2644,40 +2643,40 @@ static int soft_offline_in_use_page(struct page *page)
{
long ret = 0;
unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_head(page);
+ struct folio *folio = page_folio(page);
char const *msg_page[] = {"page", "hugepage"};
- bool huge = PageHuge(page);
+ bool huge = folio_test_hugetlb(folio);
LIST_HEAD(pagelist);
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};
- if (!huge && PageTransHuge(hpage)) {
+ if (!huge && folio_test_large(folio)) {
if (try_to_split_thp_page(page)) {
pr_info("soft offline: %#lx: thp split failed\n", pfn);
return -EBUSY;
}
- hpage = page;
+ folio = page_folio(page);
}
- lock_page(page);
+ folio_lock(folio);
if (!huge)
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
if (PageHWPoison(page)) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
pr_info("soft offline: %#lx page already poisoned\n", pfn);
return 0;
}
- if (!huge && PageLRU(page) && !PageSwapCache(page))
+ if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
/*
* Try to invalidate first. This should work for
* non dirty unmapped page cache pages.
*/
- ret = invalidate_inode_page(page);
- unlock_page(page);
+ ret = mapping_evict_folio(folio_mapping(folio), folio);
+ folio_unlock(folio);
if (ret) {
pr_info("soft_offline: %#lx: invalidated\n", pfn);
@@ -2685,7 +2684,7 @@ static int soft_offline_in_use_page(struct page *page)
return 0;
}
- if (isolate_page(hpage, &pagelist)) {
+ if (mf_isolate_folio(folio, &pagelist)) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
diff --git a/mm/memory.c b/mm/memory.c
index 6e0712d06cd4..7e1f4849463a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -123,9 +123,7 @@ static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
/*
* A number of key systems in x86 including ioremap() rely on the assumption
* that high_memory defines the upper bound on direct map memory, then end
- * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
- * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
- * and ZONE_HIGHMEM.
+ * of ZONE_NORMAL.
*/
void *high_memory;
EXPORT_SYMBOL(high_memory);
@@ -374,6 +372,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
* be 0. This will underflow and is okay.
*/
next = mas_find(mas, ceiling - 1);
+ if (unlikely(xa_is_zero(next)))
+ next = NULL;
/*
* Hide vma from rmap and truncate_pagecache before freeing
@@ -395,6 +395,8 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
&& !is_vm_hugetlb_page(next)) {
vma = next;
next = mas_find(mas, ceiling - 1);
+ if (unlikely(xa_is_zero(next)))
+ next = NULL;
if (mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
@@ -706,6 +708,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
struct page *page, unsigned long address,
pte_t *ptep)
{
+ struct folio *folio = page_folio(page);
pte_t orig_pte;
pte_t pte;
swp_entry_t entry;
@@ -721,14 +724,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
else if (is_writable_device_exclusive_entry(entry))
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
+ VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
+ PageAnonExclusive(page)), folio);
/*
* No need to take a page reference as one was already
* created when the swap entry was made.
*/
- if (PageAnon(page))
- page_add_anon_rmap(page, vma, address, RMAP_NONE);
+ if (folio_test_anon(folio))
+ folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
else
/*
* Currently device exclusive access only supports anonymous
@@ -779,6 +783,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
unsigned long vm_flags = dst_vma->vm_flags;
pte_t orig_pte = ptep_get(src_pte);
pte_t pte = orig_pte;
+ struct folio *folio;
struct page *page;
swp_entry_t entry = pte_to_swp_entry(orig_pte);
@@ -823,6 +828,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
} else if (is_device_private_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
+ folio = page_folio(page);
/*
* Update rss count even for unaddressable pages, as
@@ -833,10 +839,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* for unaddressable pages, at some point. But for now
* keep things as they are.
*/
- get_page(page);
+ folio_get(folio);
rss[mm_counter(page)]++;
/* Cannot fail as these pages cannot get pinned. */
- BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
+ folio_try_dup_anon_rmap_pte(folio, page, src_vma);
/*
* We do not preserve soft-dirty information, because so
@@ -950,7 +956,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
* future.
*/
folio_get(folio);
- if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
+ if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
/* Page may be pinned, we have to copy. */
folio_put(folio);
return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
@@ -959,7 +965,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
rss[MM_ANONPAGES]++;
} else if (page) {
folio_get(folio);
- page_dup_file_rmap(page, false);
+ folio_dup_file_rmap_pte(folio, page);
rss[mm_counter_file(page)]++;
}
@@ -988,12 +994,17 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
return 0;
}
-static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
- struct vm_area_struct *vma, unsigned long addr)
+static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
+ struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
struct folio *new_folio;
- new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+ if (need_zero)
+ new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
+ else
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+ addr, false);
+
if (!new_folio)
return NULL;
@@ -1125,7 +1136,7 @@ again:
} else if (ret == -EBUSY) {
goto out;
} else if (ret == -EAGAIN) {
- prealloc = page_copy_prealloc(src_mm, src_vma, addr);
+ prealloc = folio_prealloc(src_mm, src_vma, addr, false);
if (!prealloc)
return -ENOMEM;
} else if (ret) {
@@ -1423,6 +1434,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = ptep_get(pte);
+ struct folio *folio;
struct page *page;
if (pte_none(ptent))
@@ -1448,21 +1460,22 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
}
+ folio = page_folio(page);
delay_rmap = 0;
- if (!PageAnon(page)) {
+ if (!folio_test_anon(folio)) {
if (pte_dirty(ptent)) {
- set_page_dirty(page);
+ folio_set_dirty(folio);
if (tlb_delay_rmap(tlb)) {
delay_rmap = 1;
force_flush = 1;
}
}
if (pte_young(ptent) && likely(vma_has_recency(vma)))
- mark_page_accessed(page);
+ folio_mark_accessed(folio);
}
rss[mm_counter(page)]--;
if (!delay_rmap) {
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(folio, page, vma);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
}
@@ -1478,6 +1491,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (is_device_private_entry(entry) ||
is_device_exclusive_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
+ folio = page_folio(page);
if (unlikely(!should_zap_page(details, page)))
continue;
/*
@@ -1489,8 +1503,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
WARN_ON_ONCE(!vma_is_anonymous(vma));
rss[mm_counter(page)]--;
if (is_device_private_entry(entry))
- page_remove_rmap(page, vma, false);
- put_page(page);
+ folio_remove_rmap_pte(folio, page, vma);
+ folio_put(folio);
} else if (!non_swap_entry(entry)) {
/* Genuine swap entry, hence a private anon page */
if (!should_zap_cows(details))
@@ -1744,7 +1758,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
unmap_single_vma(tlb, vma, start, end, &details,
mm_wr_locked);
hugetlb_zap_end(vma, &details);
- } while ((vma = mas_find(mas, tree_end - 1)) != NULL);
+ vma = mas_find(mas, tree_end - 1);
+ } while (vma && likely(!xa_is_zero(vma)));
mmu_notifier_invalidate_range_end(&range);
}
@@ -1837,21 +1852,26 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
static int validate_page_before_insert(struct page *page)
{
- if (PageAnon(page) || PageSlab(page) || page_has_type(page))
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_anon(folio) || folio_test_slab(folio) ||
+ page_has_type(page))
return -EINVAL;
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
return 0;
}
static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
unsigned long addr, struct page *page, pgprot_t prot)
{
+ struct folio *folio = page_folio(page);
+
if (!pte_none(ptep_get(pte)))
return -EBUSY;
/* Ok, finally just insert the thing.. */
- get_page(page);
+ folio_get(folio);
inc_mm_counter(vma->vm_mm, mm_counter_file(page));
- page_add_file_rmap(page, vma, false);
+ folio_add_file_rmap_pte(folio, page, vma);
set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
return 0;
}
@@ -2836,7 +2856,8 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src,
* just copying from the original user address. If that
* fails, we just zero-fill it. Live with it.
*/
- kaddr = kmap_atomic(dst);
+ kaddr = kmap_local_page(dst);
+ pagefault_disable();
uaddr = (void __user *)(addr & PAGE_MASK);
/*
@@ -2904,7 +2925,8 @@ warn:
pte_unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
- kunmap_atomic(kaddr);
+ pagefault_enable();
+ kunmap_local(kaddr);
flush_dcache_page(dst);
return ret;
@@ -3102,6 +3124,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
int page_copied = 0;
struct mmu_notifier_range range;
vm_fault_t ret;
+ bool pfn_is_zero;
delayacct_wpcopy_start();
@@ -3111,16 +3134,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
if (unlikely(ret))
goto out;
- if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
- new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
- if (!new_folio)
- goto oom;
- } else {
+ pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
+ new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
+ if (!new_folio)
+ goto oom;
+
+ if (!pfn_is_zero) {
int err;
- new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
- vmf->address, false);
- if (!new_folio)
- goto oom;
err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
if (err) {
@@ -3141,10 +3161,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
kmsan_copy_page_meta(&new_folio->page, vmf->page);
}
- if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
- goto oom_free_new;
- folio_throttle_swaprate(new_folio, GFP_KERNEL);
-
__folio_mark_uptodate(new_folio);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
@@ -3207,10 +3223,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* threads.
*
* The critical issue is to order this
- * page_remove_rmap with the ptp_clear_flush above.
- * Those stores are ordered by (if nothing else,)
+ * folio_remove_rmap_pte() with the ptp_clear_flush
+ * above. Those stores are ordered by (if nothing else,)
* the barrier present in the atomic_add_negative
- * in page_remove_rmap.
+ * in folio_remove_rmap_pte().
*
* Then the TLB flush in ptep_clear_flush ensures that
* no process can access the old page before the
@@ -3219,7 +3235,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
* mapcount is visible. So transitively, TLBs to
* old page will be flushed before it can be reused.
*/
- page_remove_rmap(vmf->page, vma, false);
+ folio_remove_rmap_pte(old_folio, vmf->page, vma);
}
/* Free the old page.. */
@@ -3243,8 +3259,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
delayacct_wpcopy_end();
return 0;
-oom_free_new:
- folio_put(new_folio);
oom:
ret = VM_FAULT_OOM;
out:
@@ -3875,9 +3889,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio_add_lru(folio);
- /* To provide entry to swap_readpage() */
+ /* To provide entry to swap_read_folio() */
folio->swap = entry;
- swap_readpage(page, true, NULL);
+ swap_read_folio(folio, true, NULL);
folio->private = NULL;
}
} else {
@@ -3935,15 +3949,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* page->index of !PageKSM() pages would be nonlinear inside the
* anon VMA -- PageKSM() is lost on actual swapout.
*/
- page = ksm_might_need_to_copy(page, vma, vmf->address);
- if (unlikely(!page)) {
+ folio = ksm_might_need_to_copy(folio, vma, vmf->address);
+ if (unlikely(!folio)) {
ret = VM_FAULT_OOM;
+ folio = swapcache;
goto out_page;
- } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+ } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
ret = VM_FAULT_HWPOISON;
+ folio = swapcache;
goto out_page;
}
- folio = page_folio(page);
+ if (folio != swapcache)
+ page = folio_page(folio, 0);
/*
* If we want to map a page that's in the swapcache writable, we
@@ -4061,10 +4078,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
/* ksm created a completely new copy */
if (unlikely(folio != swapcache && swapcache)) {
- page_add_new_anon_rmap(page, vma, vmf->address);
+ folio_add_new_anon_rmap(folio, vma, vmf->address);
folio_add_lru_vma(folio, vma);
} else {
- page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
+ folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+ rmap_flags);
}
VM_BUG_ON(!folio_test_anon(folio) ||
@@ -4118,6 +4136,84 @@ out_release:
return ret;
}
+static bool pte_range_none(pte_t *pte, int nr_pages)
+{
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (!pte_none(ptep_get_lockless(pte + i)))
+ return false;
+ }
+
+ return true;
+}
+
+static struct folio *alloc_anon_folio(struct vm_fault *vmf)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long orders;
+ struct folio *folio;
+ unsigned long addr;
+ pte_t *pte;
+ gfp_t gfp;
+ int order;
+
+ /*
+ * If uffd is active for the vma we need per-page fault fidelity to
+ * maintain the uffd semantics.
+ */
+ if (unlikely(userfaultfd_armed(vma)))
+ goto fallback;
+
+ /*
+ * Get a list of all the (large) orders below PMD_ORDER that are enabled
+ * for this vma. Then filter out the orders that can't be allocated over
+ * the faulting address and still be fully contained in the vma.
+ */
+ orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
+ BIT(PMD_ORDER) - 1);
+ orders = thp_vma_suitable_orders(vma, vmf->address, orders);
+
+ if (!orders)
+ goto fallback;
+
+ pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
+ if (!pte)
+ return ERR_PTR(-EAGAIN);
+
+ /*
+ * Find the highest order where the aligned range is completely
+ * pte_none(). Note that all remaining orders will be completely
+ * pte_none().
+ */
+ order = highest_order(orders);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ if (pte_range_none(pte + pte_index(addr), 1 << order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ pte_unmap(pte);
+
+ /* Try allocating the highest of the remaining orders. */
+ gfp = vma_thp_gfp_mask(vma);
+ while (orders) {
+ addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+ folio = vma_alloc_folio(gfp, order, vma, addr, true);
+ if (folio) {
+ clear_huge_page(&folio->page, vmf->address, 1 << order);
+ return folio;
+ }
+ order = next_order(&orders, order);
+ }
+
+fallback:
+#endif
+ return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address);
+}
+
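
alloc_anon_folio() walks the enabled THP orders from highest to lowest, keeps only the orders whose naturally aligned range around the fault address is still entirely pte_none(), and then tries to allocate the largest surviving order before falling back to a single page. A user-space sketch of that scan, with a made-up is_range_free() standing in for pte_range_none(), only to illustrate the walk:

    #include <stdio.h>

    /* Hypothetical stand-in for pte_range_none(): is the aligned block of
     * (1 << order) pages at addr still unpopulated? */
    static int is_range_free(unsigned long addr, int order)
    {
            return order <= 2;              /* pretend only order <= 2 fits */
    }

    int main(void)
    {
            unsigned long orders = (1UL << 4) | (1UL << 2) | (1UL << 0);
            unsigned long fault_addr = 0x12345000UL;
            int order;

            /* Highest-to-lowest scan, as in alloc_anon_folio(). */
            for (order = 9 /* assumed PMD_ORDER - 1 */; order >= 0; order--) {
                    unsigned long addr;

                    if (!(orders & (1UL << order)))
                            continue;
                    addr = fault_addr & ~(((1UL << order) << 12) - 1);
                    if (is_range_free(addr, order)) {
                            printf("allocate an order-%d folio at %#lx\n",
                                   order, addr);
                            return 0;
                    }
            }
            printf("fall back to a single zeroed page\n");
            return 0;
    }
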
/*
* We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
@@ -4127,9 +4223,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
struct vm_area_struct *vma = vmf->vma;
+ unsigned long addr = vmf->address;
struct folio *folio;
vm_fault_t ret = 0;
+ int nr_pages = 1;
pte_t entry;
+ int i;
/* File mapping without ->vm_ops ? */
if (vma->vm_flags & VM_SHARED)
@@ -4169,10 +4268,16 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
/* Allocate our own private page. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+ /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
+ folio = alloc_anon_folio(vmf);
+ if (IS_ERR(folio))
+ return 0;
if (!folio)
goto oom;
+ nr_pages = folio_nr_pages(folio);
+ addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
+
if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
goto oom_free_page;
folio_throttle_swaprate(folio, GFP_KERNEL);
@@ -4189,12 +4294,15 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry), vma);
- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
- &vmf->ptl);
+ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
if (!vmf->pte)
goto release;
- if (vmf_pte_changed(vmf)) {
- update_mmu_tlb(vma, vmf->address, vmf->pte);
+ if (nr_pages == 1 && vmf_pte_changed(vmf)) {
+ update_mmu_tlb(vma, addr, vmf->pte);
+ goto release;
+ } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+ for (i = 0; i < nr_pages; i++)
+ update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i);
goto release;
}
@@ -4209,16 +4317,17 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
return handle_userfault(vmf, VM_UFFD_MISSING);
}
- inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- folio_add_new_anon_rmap(folio, vma, vmf->address);
+ folio_ref_add(folio, nr_pages - 1);
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+ folio_add_new_anon_rmap(folio, vma, addr);
folio_add_lru_vma(folio, vma);
setpte:
if (uffd_wp)
entry = pte_mkuffd_wp(entry);
- set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
+ set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
/* No need to invalidate - it was non-present before */
- update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+ update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4240,6 +4349,7 @@ oom:
static vm_fault_t __do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
vm_fault_t ret;
/*
@@ -4268,27 +4378,26 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
VM_FAULT_DONE_COW)))
return ret;
+ folio = page_folio(vmf->page);
if (unlikely(PageHWPoison(vmf->page))) {
- struct page *page = vmf->page;
vm_fault_t poisonret = VM_FAULT_HWPOISON;
if (ret & VM_FAULT_LOCKED) {
- if (page_mapped(page))
- unmap_mapping_pages(page_mapping(page),
- page->index, 1, false);
- /* Retry if a clean page was removed from the cache. */
- if (invalidate_inode_page(page))
+ if (page_mapped(vmf->page))
+ unmap_mapping_folio(folio);
+ /* Retry if a clean folio was removed from the cache. */
+ if (mapping_evict_folio(folio->mapping, folio))
poisonret = VM_FAULT_NOPAGE;
- unlock_page(page);
+ folio_unlock(folio);
}
- put_page(page);
+ folio_put(folio);
vmf->page = NULL;
return poisonret;
}
if (unlikely(!(ret & VM_FAULT_LOCKED)))
- lock_page(vmf->page);
+ folio_lock(folio);
else
- VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
+ VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
return ret;
}
@@ -4309,17 +4418,17 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
{
+ struct folio *folio = page_folio(page);
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
pmd_t entry;
vm_fault_t ret = VM_FAULT_FALLBACK;
- if (!transhuge_vma_suitable(vma, haddr))
+ if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
return ret;
- page = compound_head(page);
- if (compound_order(page) != HPAGE_PMD_ORDER)
+ if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
return ret;
/*
@@ -4328,7 +4437,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
* check. This kind of THP just can be PTE mapped. Access to
* the corrupted subpage should trigger SIGBUS as expected.
*/
- if (unlikely(PageHasHWPoisoned(page)))
+ if (unlikely(folio_test_has_hwpoisoned(folio)))
return ret;
/*
@@ -4352,7 +4461,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
- page_add_file_rmap(page, vma, true);
+ folio_add_file_rmap_pmd(folio, page, vma);
/*
* deposit and withdraw with pmd lock held
@@ -4415,7 +4524,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
folio_add_lru_vma(folio, vma);
} else {
add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
- folio_add_file_rmap_range(folio, page, nr, vma, false);
+ folio_add_file_rmap_ptes(folio, page, nr, vma);
}
set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
@@ -4641,6 +4750,7 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
static vm_fault_t do_cow_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
vm_fault_t ret;
ret = vmf_can_call_fault(vmf);
@@ -4649,16 +4759,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
if (ret)
return ret;
- vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
- if (!vmf->cow_page)
+ folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
+ if (!folio)
return VM_FAULT_OOM;
- if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
- GFP_KERNEL)) {
- put_page(vmf->cow_page);
- return VM_FAULT_OOM;
- }
- folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
+ vmf->cow_page = &folio->page;
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
@@ -4667,7 +4772,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
return ret;
copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
- __SetPageUptodate(vmf->cow_page);
+ __folio_mark_uptodate(folio);
ret |= finish_fault(vmf);
unlock_page(vmf->page);
@@ -4676,7 +4781,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
goto uncharge_out;
return ret;
uncharge_out:
- put_page(vmf->cow_page);
+ folio_put(folio);
return ret;
}
@@ -5113,7 +5218,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
- hugepage_vma_check(vma, vm_flags, false, true, true)) {
+ thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5147,7 +5252,7 @@ retry_pud:
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
- hugepage_vma_check(vma, vm_flags, false, true, true)) {
+ thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5850,7 +5955,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
- maddr = kmap(page);
+ maddr = kmap_local_page(page);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
@@ -5859,8 +5964,7 @@ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
- kunmap(page);
- put_page(page);
+ unmap_and_put_page(page, maddr);
}
len -= bytes;
buf += bytes;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7a5fc89a8652..b3c0ff52bb72 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -645,7 +645,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
unsigned long pfn;
/*
- * Online the pages in MAX_ORDER aligned chunks. The callback might
+ * Online the pages in MAX_PAGE_ORDER aligned chunks. The callback might
* decide to not expose all pages to the buddy (e.g., expose them
* later). We account all pages as being online and belonging to this
* zone ("present").
@@ -660,12 +660,13 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
* Free to online pages in the largest chunks alignment allows.
*
* __ffs() behaviour is undefined for 0. start == 0 is
- * MAX_ORDER-aligned, Set order to MAX_ORDER for the case.
+ * MAX_PAGE_ORDER-aligned; set order to MAX_PAGE_ORDER for
+ * that case.
*/
if (pfn)
- order = min_t(int, MAX_ORDER, __ffs(pfn));
+ order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
else
- order = MAX_ORDER;
+ order = MAX_PAGE_ORDER;
(*online_page_callback)(pfn_to_page(pfn), order);
pfn += (1UL << order);
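
This is part of the treewide rename of MAX_ORDER to MAX_PAGE_ORDER; loops elsewhere in the series that used to run with order <= MAX_ORDER are rewritten as order < NR_PAGE_ORDERS, with NR_PAGE_ORDERS defined as MAX_PAGE_ORDER + 1. A tiny sanity sketch (the value 10 is the common default, assumed here, not dictated by this patch):

    #include <assert.h>

    #define MAX_PAGE_ORDER  10                      /* assumed default */
    #define NR_PAGE_ORDERS  (MAX_PAGE_ORDER + 1)

    int main(void)
    {
            int a = 0, b = 0, order;

            /* Both loop styles cover the same set of orders. */
            for (order = 0; order <= MAX_PAGE_ORDER; order++)
                    a++;
            for (order = 0; order < NR_PAGE_ORDERS; order++)
                    b++;
            assert(a == b && a == 11);              /* orders 0..10 */
            return 0;
    }
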
@@ -1380,6 +1381,85 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
return arch_supports_memmap_on_memory(vmemmap_size);
}
+static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size)
+{
+ unsigned long memblock_size = memory_block_size_bytes();
+ u64 cur_start;
+
+ /*
+ * For memmap_on_memory, the altmaps were added on a per-memblock
+ * basis; we have to process each individual memory block.
+ */
+ for (cur_start = start; cur_start < start + size;
+ cur_start += memblock_size) {
+ struct vmem_altmap *altmap = NULL;
+ struct memory_block *mem;
+
+ mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(cur_start)));
+ if (WARN_ON_ONCE(!mem))
+ continue;
+
+ altmap = mem->altmap;
+ mem->altmap = NULL;
+
+ remove_memory_block_devices(cur_start, memblock_size);
+
+ arch_remove_memory(cur_start, memblock_size, altmap);
+
+ /* Verify that all vmemmap pages have actually been freed. */
+ WARN(altmap->alloc, "Altmap not fully unmapped");
+ kfree(altmap);
+ }
+}
+
+static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group,
+ u64 start, u64 size)
+{
+ unsigned long memblock_size = memory_block_size_bytes();
+ u64 cur_start;
+ int ret;
+
+ for (cur_start = start; cur_start < start + size;
+ cur_start += memblock_size) {
+ struct mhp_params params = { .pgprot =
+ pgprot_mhp(PAGE_KERNEL) };
+ struct vmem_altmap mhp_altmap = {
+ .base_pfn = PHYS_PFN(cur_start),
+ .end_pfn = PHYS_PFN(cur_start + memblock_size - 1),
+ };
+
+ mhp_altmap.free = memory_block_memmap_on_memory_pages();
+ params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap),
+ GFP_KERNEL);
+ if (!params.altmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* call arch's memory hotadd */
+ ret = arch_add_memory(nid, cur_start, memblock_size, &params);
+ if (ret < 0) {
+ kfree(params.altmap);
+ goto out;
+ }
+
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(cur_start, memblock_size,
+ params.altmap, group);
+ if (ret) {
+ arch_remove_memory(cur_start, memblock_size, NULL);
+ kfree(params.altmap);
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ if (ret && cur_start != start)
+ remove_memory_blocks_and_altmaps(start, cur_start - start);
+ return ret;
+}
+
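
With MHP_MEMMAP_ON_MEMORY the altmap is now carved out of each memory block instead of once for the whole hotplugged range, so every block self-hosts the struct pages describing it. Rough arithmetic for scale (block size, page size and sizeof(struct page) are assumed typical values, not taken from this patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long block_bytes  = 128UL << 20;   /* assumed block size */
            unsigned long page_size    = 4096;          /* assumed PAGE_SIZE */
            unsigned long sz_page      = 64;            /* assumed sizeof(struct page) */
            unsigned long nr_pages     = block_bytes / page_size;
            unsigned long memmap_bytes = nr_pages * sz_page;

            printf("each %lu MiB block reserves %lu KiB (%lu pages) of itself for its memmap\n",
                   block_bytes >> 20, memmap_bytes >> 10, memmap_bytes / page_size);
            return 0;
    }
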
/*
* NOTE: The caller must call lock_device_hotplug() to serialize hotplug
* and online/offline operations (triggered e.g. by sysfs).
@@ -1390,10 +1470,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
enum memblock_flags memblock_flags = MEMBLOCK_NONE;
- struct vmem_altmap mhp_altmap = {
- .base_pfn = PHYS_PFN(res->start),
- .end_pfn = PHYS_PFN(res->end),
- };
struct memory_group *group = NULL;
u64 start, size;
bool new_node = false;
@@ -1436,30 +1512,22 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
/*
* Self hosted memmap array
*/
- if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
- if (mhp_supports_memmap_on_memory(size)) {
- mhp_altmap.free = memory_block_memmap_on_memory_pages();
- params.altmap = kmalloc(sizeof(struct vmem_altmap), GFP_KERNEL);
- if (!params.altmap) {
- ret = -ENOMEM;
- goto error;
- }
+ if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
+ mhp_supports_memmap_on_memory(memory_block_size_bytes())) {
+ ret = create_altmaps_and_memory_blocks(nid, group, start, size);
+ if (ret)
+ goto error;
+ } else {
+ ret = arch_add_memory(nid, start, size, &params);
+ if (ret < 0)
+ goto error;
- memcpy(params.altmap, &mhp_altmap, sizeof(mhp_altmap));
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(start, size, NULL, group);
+ if (ret) {
+ arch_remove_memory(start, size, params.altmap);
+ goto error;
}
- /* fallback to not using altmap */
- }
-
- /* call arch's memory hotadd */
- ret = arch_add_memory(nid, start, size, &params);
- if (ret < 0)
- goto error_free;
-
- /* create memory block devices after memory was added */
- ret = create_memory_block_devices(start, size, params.altmap, group);
- if (ret) {
- arch_remove_memory(start, size, params.altmap);
- goto error_free;
}
if (new_node) {
@@ -1496,8 +1564,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
walk_memory_blocks(start, size, NULL, online_memory_block);
return ret;
-error_free:
- kfree(params.altmap);
error:
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
@@ -2067,17 +2133,13 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
return 0;
}
-static int test_has_altmap_cb(struct memory_block *mem, void *arg)
+static int count_memory_range_altmaps_cb(struct memory_block *mem, void *arg)
{
- struct memory_block **mem_ptr = (struct memory_block **)arg;
- /*
- * return the memblock if we have altmap
- * and break callback.
- */
- if (mem->altmap) {
- *mem_ptr = mem;
- return 1;
- }
+ u64 *num_altmaps = (u64 *)arg;
+
+ if (mem->altmap)
+ *num_altmaps += 1;
+
return 0;
}
@@ -2151,11 +2213,29 @@ void try_offline_node(int nid)
}
EXPORT_SYMBOL(try_offline_node);
+static int memory_blocks_have_altmaps(u64 start, u64 size)
+{
+ u64 num_memblocks = size / memory_block_size_bytes();
+ u64 num_altmaps = 0;
+
+ if (!mhp_memmap_on_memory())
+ return 0;
+
+ walk_memory_blocks(start, size, &num_altmaps,
+ count_memory_range_altmaps_cb);
+
+ if (num_altmaps == 0)
+ return 0;
+
+ if (WARN_ON_ONCE(num_memblocks != num_altmaps))
+ return -EINVAL;
+
+ return 1;
+}
+
static int __ref try_remove_memory(u64 start, u64 size)
{
- struct memory_block *mem;
- int rc = 0, nid = NUMA_NO_NODE;
- struct vmem_altmap *altmap = NULL;
+ int rc, nid = NUMA_NO_NODE;
BUG_ON(check_hotplug_memory_range(start, size));
@@ -2172,45 +2252,26 @@ static int __ref try_remove_memory(u64 start, u64 size)
if (rc)
return rc;
- /*
- * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in
- * the same granularity it was added - a single memory block.
- */
- if (mhp_memmap_on_memory()) {
- rc = walk_memory_blocks(start, size, &mem, test_has_altmap_cb);
- if (rc) {
- if (size != memory_block_size_bytes()) {
- pr_warn("Refuse to remove %#llx - %#llx,"
- "wrong granularity\n",
- start, start + size);
- return -EINVAL;
- }
- altmap = mem->altmap;
- /*
- * Mark altmap NULL so that we can add a debug
- * check on memblock free.
- */
- mem->altmap = NULL;
- }
- }
-
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
- /*
- * Memory block device removal under the device_hotplug_lock is
- * a barrier against racing online attempts.
- */
- remove_memory_block_devices(start, size);
-
mem_hotplug_begin();
- arch_remove_memory(start, size, altmap);
-
- /* Verify that all vmemmap pages have actually been freed. */
- if (altmap) {
- WARN(altmap->alloc, "Altmap not fully unmapped");
- kfree(altmap);
+ rc = memory_blocks_have_altmaps(start, size);
+ if (rc < 0) {
+ mem_hotplug_done();
+ return rc;
+ } else if (!rc) {
+ /*
+ * Memory block device removal under the device_hotplug_lock is
+ * a barrier against racing online attempts.
+ * No altmaps present; do the removal directly.
+ */
+ remove_memory_block_devices(start, size);
+ arch_remove_memory(start, size, NULL);
+ } else {
+ /* all memblocks in the range have altmaps */
+ remove_memory_blocks_and_altmaps(start, size);
}
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
diff --git a/mm/mempool.c b/mm/mempool.c
index 4759be0ff9de..dbbf0e9fb424 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -56,6 +56,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
static void check_element(mempool_t *pool, void *element)
{
+ /* Skip checking: KASAN might save its metadata in the element. */
+ if (kasan_enabled())
+ return;
+
/* Mempools backed by slab allocator */
if (pool->free == mempool_kfree) {
__check_element(pool, element, (size_t)pool->pool_data);
@@ -64,10 +68,10 @@ static void check_element(mempool_t *pool, void *element)
} else if (pool->free == mempool_free_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
- void *addr = kmap_atomic((struct page *)element);
+ void *addr = kmap_local_page((struct page *)element);
__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
}
@@ -81,6 +85,10 @@ static void __poison_element(void *element, size_t size)
static void poison_element(mempool_t *pool, void *element)
{
+ /* Skip poisoning: KASAN might save its metadata in the element. */
+ if (kasan_enabled())
+ return;
+
/* Mempools backed by slab allocator */
if (pool->alloc == mempool_kmalloc) {
__poison_element(element, (size_t)pool->pool_data);
@@ -89,10 +97,10 @@ static void poison_element(mempool_t *pool, void *element)
} else if (pool->alloc == mempool_alloc_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
- void *addr = kmap_atomic((struct page *)element);
+ void *addr = kmap_local_page((struct page *)element);
__poison_element(addr, 1UL << (PAGE_SHIFT + order));
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
}
#else /* CONFIG_SLUB_DEBUG_ON */
@@ -104,32 +112,34 @@ static inline void poison_element(mempool_t *pool, void *element)
}
#endif /* CONFIG_SLUB_DEBUG_ON */
-static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
+static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
- kasan_slab_free_mempool(element);
+ return kasan_mempool_poison_object(element);
else if (pool->alloc == mempool_alloc_pages)
- kasan_poison_pages(element, (unsigned long)pool->pool_data,
- false);
+ return kasan_mempool_poison_pages(element,
+ (unsigned long)pool->pool_data);
+ return true;
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_kmalloc)
- kasan_unpoison_range(element, (size_t)pool->pool_data);
+ kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
else if (pool->alloc == mempool_alloc_slab)
- kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
+ kasan_mempool_unpoison_object(element,
+ kmem_cache_size(pool->pool_data));
else if (pool->alloc == mempool_alloc_pages)
- kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
- false);
+ kasan_mempool_unpoison_pages(element,
+ (unsigned long)pool->pool_data);
}
static __always_inline void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
poison_element(pool, element);
- kasan_poison_element(pool, element);
- pool->elements[pool->curr_nr++] = element;
+ if (kasan_poison_element(pool, element))
+ pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
@@ -447,6 +457,43 @@ repeat_alloc:
EXPORT_SYMBOL(mempool_alloc);
/**
+ * mempool_alloc_preallocated - allocate an element from preallocated elements
+ * belonging to a specific memory pool
+ * @pool: pointer to the memory pool which was allocated via
+ * mempool_create().
+ *
+ * This function is similar to mempool_alloc(), but it only attempts to
+ * allocate an element from the preallocated elements. It does not sleep
+ * and returns immediately if no preallocated elements are available.
+ *
+ * Return: pointer to the allocated element or %NULL if no elements are
+ * available.
+ */
+void *mempool_alloc_preallocated(mempool_t *pool)
+{
+ void *element;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ if (likely(pool->curr_nr)) {
+ element = remove_element(pool);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ /* paired with rmb in mempool_free(), read comment there */
+ smp_wmb();
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+ */
+ kmemleak_update_trace(element);
+ return element;
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mempool_alloc_preallocated);
+
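
mempool_alloc_preallocated() is a new, never-sleeping entry point that only consumes the pool's reserve and never calls pool->alloc. A hypothetical caller (names invented for illustration) might use it from a context that cannot tolerate the reclaim fallback of mempool_alloc():

    #include <linux/mempool.h>

    /* Illustrative only: take an element from the reserve or tell the
     * caller to defer the work; never sleeps. */
    static void *grab_reserved_element(mempool_t *pool)
    {
            void *elem = mempool_alloc_preallocated(pool);

            if (!elem)
                    return NULL;    /* reserve empty: defer, or retry later */
            return elem;
    }

An element obtained this way is still returned to the pool with mempool_free() as usual.
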
+/**
* mempool_free - return an element to the pool.
* @element: pool element pointer.
* @pool: pointer to the memory pool which was allocated via
diff --git a/mm/memremap.c b/mm/memremap.c
index bee85560a243..9e9fb1972fff 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -7,6 +7,7 @@
#include <linux/memremap.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
+#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
@@ -422,19 +423,6 @@ void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
- /* number of pfns from base where pfn_to_page() is valid */
- if (altmap)
- return altmap->reserve + altmap->free;
- return 0;
-}
-
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
-{
- altmap->alloc -= nr_pfns;
-}
-
/**
* get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
* @pfn: page frame number to lookup page_map
@@ -485,21 +473,11 @@ void free_zone_device_page(struct page *page)
__ClearPageAnonExclusive(page);
/*
- * When a device managed page is freed, the page->mapping field
+ * When a device managed page is freed, the folio->mapping field
* may still contain a (stale) mapping value. For example, the
- * lower bits of page->mapping may still identify the page as an
- * anonymous page. Ultimately, this entire field is just stale
- * and wrong, and it will cause errors if not cleared. One
- * example is:
- *
- * migrate_vma_pages()
- * migrate_vma_insert_page()
- * page_add_new_anon_rmap()
- * __page_set_anon_rmap()
- * ...checks page->mapping, via PageAnon(page) call,
- * and incorrectly concludes that the page is an
- * anonymous page. Therefore, it incorrectly,
- * silently fails to set up the new anon rmap.
+ * lower bits of folio->mapping may still identify the folio as an
+ * anonymous folio. Ultimately, this entire field is just stale
+ * and wrong, and it will cause errors if not cleared.
*
* For other types of ZONE_DEVICE pages, migration is either
* handled differently or not done at all, so there is no need
diff --git a/mm/migrate.c b/mm/migrate.c
index 36c011a36b71..bde8273cf15b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -249,20 +249,20 @@ static bool remove_migration_pte(struct folio *folio,
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
if (folio_test_anon(folio))
- hugepage_add_anon_rmap(folio, vma, pvmw.address,
- rmap_flags);
+ hugetlb_add_anon_rmap(folio, vma, pvmw.address,
+ rmap_flags);
else
- page_dup_file_rmap(new, true);
+ hugetlb_add_file_rmap(folio);
set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
psize);
} else
#endif
{
if (folio_test_anon(folio))
- page_add_anon_rmap(new, vma, pvmw.address,
- rmap_flags);
+ folio_add_anon_rmap_pte(folio, new, vma,
+ pvmw.address, rmap_flags);
else
- page_add_file_rmap(new, vma, false);
+ folio_add_file_rmap_pte(folio, new, vma);
set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
}
if (vma->vm_flags & VM_LOCKED)
@@ -1025,38 +1025,31 @@ out:
}
/*
- * To record some information during migration, we use some unused
- * fields (mapping and private) of struct folio of the newly allocated
- * destination folio. This is safe because nobody is using them
- * except us.
+ * To record some information during migration, we use the unused
+ * private field of struct folio of the newly allocated destination
+ * folio. This is safe because nobody is using it except us.
*/
-union migration_ptr {
- struct anon_vma *anon_vma;
- struct address_space *mapping;
-};
-
enum {
PAGE_WAS_MAPPED = BIT(0),
PAGE_WAS_MLOCKED = BIT(1),
+ PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};
static void __migrate_folio_record(struct folio *dst,
- unsigned long old_page_state,
+ int old_page_state,
struct anon_vma *anon_vma)
{
- union migration_ptr ptr = { .anon_vma = anon_vma };
- dst->mapping = ptr.mapping;
- dst->private = (void *)old_page_state;
+ dst->private = (void *)anon_vma + old_page_state;
}
static void __migrate_folio_extract(struct folio *dst,
int *old_page_state,
struct anon_vma **anon_vmap)
{
- union migration_ptr ptr = { .mapping = dst->mapping };
- *anon_vmap = ptr.anon_vma;
- *old_page_state = (unsigned long)dst->private;
- dst->mapping = NULL;
+ unsigned long private = (unsigned long)dst->private;
+
+ *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
+ *old_page_state = private & PAGE_OLD_STATES;
dst->private = NULL;
}
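
Dropping the union means the anon_vma pointer and the two PAGE_WAS_* bits now share the single private field, which works because struct anon_vma is aligned well beyond 4 bytes, leaving its low bits free. A stand-alone sketch of the same packing trick (hypothetical types, user-space C; the kernel code uses void-pointer arithmetic instead of an explicit OR, which is equivalent here):

    #include <assert.h>
    #include <stdint.h>

    #define WAS_MAPPED      0x1UL
    #define WAS_MLOCKED     0x2UL
    #define OLD_STATES      (WAS_MAPPED | WAS_MLOCKED)

    struct fake_anon_vma { long pad; };         /* alignment >= 4 assumed */

    static void *pack(struct fake_anon_vma *av, unsigned long state)
    {
            assert(((uintptr_t)av & OLD_STATES) == 0);  /* low bits must be free */
            return (void *)((uintptr_t)av | (state & OLD_STATES));
    }

    static struct fake_anon_vma *unpack(void *priv, unsigned long *state)
    {
            *state = (uintptr_t)priv & OLD_STATES;
            return (struct fake_anon_vma *)((uintptr_t)priv & ~OLD_STATES);
    }
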
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 8ac1f79f754a..b6c27c76e1a0 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -107,6 +107,7 @@ again:
for (; addr < end; addr += PAGE_SIZE, ptep++) {
unsigned long mpfn = 0, pfn;
+ struct folio *folio;
struct page *page;
swp_entry_t entry;
pte_t pte;
@@ -168,41 +169,43 @@ again:
}
/*
- * By getting a reference on the page we pin it and that blocks
+ * By getting a reference on the folio we pin it and that blocks
* any kind of migration. Side effect is that it "freezes" the
* pte.
*
- * We drop this reference after isolating the page from the lru
- * for non device page (device page are not on the lru and thus
+ * We drop this reference after isolating the folio from the lru
+ * for non device folio (device folio are not on the lru and thus
* can't be dropped from it).
*/
- get_page(page);
+ folio = page_folio(page);
+ folio_get(folio);
/*
- * We rely on trylock_page() to avoid deadlock between
+ * We rely on folio_trylock() to avoid deadlock between
* concurrent migrations where each is waiting on the others
- * page lock. If we can't immediately lock the page we fail this
+ * folio lock. If we can't immediately lock the folio we fail this
* migration as it is only best effort anyway.
*
- * If we can lock the page it's safe to set up a migration entry
- * now. In the common case where the page is mapped once in a
+ * If we can lock the folio it's safe to set up a migration entry
+ * now. In the common case where the folio is mapped once in a
* single process setting up the migration entry now is an
* optimisation to avoid walking the rmap later with
* try_to_migrate().
*/
- if (trylock_page(page)) {
+ if (folio_trylock(folio)) {
bool anon_exclusive;
pte_t swp_pte;
flush_cache_page(vma, addr, pte_pfn(pte));
- anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
+ anon_exclusive = folio_test_anon(folio) &&
+ PageAnonExclusive(page);
if (anon_exclusive) {
pte = ptep_clear_flush(vma, addr, ptep);
- if (page_try_share_anon_rmap(page)) {
+ if (folio_try_share_anon_rmap_pte(folio, page)) {
set_pte_at(mm, addr, ptep, pte);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
mpfn = 0;
goto next;
}
@@ -214,7 +217,7 @@ again:
/* Set the dirty flag on the folio now the pte is gone. */
if (pte_dirty(pte))
- folio_mark_dirty(page_folio(page));
+ folio_mark_dirty(folio);
/* Setup special migration page table entry */
if (mpfn & MIGRATE_PFN_WRITE)
@@ -248,16 +251,16 @@ again:
/*
* This is like regular unmap: we remove the rmap and
- * drop page refcount. Page won't be freed, as we took
- * a reference just above.
+ * drop the folio refcount. The folio won't be freed, as
+ * we took a reference just above.
*/
- page_remove_rmap(page, vma, false);
- put_page(page);
+ folio_remove_rmap_pte(folio, page, vma);
+ folio_put(folio);
if (pte_present(pte))
unmapped++;
} else {
- put_page(page);
+ folio_put(folio);
mpfn = 0;
}
@@ -564,6 +567,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
struct page *page,
unsigned long *src)
{
+ struct folio *folio = page_folio(page);
struct vm_area_struct *vma = migrate->vma;
struct mm_struct *mm = vma->vm_mm;
bool flush = false;
@@ -596,17 +600,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
goto abort;
if (unlikely(anon_vma_prepare(vma)))
goto abort;
- if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+ if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
goto abort;
/*
- * The memory barrier inside __SetPageUptodate makes sure that
- * preceding stores to the page contents become visible before
+ * The memory barrier inside __folio_mark_uptodate makes sure that
+ * preceding stores to the folio contents become visible before
* the set_pte_at() write.
*/
- __SetPageUptodate(page);
+ __folio_mark_uptodate(folio);
- if (is_device_private_page(page)) {
+ if (folio_is_device_private(folio)) {
swp_entry_t swp_entry;
if (vma->vm_flags & VM_WRITE)
@@ -617,8 +621,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
page_to_pfn(page));
entry = swp_entry_to_pte(swp_entry);
} else {
- if (is_zone_device_page(page) &&
- !is_device_coherent_page(page)) {
+ if (folio_is_zone_device(folio) &&
+ !folio_is_device_coherent(folio)) {
pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
goto abort;
}
@@ -652,10 +656,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
goto unlock_abort;
inc_mm_counter(mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, vma, addr);
- if (!is_zone_device_page(page))
- lru_cache_add_inactive_or_unevictable(page, vma);
- get_page(page);
+ folio_add_new_anon_rmap(folio, vma, addr);
+ if (!folio_is_zone_device(folio))
+ folio_add_lru_vma(folio, vma);
+ folio_get(folio);
if (flush) {
flush_cache_page(vma, addr, pte_pfn(orig_pte));
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 077bfe393b5e..89dc29f1e6c6 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -796,6 +796,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
* - physical memory bank size is not necessarily the exact multiple of the
* arbitrary section size
* - early reserved memory may not be listed in memblock.memory
+ * - non-memory regions covered by the contiguous flatmem mapping
* - memory layouts defined with memmap= kernel parameter may not align
* nicely with memmap sections
*
@@ -826,7 +827,7 @@ static void __init init_unavailable_range(unsigned long spfn,
}
if (pgcnt)
- pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
+ pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
node, zone_names[zone], pgcnt);
}
@@ -1454,7 +1455,7 @@ static inline void setup_usemap(struct zone *zone) {}
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
- unsigned int order = MAX_ORDER;
+ unsigned int order = MAX_PAGE_ORDER;
/* Check that pageblock_nr_pages has not already been setup */
if (pageblock_order)
@@ -1466,8 +1467,7 @@ void __init set_pageblock_order(void)
/*
* Assume the largest contiguous order of interest is a huge page.
- * This value may be variable depending on boot parameters on IA64 and
- * powerpc.
+ * This value may be variable depending on boot parameters on powerpc.
*/
pageblock_order = order;
}
@@ -1628,8 +1628,8 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
- unsigned long __maybe_unused start = 0;
- unsigned long __maybe_unused offset = 0;
+ unsigned long start, offset, size, end;
+ struct page *map;
/* Skip empty nodes */
if (!pgdat->node_spanned_pages)
@@ -1637,33 +1637,24 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
offset = pgdat->node_start_pfn - start;
- /* ia64 gets its own node_mem_map, before this, without bootmem */
- if (!pgdat->node_mem_map) {
- unsigned long size, end;
- struct page *map;
-
- /*
- * The zone's endpoints aren't required to be MAX_ORDER
- * aligned but the node_mem_map endpoints must be in order
- * for the buddy allocator to function correctly.
- */
- end = pgdat_end_pfn(pgdat);
- end = ALIGN(end, MAX_ORDER_NR_PAGES);
- size = (end - start) * sizeof(struct page);
- map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
- pgdat->node_id, false);
- if (!map)
- panic("Failed to allocate %ld bytes for node %d memory map\n",
- size, pgdat->node_id);
- pgdat->node_mem_map = map + offset;
- }
- pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
- __func__, pgdat->node_id, (unsigned long)pgdat,
- (unsigned long)pgdat->node_mem_map);
-#ifndef CONFIG_NUMA
/*
- * With no DISCONTIG, the global mem_map is just set as node 0's
+ * The zone's endpoints aren't required to be MAX_PAGE_ORDER
+ * aligned but the node_mem_map endpoints must be in order
+ * for the buddy allocator to function correctly.
*/
+ end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
+ size = (end - start) * sizeof(struct page);
+ map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+ pgdat->node_id, false);
+ if (!map)
+ panic("Failed to allocate %ld bytes for node %d memory map\n",
+ size, pgdat->node_id);
+ pgdat->node_mem_map = map + offset;
+ pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
+ __func__, pgdat->node_id, (unsigned long)pgdat,
+ (unsigned long)pgdat->node_mem_map);
+#ifndef CONFIG_NUMA
+ /* the global mem_map is just set as node 0's */
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
@@ -1973,11 +1964,11 @@ static void __init deferred_free_range(unsigned long pfn,
if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
for (i = 0; i < nr_pages; i += pageblock_nr_pages)
set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
- __free_pages_core(page, MAX_ORDER);
+ __free_pages_core(page, MAX_PAGE_ORDER);
return;
}
- /* Accept chunks smaller than MAX_ORDER upfront */
+ /* Accept chunks smaller than MAX_PAGE_ORDER upfront */
accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
for (i = 0; i < nr_pages; i++, page++, pfn++) {
@@ -2000,8 +1991,8 @@ static inline void __init pgdat_init_report_one_done(void)
/*
* Returns true if page needs to be initialized or freed to buddy allocator.
*
- * We check if a current MAX_ORDER block is valid by only checking the validity
- * of the head pfn.
+ * We check if a current MAX_PAGE_ORDER block is valid by only checking the
+ * validity of the head pfn.
*/
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
@@ -2158,8 +2149,8 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
/*
- * Initialize and free pages in MAX_ORDER sized increments so that we
- * can avoid introducing any issues with the buddy allocator.
+ * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
+ * we can avoid introducing any issues with the buddy allocator.
*/
while (spfn < end_pfn) {
deferred_init_maxorder(&i, zone, &spfn, &epfn);
@@ -2300,7 +2291,7 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
}
/*
- * Initialize and free pages in MAX_ORDER sized increments so
+ * Initialize and free pages in MAX_PAGE_ORDER sized increments so
* that we can avoid introducing any issues with the buddy
* allocator.
*/
@@ -2518,7 +2509,7 @@ void *__init alloc_large_system_hash(const char *tablename,
else
table = memblock_alloc_raw(size,
SMP_CACHE_BYTES);
- } else if (get_order(size) > MAX_ORDER || hashdist) {
+ } else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
table = vmalloc_huge(size, gfp_flags);
virt = true;
if (table)
@@ -2765,7 +2756,7 @@ void __init mm_core_init(void)
/*
* page_ext requires contiguous pages,
- * bigger than MAX_ORDER unless SPARSEMEM.
+ * bigger than MAX_PAGE_ORDER unless SPARSEMEM.
*/
page_ext_init_flatmem();
mem_debugging_and_hardening_init();
diff --git a/mm/mmap.c b/mm/mmap.c
index aa82eec17489..b78e83d351d2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2210,42 +2210,7 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned lon
}
#endif
-/*
- * IA64 has some horrid mapping rules: it can expand both up and down,
- * but with various special rules.
- *
- * We'll get rid of this architecture eventually, so the ugliness is
- * temporary.
- */
-#ifdef CONFIG_IA64
-static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
-{
- return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
- REGION_OFFSET(addr) < RGN_MAP_LIMIT;
-}
-
-/*
- * IA64 stacks grow down, but there's a special register backing store
- * that can grow up. Only sequentially, though, so the new address must
- * match vm_end.
- */
-static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
-{
- if (!vma_expand_ok(vma, addr))
- return -EFAULT;
- if (vma->vm_end != (addr & PAGE_MASK))
- return -EFAULT;
- return expand_upwards(vma, addr);
-}
-
-static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
-{
- if (!vma_expand_ok(vma, addr))
- return -EFAULT;
- return expand_downwards(vma, addr);
-}
-
-#elif defined(CONFIG_STACK_GROWSUP)
+#if defined(CONFIG_STACK_GROWSUP)
#define vma_expand_up(vma,addr) expand_upwards(vma, addr)
#define vma_expand_down(vma, addr) (-EFAULT)
@@ -3297,10 +3262,11 @@ void exit_mmap(struct mm_struct *mm)
arch_exit_mmap(mm);
vma = mas_find(&mas, ULONG_MAX);
- if (!vma) {
+ if (!vma || unlikely(xa_is_zero(vma))) {
/* Can happen if dup_mmap() received an OOM */
mmap_read_unlock(mm);
- return;
+ mmap_write_lock(mm);
+ goto destroy;
}
lru_add_drain();
@@ -3335,11 +3301,13 @@ void exit_mmap(struct mm_struct *mm)
remove_vma(vma, true);
count++;
cond_resched();
- } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
+ vma = mas_find(&mas, ULONG_MAX);
+ } while (vma && likely(!xa_is_zero(vma)));
BUG_ON(count != mm->map_count);
trace_exit_mmap(mm);
+destroy:
__mt_destroy(&mm->mm_mt);
mmap_write_unlock(mm);
vm_unacct_memory(nr_accounted);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 4f559f4ddd21..604ddf08affe 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -55,7 +55,7 @@ static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_
if (encoded_page_flags(enc)) {
struct page *page = encoded_page_ptr(enc);
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(page_folio(page), page, vma);
}
}
}
diff --git a/mm/mmzone.c b/mm/mmzone.c
index b594d3f268fe..c01896eca736 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -78,6 +78,7 @@ void lruvec_init(struct lruvec *lruvec)
memset(lruvec, 0, sizeof(struct lruvec));
spin_lock_init(&lruvec->lru_lock);
+ zswap_lruvec_state_init(lruvec);
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 9e6071fde34a..91ccd82097c2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -399,10 +399,11 @@ static int dump_task(struct task_struct *p, void *arg)
return 0;
}
- pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
+ pr_info("[%7d] %5d %5d %8lu %8lu %8lu %8lu %9lu %8ld %8lu %5hd %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
- mm_pgtables_bytes(task->mm),
+ get_mm_counter(task->mm, MM_ANONPAGES), get_mm_counter(task->mm, MM_FILEPAGES),
+ get_mm_counter(task->mm, MM_SHMEMPAGES), mm_pgtables_bytes(task->mm),
get_mm_counter(task->mm, MM_SWAPENTS),
task->signal->oom_score_adj, task->comm);
task_unlock(task);
@@ -423,7 +424,7 @@ static int dump_task(struct task_struct *p, void *arg)
static void dump_tasks(struct oom_control *oc)
{
pr_info("Tasks state (memory values in pages):\n");
- pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
+ pr_info("[ pid ] uid tgid total_vm rss rss_anon rss_file rss_shmem pgtables_bytes swapents oom_score_adj name\n");
if (is_memcg_oom(oc))
mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 05e5c425b3ff..cd4e4ae77c40 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2982,67 +2982,63 @@ bool __folio_end_writeback(struct folio *folio)
return ret;
}
-bool __folio_start_writeback(struct folio *folio, bool keep_write)
+void __folio_start_writeback(struct folio *folio, bool keep_write)
{
long nr = folio_nr_pages(folio);
struct address_space *mapping = folio_mapping(folio);
- bool ret;
int access_ret;
+ VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
+
folio_memcg_lock(folio);
if (mapping && mapping_use_writeback_tags(mapping)) {
XA_STATE(xas, &mapping->i_pages, folio_index(folio));
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = inode_to_bdi(inode);
unsigned long flags;
+ bool on_wblist;
xas_lock_irqsave(&xas, flags);
xas_load(&xas);
- ret = folio_test_set_writeback(folio);
- if (!ret) {
- bool on_wblist;
+ folio_test_set_writeback(folio);
- on_wblist = mapping_tagged(mapping,
- PAGECACHE_TAG_WRITEBACK);
+ on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
- xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
- if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
- struct bdi_writeback *wb = inode_to_wb(inode);
-
- wb_stat_mod(wb, WB_WRITEBACK, nr);
- if (!on_wblist)
- wb_inode_writeback_start(wb);
- }
+ xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+ if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+ struct bdi_writeback *wb = inode_to_wb(inode);
- /*
- * We can come through here when swapping
- * anonymous folios, so we don't necessarily
- * have an inode to track for sync.
- */
- if (mapping->host && !on_wblist)
- sb_mark_inode_writeback(mapping->host);
+ wb_stat_mod(wb, WB_WRITEBACK, nr);
+ if (!on_wblist)
+ wb_inode_writeback_start(wb);
}
+
+ /*
+ * We can come through here when swapping anonymous
+ * folios, so we don't necessarily have an inode to
+ * track for sync.
+ */
+ if (mapping->host && !on_wblist)
+ sb_mark_inode_writeback(mapping->host);
if (!folio_test_dirty(folio))
xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
if (!keep_write)
xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
xas_unlock_irqrestore(&xas, flags);
} else {
- ret = folio_test_set_writeback(folio);
- }
- if (!ret) {
- lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
- zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
+ folio_test_set_writeback(folio);
}
+
+ lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
+ zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
folio_memcg_unlock(folio);
+
access_ret = arch_make_folio_accessible(folio);
/*
* If writeback has been triggered on a page that cannot be made
* accessible, it is too late to recover here.
*/
VM_BUG_ON_FOLIO(access_ret != 0, folio);
-
- return ret;
}
EXPORT_SYMBOL(__folio_start_writeback);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 733732e7e0ba..a01baf0454f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -727,7 +727,7 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
unsigned long higher_page_pfn;
struct page *higher_page;
- if (order >= MAX_ORDER - 1)
+ if (order >= MAX_PAGE_ORDER - 1)
return false;
higher_page_pfn = buddy_pfn & pfn;
@@ -782,7 +782,7 @@ static inline void __free_one_page(struct page *page,
VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
- while (order < MAX_ORDER) {
+ while (order < MAX_PAGE_ORDER) {
if (compaction_capture(capc, page, order, migratetype)) {
__mod_zone_freepage_state(zone, -(1 << order),
migratetype);
@@ -1059,7 +1059,7 @@ static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return deferred_pages_enabled();
- return page_kasan_tag(page) == 0xff;
+ return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}
static void kernel_init_pages(struct page *page, int numpages)
@@ -1086,13 +1086,11 @@ static __always_inline bool free_pages_prepare(struct page *page,
trace_mm_page_free(page, order);
kmsan_free_page(page, order);
+ if (memcg_kmem_online() && PageMemcgKmem(page))
+ __memcg_kmem_uncharge_page(page, order);
+
if (unlikely(PageHWPoison(page)) && !order) {
- /*
- * Do not let hwpoison pages hit pcplists/buddy
- * Untie memcg state and reset page's owner
- */
- if (memcg_kmem_online() && PageMemcgKmem(page))
- __memcg_kmem_uncharge_page(page, order);
+ /* Do not let hwpoison pages hit pcplists/buddy */
reset_page_owner(page, order);
page_table_check_free(page, order);
return false;
@@ -1123,8 +1121,6 @@ static __always_inline bool free_pages_prepare(struct page *page,
}
if (PageMappingFlags(page))
page->mapping = NULL;
- if (memcg_kmem_online() && PageMemcgKmem(page))
- __memcg_kmem_uncharge_page(page, order);
if (is_check_pages_enabled()) {
if (free_page_is_bad(page))
bad++;
@@ -1259,7 +1255,6 @@ static void free_one_page(struct zone *zone,
static void __free_pages_ok(struct page *page, unsigned int order,
fpi_t fpi_flags)
{
- unsigned long flags;
int migratetype;
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
@@ -1274,13 +1269,7 @@ static void __free_pages_ok(struct page *page, unsigned int order,
*/
migratetype = get_pfnblock_migratetype(page, pfn);
- spin_lock_irqsave(&zone->lock, flags);
- if (unlikely(has_isolate_pageblock(zone) ||
- is_migrate_isolate(migratetype))) {
- migratetype = get_pfnblock_migratetype(page, pfn);
- }
- __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
- spin_unlock_irqrestore(&zone->lock, flags);
+ free_one_page(zone, page, pfn, order, migratetype, fpi_flags);
__count_vm_events(PGFREE, 1 << order);
}
@@ -1308,7 +1297,7 @@ void __free_pages_core(struct page *page, unsigned int order)
atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
if (page_contains_unaccepted(page, order)) {
- if (order == MAX_ORDER && __free_unaccepted(page))
+ if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
return;
accept_page(page, order);
@@ -1338,7 +1327,7 @@ void __free_pages_core(struct page *page, unsigned int order)
*
* Note: the function may return non-NULL struct page even for a page block
* which contains a memory hole (i.e. there is no physical memory for a subset
- * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
+ * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which
* will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
* even though the start pfn is online and valid. This should be safe most of
* the time because struct pages are still initialized via init_unavailable_range()
@@ -1571,7 +1560,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
struct page *page;
/* Find a page of the appropriate size in the preferred list */
- for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
+ for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
area = &(zone->free_area[current_order]);
page = get_page_from_free_area(area, migratetype);
if (!page)
@@ -1884,10 +1873,14 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
unsigned long max_managed, flags;
/*
- * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
+ * The number reserved: the minimum is 1 pageblock, the maximum is
+ * roughly 1% of a zone. But if 1% of the zone is smaller than one
+ * pageblock, don't reserve any pageblocks.
* Check is race-prone but harmless.
*/
- max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
+ if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
+ return;
+ max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
if (zone->nr_reserved_highatomic >= max_managed)
return;
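
For a sense of the new bounds (all values assumed for illustration, not taken from the patch): with 4 KiB pages and 2 MiB pageblocks, pageblock_nr_pages is 512, so a zone must manage at least 51200 pages (about 200 MiB) before any highatomic pageblock is reserved, and the cap is rounded up to whole pageblocks:

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            unsigned long pageblock_nr_pages = 512;     /* assumed: 2 MiB / 4 KiB */
            unsigned long managed = 262144;             /* assumed 1 GiB zone */
            unsigned long one_percent = managed / 100;  /* 2621 pages */

            if (one_percent < pageblock_nr_pages) {
                    printf("zone too small: no highatomic reservation\n");
                    return 0;
            }
            printf("max_managed = %lu pages = %lu pageblocks\n",
                   ALIGN_UP(one_percent, pageblock_nr_pages),
                   ALIGN_UP(one_percent, pageblock_nr_pages) / pageblock_nr_pages);
            return 0;
    }
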
@@ -1941,7 +1934,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
continue;
spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &(zone->free_area[order]);
page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
@@ -2025,7 +2018,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
* approximates finding the pageblock with the most free pages, which
* would be too costly to do exactly.
*/
- for (current_order = MAX_ORDER; current_order >= min_order;
+ for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
--current_order) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
@@ -2051,8 +2044,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
return false;
find_smallest:
- for (current_order = order; current_order <= MAX_ORDER;
- current_order++) {
+ for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
start_migratetype, false, &can_steal);
@@ -2064,7 +2056,7 @@ find_smallest:
* This should not happen - we already found a suitable fallback
* when looking for the largest page.
*/
- VM_BUG_ON(current_order > MAX_ORDER);
+ VM_BUG_ON(current_order > MAX_PAGE_ORDER);
do_steal:
page = get_page_from_free_area(area, fallback_mt);
@@ -3007,7 +2999,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
return true;
/* For a high-order request, check at least one suitable page is free */
- for (o = order; o <= MAX_ORDER; o++) {
+ for (o = order; o < NR_PAGE_ORDERS; o++) {
struct free_area *area = &z->free_area[o];
int mt;
@@ -3951,14 +3943,9 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
else
(*no_progress_loops)++;
- /*
- * Make sure we converge to OOM if we cannot make any progress
- * several times in the row.
- */
- if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
- /* Before OOM, exhaust highatomic_reserve */
- return unreserve_highatomic_pageblock(ac, true);
- }
+ if (*no_progress_loops > MAX_RECLAIM_RETRIES)
+ goto out;
+
/*
* Keep reclaiming pages while there is a chance this will lead
@@ -4001,6 +3988,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
schedule_timeout_uninterruptible(1);
else
cond_resched();
+out:
+ /* Before OOM, exhaust highatomic_reserve */
+ if (!ret)
+ return unreserve_highatomic_pageblock(ac, true);
+
return ret;
}
@@ -4541,7 +4533,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
* There are several places where we assume that the order value is sane
* so bail out early if the request is out of bound.
*/
- if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
+ if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
return NULL;
gfp &= gfp_allowed_mask;
@@ -4823,7 +4815,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
* minimum number of pages to satisfy the request. alloc_pages() can only
* allocate memory in power-of-two pages.
*
- * This function is also limited by MAX_ORDER.
+ * This function is also limited by MAX_PAGE_ORDER.
*
* Memory allocated by this function must be released by free_pages_exact().
*
@@ -6381,7 +6373,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
order = 0;
outer_start = start;
while (!PageBuddy(pfn_to_page(outer_start))) {
- if (++order > MAX_ORDER) {
+ if (++order > MAX_PAGE_ORDER) {
outer_start = start;
break;
}
@@ -6635,7 +6627,7 @@ bool is_free_buddy_page(struct page *page)
unsigned long pfn = page_to_pfn(page);
unsigned int order;
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1));
if (PageBuddy(page_head) &&
@@ -6643,7 +6635,7 @@ bool is_free_buddy_page(struct page *page)
break;
}
- return order <= MAX_ORDER;
+ return order <= MAX_PAGE_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);
@@ -6690,7 +6682,7 @@ bool take_page_off_buddy(struct page *page)
bool ret = false;
spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1));
int page_order = buddy_order(page_head);
@@ -6815,9 +6807,9 @@ static bool try_to_accept_memory_one(struct zone *zone)
__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
spin_unlock_irqrestore(&zone->lock, flags);
- accept_page(page, MAX_ORDER);
+ accept_page(page, MAX_PAGE_ORDER);
- __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
+ __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
if (last)
static_branch_dec(&zones_with_unaccepted_pages);
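
As context for the loop-bound changes in this file: the per-zone free_area[] arrays are now sized by NR_PAGE_ORDERS, and assuming the usual definition NR_PAGE_ORDERS == MAX_PAGE_ORDER + 1, iterating with "order < NR_PAGE_ORDERS" covers exactly the same orders as the old "order <= MAX_ORDER". A minimal sketch of the idiom (the helper below is hypothetical, not part of the patch):

/* Hypothetical helper: walk every order of a zone's buddy free lists. */
static unsigned long count_free_pages(struct zone *zone)
{
	unsigned int order;
	unsigned long free = 0;

	/* NR_PAGE_ORDERS == MAX_PAGE_ORDER + 1, so this covers orders 0..MAX_PAGE_ORDER */
	for (order = 0; order < NR_PAGE_ORDERS; order++)
		free += zone->free_area[order].nr_free << order;

	return free;
}
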
diff --git a/mm/page_io.c b/mm/page_io.c
index cb559ae324c6..ae2b49055e43 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -201,7 +201,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
folio_end_writeback(folio);
return 0;
}
- __swap_writepage(&folio->page, wbc);
+ if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
+ folio_mark_dirty(folio);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
+
+ __swap_writepage(folio, wbc);
return 0;
}
@@ -288,16 +293,16 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
mempool_free(sio, sio_pool);
}
-static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
+static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
struct swap_iocb *sio = NULL;
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
struct file *swap_file = sis->swap_file;
- loff_t pos = page_file_offset(page);
+ loff_t pos = folio_file_pos(folio);
- count_swpout_vm_event(page_folio(page));
- set_page_writeback(page);
- unlock_page(page);
+ count_swpout_vm_event(folio);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
if (wbc->swap_plug)
sio = *wbc->swap_plug;
if (sio) {
@@ -315,8 +320,8 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
sio->pages = 0;
sio->len = 0;
}
- bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
- sio->len += thp_size(page);
+ bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+ sio->len += folio_size(folio);
sio->pages += 1;
if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
swap_write_unplug(sio);
@@ -326,17 +331,16 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
*wbc->swap_plug = sio;
}
-static void swap_writepage_bdev_sync(struct page *page,
+static void swap_writepage_bdev_sync(struct folio *folio,
struct writeback_control *wbc, struct swap_info_struct *sis)
{
struct bio_vec bv;
struct bio bio;
- struct folio *folio = page_folio(page);
bio_init(&bio, sis->bdev, &bv, 1,
REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
- bio.bi_iter.bi_sector = swap_page_sector(page);
- __bio_add_page(&bio, page, thp_size(page), 0);
+ bio.bi_iter.bi_sector = swap_folio_sector(folio);
+ bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
bio_associate_blkg_from_page(&bio, folio);
count_swpout_vm_event(folio);
@@ -348,18 +352,17 @@ static void swap_writepage_bdev_sync(struct page *page,
__end_swap_bio_write(&bio);
}
-static void swap_writepage_bdev_async(struct page *page,
+static void swap_writepage_bdev_async(struct folio *folio,
struct writeback_control *wbc, struct swap_info_struct *sis)
{
struct bio *bio;
- struct folio *folio = page_folio(page);
bio = bio_alloc(sis->bdev, 1,
REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
GFP_NOIO);
- bio->bi_iter.bi_sector = swap_page_sector(page);
+ bio->bi_iter.bi_sector = swap_folio_sector(folio);
bio->bi_end_io = end_swap_bio_write;
- __bio_add_page(bio, page, thp_size(page), 0);
+ bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
bio_associate_blkg_from_page(bio, folio);
count_swpout_vm_event(folio);
@@ -368,22 +371,22 @@ static void swap_writepage_bdev_async(struct page *page,
submit_bio(bio);
}
-void __swap_writepage(struct page *page, struct writeback_control *wbc)
+void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
/*
* ->flags can be updated non-atomically (scan_swap_map_slots),
* but that will never affect SWP_FS_OPS, so the data_race
* is safe.
*/
if (data_race(sis->flags & SWP_FS_OPS))
- swap_writepage_fs(page, wbc);
+ swap_writepage_fs(folio, wbc);
else if (sis->flags & SWP_SYNCHRONOUS_IO)
- swap_writepage_bdev_sync(page, wbc, sis);
+ swap_writepage_bdev_sync(folio, wbc, sis);
else
- swap_writepage_bdev_async(page, wbc, sis);
+ swap_writepage_bdev_async(folio, wbc, sis);
}
void swap_write_unplug(struct swap_iocb *sio)
@@ -422,12 +425,11 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
mempool_free(sio, sio_pool);
}
-static void swap_readpage_fs(struct page *page,
- struct swap_iocb **plug)
+static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
struct swap_iocb *sio = NULL;
- loff_t pos = page_file_offset(page);
+ loff_t pos = folio_file_pos(folio);
if (plug)
sio = *plug;
@@ -446,8 +448,8 @@ static void swap_readpage_fs(struct page *page,
sio->pages = 0;
sio->len = 0;
}
- bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
- sio->len += thp_size(page);
+ bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+ sio->len += folio_size(folio);
sio->pages += 1;
if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
swap_read_unplug(sio);
@@ -457,15 +459,15 @@ static void swap_readpage_fs(struct page *page,
*plug = sio;
}
-static void swap_readpage_bdev_sync(struct page *page,
+static void swap_read_folio_bdev_sync(struct folio *folio,
struct swap_info_struct *sis)
{
struct bio_vec bv;
struct bio bio;
bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
- bio.bi_iter.bi_sector = swap_page_sector(page);
- __bio_add_page(&bio, page, thp_size(page), 0);
+ bio.bi_iter.bi_sector = swap_folio_sector(folio);
+ bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
/*
* Keep this task valid during swap readpage because the oom killer may
* attempt to access it in the page fault retry time check.
@@ -477,23 +479,23 @@ static void swap_readpage_bdev_sync(struct page *page,
put_task_struct(current);
}
-static void swap_readpage_bdev_async(struct page *page,
+static void swap_read_folio_bdev_async(struct folio *folio,
struct swap_info_struct *sis)
{
struct bio *bio;
bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
- bio->bi_iter.bi_sector = swap_page_sector(page);
+ bio->bi_iter.bi_sector = swap_folio_sector(folio);
bio->bi_end_io = end_swap_bio_read;
- __bio_add_page(bio, page, thp_size(page), 0);
+ bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
count_vm_event(PSWPIN);
submit_bio(bio);
}
-void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
+void swap_read_folio(struct folio *folio, bool synchronous,
+ struct swap_iocb **plug)
{
- struct folio *folio = page_folio(page);
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
bool workingset = folio_test_workingset(folio);
unsigned long pflags;
bool in_thrashing;
@@ -517,11 +519,11 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
folio_mark_uptodate(folio);
folio_unlock(folio);
} else if (data_race(sis->flags & SWP_FS_OPS)) {
- swap_readpage_fs(page, plug);
+ swap_read_folio_fs(folio, plug);
} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
- swap_readpage_bdev_sync(page, sis);
+ swap_read_folio_bdev_sync(folio, sis);
} else {
- swap_readpage_bdev_async(page, sis);
+ swap_read_folio_bdev_async(folio, sis);
}
if (workingset) {
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index bcf99ba747a0..cd0ea3668253 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -226,7 +226,7 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
*/
if (PageBuddy(page)) {
order = buddy_order(page);
- if (order >= pageblock_order && order < MAX_ORDER) {
+ if (order >= pageblock_order && order < MAX_PAGE_ORDER) {
buddy = find_buddy_page_pfn(page, page_to_pfn(page),
order, NULL);
if (buddy && !is_migrate_isolate_page(buddy)) {
@@ -290,11 +290,12 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* isolate_single_pageblock()
* @migratetype: migrate type to set in error recovery.
*
- * Free and in-use pages can be as big as MAX_ORDER and contain more than one
+ * Free and in-use pages can be as big as MAX_PAGE_ORDER and contain more than one
* pageblock. When not all pageblocks within a page are isolated at the same
* time, free page accounting can go wrong. For example, in the case of
- * MAX_ORDER = pageblock_order + 1, a MAX_ORDER page has two pagelbocks.
- * [ MAX_ORDER ]
+ * MAX_PAGE_ORDER = pageblock_order + 1, a MAX_PAGE_ORDER page has two
+ * pageblocks.
+ * [ MAX_PAGE_ORDER ]
* [ pageblock0 | pageblock1 ]
* When either pageblock is isolated, if it is a free page, the page is not
* split into separate migratetype lists, which is supposed to; if it is an
@@ -451,7 +452,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
* the free page to the right migratetype list.
*
* head_pfn is not used here as a hugetlb page order
- * can be bigger than MAX_ORDER, but after it is
+ * can be bigger than MAX_PAGE_ORDER, but after it is
* freed, the free page order is not. Use pfn within
* the range to find the head of the free page.
*/
@@ -459,7 +460,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
outer_pfn = pfn;
while (!PageBuddy(pfn_to_page(outer_pfn))) {
/* stop if we cannot find the free page */
- if (++order > MAX_ORDER)
+ if (++order > MAX_PAGE_ORDER)
goto failed;
outer_pfn &= ~0UL << order;
}
@@ -660,8 +661,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
int ret;
/*
- * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
- * are not aligned to pageblock_nr_pages.
+ * Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free
+ * pages are not aligned to pageblock_nr_pages.
* Then we just check migratetype first.
*/
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 4f13ce7d2452..5634e5d890f8 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -32,6 +32,8 @@ struct page_owner {
char comm[TASK_COMM_LEN];
pid_t pid;
pid_t tgid;
+ pid_t free_pid;
+ pid_t free_tgid;
};
static bool page_owner_enabled __initdata;
@@ -119,7 +121,6 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
* Sometimes page metadata allocation tracking requires more
* memory to be allocated:
* - when new stack trace is saved to stack depot
- * - when backtrace itself is calculated (ia64)
*/
if (current->in_page_owner)
return dummy_handle;
@@ -152,6 +153,8 @@ void __reset_page_owner(struct page *page, unsigned short order)
page_owner = get_page_owner(page_ext);
page_owner->free_handle = handle;
page_owner->free_ts_nsec = free_ts_nsec;
+ page_owner->free_pid = current->pid;
+ page_owner->free_tgid = current->tgid;
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
@@ -253,6 +256,8 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
new_page_owner->handle = old_page_owner->handle;
new_page_owner->pid = old_page_owner->pid;
new_page_owner->tgid = old_page_owner->tgid;
+ new_page_owner->free_pid = old_page_owner->free_pid;
+ new_page_owner->free_tgid = old_page_owner->free_tgid;
new_page_owner->ts_nsec = old_page_owner->ts_nsec;
new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
strcpy(new_page_owner->comm, old_page_owner->comm);
@@ -315,7 +320,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
unsigned long freepage_order;
freepage_order = buddy_order_unsafe(page);
- if (freepage_order <= MAX_ORDER)
+ if (freepage_order <= MAX_PAGE_ORDER)
pfn += (1UL << freepage_order) - 1;
continue;
}
@@ -495,7 +500,8 @@ void __dump_page_owner(const struct page *page)
if (!handle) {
pr_alert("page_owner free stack trace missing\n");
} else {
- pr_alert("page last free stack trace:\n");
+ pr_alert("page last free pid %d tgid %d stack trace:\n",
+ page_owner->free_pid, page_owner->free_tgid);
stack_depot_print(handle);
}
@@ -549,7 +555,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (PageBuddy(page)) {
unsigned long freepage_order = buddy_order_unsafe(page);
- if (freepage_order <= MAX_ORDER)
+ if (freepage_order <= MAX_PAGE_ORDER)
pfn += (1UL << freepage_order) - 1;
continue;
}
@@ -657,7 +663,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
if (PageBuddy(page)) {
unsigned long order = buddy_order_unsafe(page);
- if (order > 0 && order <= MAX_ORDER)
+ if (order > 0 && order <= MAX_PAGE_ORDER)
pfn += (1UL << order) - 1;
continue;
}
diff --git a/mm/page_poison.c b/mm/page_poison.c
index b4f456437b7e..3e9037363cf9 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -21,13 +21,13 @@ early_param("page_poison", early_page_poison_param);
static void poison_page(struct page *page)
{
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
/* KASAN still think the page is in-use, so skip it. */
kasan_disable_current();
memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
kasan_enable_current();
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
void __kernel_poison_pages(struct page *page, int n)
@@ -77,7 +77,7 @@ static void unpoison_page(struct page *page)
{
void *addr;
- addr = kmap_atomic(page);
+ addr = kmap_local_page(page);
kasan_disable_current();
/*
* Page poisoning when enabled poisons each and every page
@@ -86,7 +86,7 @@ static void unpoison_page(struct page *page)
*/
check_poison_mem(page, kasan_reset_tag(addr), PAGE_SIZE);
kasan_enable_current();
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
void __kernel_unpoison_pages(struct page *page, int n)
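
The page_poison.c conversion above swaps the kmap_atomic()/kunmap_atomic() pairs for kmap_local_page()/kunmap_local(), which provide a CPU-local temporary mapping without disabling preemption; mappings only need to be released in reverse (nested) order. A minimal sketch of the pattern, with a hypothetical helper:

/* Hypothetical helper: fill a highmem-capable page through a local mapping. */
static void fill_page_pattern(struct page *page, u8 pattern)
{
	void *addr = kmap_local_page(page);	/* CPU-local, preemption stays enabled */

	memset(addr, pattern, PAGE_SIZE);
	kunmap_local(addr);			/* release in reverse order of mapping */
}
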
diff --git a/mm/page_reporting.c b/mm/page_reporting.c
index b021f482a4cb..e4c428e61d8c 100644
--- a/mm/page_reporting.c
+++ b/mm/page_reporting.c
@@ -20,7 +20,7 @@ static int page_order_update_notify(const char *val, const struct kernel_param *
* If param is set beyond this limit, order is set to default
* pageblock_order value
*/
- return param_set_uint_minmax(val, kp, 0, MAX_ORDER);
+ return param_set_uint_minmax(val, kp, 0, MAX_PAGE_ORDER);
}
static const struct kernel_param_ops page_reporting_param_ops = {
@@ -276,7 +276,7 @@ page_reporting_process_zone(struct page_reporting_dev_info *prdev,
return err;
/* Process each free list starting from lowest order/mt */
- for (order = page_reporting_order; order <= MAX_ORDER; order++) {
+ for (order = page_reporting_order; order < NR_PAGE_ORDERS; order++) {
for (mt = 0; mt < MIGRATE_TYPES; mt++) {
/* We do not pull pages from the isolate free list */
if (is_migrate_isolate(mt))
@@ -370,7 +370,7 @@ int page_reporting_register(struct page_reporting_dev_info *prdev)
*/
if (page_reporting_order == -1) {
- if (prdev->order > 0 && prdev->order <= MAX_ORDER)
+ if (prdev->order > 0 && prdev->order <= MAX_PAGE_ORDER)
page_reporting_order = prdev->order;
else
page_reporting_order = pageblock_order;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index e0b368e545ed..74d2de15fb5e 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -268,7 +268,8 @@ restart:
* cleared *pmd but not decremented compound_mapcount().
*/
if ((pvmw->flags & PVMW_SYNC) &&
- transhuge_vma_suitable(vma, pvmw->address) &&
+ thp_vma_suitable_order(vma, pvmw->address,
+ PMD_ORDER) &&
(pvmw->nr_pages >= HPAGE_PMD_NR)) {
spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index b7d7e4fcfad7..f46c80b18ce4 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -539,6 +539,11 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
* not backed by VMAs. Because 'unusual' entries may be walked this function
* will also not lock the PTEs for the pte_entry() callback. This is useful for
* walking the kernel pages tables or page tables for firmware.
+ *
+ * Note: Be careful when walking the kernel page tables. The caller may need to
+ * take other effective measures (the mmap lock may be insufficient) to prevent
+ * the intermediate kernel page tables belonging to the specified address range
+ * from being freed (e.g. memory hot-remove).
*/
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
@@ -556,7 +561,29 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
if (start >= end || !walk.mm)
return -EINVAL;
- mmap_assert_write_locked(walk.mm);
+ /*
+ * 1) For walking the user virtual address space:
+ *
+ * The mmap lock protects the page walker from changes to the page
+ * tables during the walk. However a read lock is insufficient to
+ * protect those areas which don't have a VMA as munmap() detaches
+ * the VMAs before downgrading to a read lock and actually tearing
+ * down PTEs/page tables. In that case, the mmap write lock must
+ * be held.
+ *
+ * 2) For walking the kernel virtual address space:
+ *
+ * Kernel intermediate page tables are usually not freed, so the
+ * mmap read lock is sufficient. But there are exceptions, e.g.
+ * memory hot-remove, where the mmap lock is insufficient to prevent
+ * the intermediate kernel page tables belonging to the specified
+ * address range from being freed. The caller should take other
+ * measures to prevent this race.
+ */
+ if (mm == &init_mm)
+ mmap_assert_locked(walk.mm);
+ else
+ mmap_assert_write_locked(walk.mm);
return walk_pgd_range(start, end, &walk);
}
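
A minimal sketch of the locking rule the relaxed assertion above encodes: a kernel-address walk (mm == &init_mm) only needs the mmap lock held for read, while a user-address walk of VMA-less ranges still needs it held for write; protecting kernel page tables against concurrent freeing (e.g. memory hot-remove) remains the caller's responsibility. The helper name below is hypothetical:

/* Hypothetical helper showing the lock walk_page_range_novma() now expects. */
static void lock_mm_for_novma_walk(struct mm_struct *mm)
{
	if (mm == &init_mm)
		mmap_read_lock(mm);	/* kernel VA: satisfies mmap_assert_locked() */
	else
		mmap_write_lock(mm);	/* user VA without VMAs: write lock required */
}
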
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 0523edab03a6..b308e96cd05a 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -53,7 +53,10 @@ static int process_vm_rw_pages(struct page **pages,
}
/* Maximum number of pages kmalloc'd to hold struct page's during copy */
-#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
+#define PVM_MAX_KMALLOC_PAGES 2
+
+/* Maximum number of pages that can be stored at a time */
+#define PVM_MAX_USER_PAGES (PVM_MAX_KMALLOC_PAGES * PAGE_SIZE / sizeof(struct page *))
/**
* process_vm_rw_single_vec - read/write pages from task specified
@@ -79,8 +82,6 @@ static int process_vm_rw_single_vec(unsigned long addr,
unsigned long start_offset = addr - pa;
unsigned long nr_pages;
ssize_t rc = 0;
- unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
- / sizeof(struct pages *);
unsigned int flags = 0;
/* Work out address and page range required */
@@ -92,7 +93,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
flags |= FOLL_WRITE;
while (!rc && nr_pages && iov_iter_count(iter)) {
- int pinned_pages = min(nr_pages, max_pages_per_loop);
+ int pinned_pages = min_t(unsigned long, nr_pages, PVM_MAX_USER_PAGES);
int locked = 1;
size_t bytes;
@@ -171,7 +172,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
iov_len = rvec[i].iov_len;
if (iov_len > 0) {
nr_pages_iov = ((unsigned long)rvec[i].iov_base
- + iov_len)
+ + iov_len - 1)
/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
/ PAGE_SIZE + 1;
nr_pages = max(nr_pages, nr_pages_iov);
@@ -184,8 +185,8 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
/* For reliability don't try to kmalloc more than
2 pages worth */
- process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
- sizeof(struct pages *)*nr_pages),
+ process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES * PAGE_SIZE,
+ sizeof(struct page *)*nr_pages),
GFP_KERNEL);
if (!process_pages)
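
To make the new limit concrete: on a typical 64-bit configuration with 4 KiB pages and 8-byte pointers, PVM_MAX_USER_PAGES works out to 1024 pinned pages per loop iteration, i.e. up to 4 MiB of the target process copied per batch (the figures below hold only for that common configuration):

/*
 * Worked example (PAGE_SIZE = 4096, sizeof(struct page *) = 8):
 *   PVM_MAX_USER_PAGES = 2 * 4096 / 8 = 1024 pages
 *   => at most 1024 * 4 KiB = 4 MiB of target memory pinned per loop.
 */
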
diff --git a/mm/readahead.c b/mm/readahead.c
index 6925e6959fd3..23620c57c122 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -511,16 +511,14 @@ void page_cache_ra_order(struct readahead_control *ractl,
unsigned int order = new_order;
/* Align with smaller pages if needed */
- if (index & ((1UL << order) - 1)) {
+ if (index & ((1UL << order) - 1))
order = __ffs(index);
- if (order == 1)
- order = 0;
- }
/* Don't allocate pages past EOF */
- while (index + (1UL << order) - 1 > limit) {
- if (--order == 1)
- order = 0;
- }
+ while (index + (1UL << order) - 1 > limit)
+ order--;
+ /* THP machinery does not support order-1 */
+ if (order == 1)
+ order = 0;
err = ra_alloc_folio(ractl, index, mark, order, gfp);
if (err)
break;
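
The reworked readahead clamping above can be read as three independent steps; a simplified sketch (not the exact function, which derives the limit from EOF and, like the original, assumes index <= limit):

/* Simplified sketch of the order clamping in page_cache_ra_order(). */
static unsigned int clamp_ra_order(pgoff_t index, pgoff_t limit, unsigned int order)
{
	if (index & ((1UL << order) - 1))	/* align with the smaller folio boundary */
		order = __ffs(index);
	while (index + (1UL << order) - 1 > limit)
		order--;			/* don't allocate pages past EOF */
	if (order == 1)				/* THP machinery does not support order-1 */
		order = 0;
	return order;
}
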
diff --git a/mm/rmap.c b/mm/rmap.c
index 7a27a2b41802..f5d43edad529 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -470,7 +470,7 @@ void __init anon_vma_init(void)
/*
* Getting a lock on a stable anon_vma from a page off the LRU is tricky!
*
- * Since there is no serialization what so ever against page_remove_rmap()
+ * Since there is no serialization what so ever against folio_remove_rmap_*()
* the best this function can do is return a refcount increased anon_vma
* that might have been relevant to this page.
*
@@ -487,9 +487,15 @@ void __init anon_vma_init(void)
* [ something equivalent to page_mapped_in_vma() ].
*
* Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
- * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
+ * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
* if there is a mapcount, we can dereference the anon_vma after observing
* those.
+ *
+ * NOTE: the caller should normally hold the folio lock when calling this. If
+ * not, the caller needs to double-check that the anon_vma didn't change after
+ * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it
+ * concurrently without folio lock protection). See folio_lock_anon_vma_read(),
+ * which already covers that, and the comment above remap_pages().
*/
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
@@ -542,6 +548,7 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
struct anon_vma *root_anon_vma;
unsigned long anon_mapping;
+retry:
rcu_read_lock();
anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
@@ -553,6 +560,17 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
root_anon_vma = READ_ONCE(anon_vma->root);
if (down_read_trylock(&root_anon_vma->rwsem)) {
/*
+ * folio_move_anon_rmap() might have changed the anon_vma as we
+ * might not hold the folio lock here.
+ */
+ if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
+ anon_mapping)) {
+ up_read(&root_anon_vma->rwsem);
+ rcu_read_unlock();
+ goto retry;
+ }
+
+ /*
* If the folio is still mapped, then this anon_vma is still
* its anon_vma, and holding the mutex ensures that it will
* not go away, see anon_vma_free().
@@ -586,6 +604,18 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
rcu_read_unlock();
anon_vma_lock_read(anon_vma);
+ /*
+ * folio_move_anon_rmap() might have changed the anon_vma as we might
+ * not hold the folio lock here.
+ */
+ if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
+ anon_mapping)) {
+ anon_vma_unlock_read(anon_vma);
+ put_anon_vma(anon_vma);
+ anon_vma = NULL;
+ goto retry;
+ }
+
if (atomic_dec_and_test(&anon_vma->refcount)) {
/*
* Oops, we held the last refcount, release the lock
@@ -1127,6 +1157,48 @@ int folio_total_mapcount(struct folio *folio)
return mapcount;
}
+static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level,
+ int *nr_pmdmapped)
+{
+ atomic_t *mapped = &folio->_nr_pages_mapped;
+ int first, nr = 0;
+
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ first = atomic_inc_and_test(&page->_mapcount);
+ if (first && folio_test_large(folio)) {
+ first = atomic_inc_return_relaxed(mapped);
+ first = (first < ENTIRELY_MAPPED);
+ }
+
+ if (first)
+ nr++;
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ first = atomic_inc_and_test(&folio->_entire_mapcount);
+ if (first) {
+ nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
+ if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
+ *nr_pmdmapped = folio_nr_pages(folio);
+ nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
+ /* Raced ahead of a remove and another add? */
+ if (unlikely(nr < 0))
+ nr = 0;
+ } else {
+ /* Raced ahead of a remove of ENTIRELY_MAPPED */
+ nr = 0;
+ }
+ }
+ break;
+ }
+ return nr;
+}
+
/**
* folio_move_anon_rmap - move a folio to our anon_vma
* @folio: The folio to move to our anon_vma
@@ -1198,12 +1270,12 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
* The page's anon-rmap details (mapping and index) are guaranteed to
* be set up correctly at this point.
*
- * We have exclusion against page_add_anon_rmap because the caller
+ * We have exclusion against folio_add_anon_rmap_*() because the caller
* always holds the page locked.
*
- * We have exclusion against page_add_new_anon_rmap because those pages
+ * We have exclusion against folio_add_new_anon_rmap because those pages
* are initially only visible via the pagetables, and the pte is locked
- * over the call to page_add_new_anon_rmap.
+ * over the call to folio_add_new_anon_rmap.
*/
VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
folio);
@@ -1211,54 +1283,13 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
page);
}
-/**
- * page_add_anon_rmap - add pte mapping to an anonymous page
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @address: the user virtual address mapped
- * @flags: the rmap flags
- *
- * The caller needs to hold the pte lock, and the page must be locked in
- * the anon_vma case: to serialize mapping,index checking after setting,
- * and to ensure that PageAnon is not being upgraded racily to PageKsm
- * (but PageKsm is never downgraded to PageAnon).
- */
-void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
- unsigned long address, rmap_t flags)
+static __always_inline void __folio_add_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *vma,
+ unsigned long address, rmap_t flags, enum rmap_level level)
{
- struct folio *folio = page_folio(page);
- atomic_t *mapped = &folio->_nr_pages_mapped;
- int nr = 0, nr_pmdmapped = 0;
- bool compound = flags & RMAP_COMPOUND;
- bool first;
-
- /* Is page being mapped by PTE? Is this its first map to be added? */
- if (likely(!compound)) {
- first = atomic_inc_and_test(&page->_mapcount);
- nr = first;
- if (first && folio_test_large(folio)) {
- nr = atomic_inc_return_relaxed(mapped);
- nr = (nr < COMPOUND_MAPPED);
- }
- } else if (folio_test_pmd_mappable(folio)) {
- /* That test is redundant: it's for safety or to optimize out */
-
- first = atomic_inc_and_test(&folio->_entire_mapcount);
- if (first) {
- nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
- if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
- nr_pmdmapped = folio_nr_pages(folio);
- nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
- /* Raced ahead of a remove and another add? */
- if (unlikely(nr < 0))
- nr = 0;
- } else {
- /* Raced ahead of a remove of COMPOUND_MAPPED */
- nr = 0;
- }
- }
- }
+ int i, nr, nr_pmdmapped = 0;
+ nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
if (nr_pmdmapped)
__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
if (nr)
@@ -1272,18 +1303,34 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
* folio->index right when not given the address of the head
* page.
*/
- VM_WARN_ON_FOLIO(folio_test_large(folio) && !compound, folio);
+ VM_WARN_ON_FOLIO(folio_test_large(folio) &&
+ level != RMAP_LEVEL_PMD, folio);
__folio_set_anon(folio, vma, address,
!!(flags & RMAP_EXCLUSIVE));
} else if (likely(!folio_test_ksm(folio))) {
__page_check_anon_rmap(folio, page, vma, address);
}
- if (flags & RMAP_EXCLUSIVE)
- SetPageAnonExclusive(page);
- /* While PTE-mapping a THP we have a PMD and a PTE mapping. */
- VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 ||
- (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) &&
- PageAnonExclusive(page), folio);
+
+ if (flags & RMAP_EXCLUSIVE) {
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ for (i = 0; i < nr_pages; i++)
+ SetPageAnonExclusive(page + i);
+ break;
+ case RMAP_LEVEL_PMD:
+ SetPageAnonExclusive(page);
+ break;
+ }
+ }
+ for (i = 0; i < nr_pages; i++) {
+ struct page *cur_page = page + i;
+
+ /* While PTE-mapping a THP we have a PMD and a PTE mapping. */
+ VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 ||
+ (folio_test_large(folio) &&
+ folio_entire_mapcount(folio) > 1)) &&
+ PageAnonExclusive(cur_page), folio);
+ }
/*
* For large folio, only mlock it if it's fully mapped to VMA. It's
@@ -1296,182 +1343,200 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
}
/**
+ * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
+ * @folio: The folio to add the mappings to
+ * @page: The first page to add
+ * @nr_pages: The number of pages which will be mapped
+ * @vma: The vm area in which the mappings are added
+ * @address: The user virtual address of the first page to map
+ * @flags: The rmap flags
+ *
+ * The page range of folio is defined by [first_page, first_page + nr_pages)
+ *
+ * The caller needs to hold the page table lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting,
+ * and to ensure that an anon folio is not being upgraded racily to a KSM folio
+ * (but KSM folios are never downgraded).
+ */
+void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
+ int nr_pages, struct vm_area_struct *vma, unsigned long address,
+ rmap_t flags)
+{
+ __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
+ RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
+ * @folio: The folio to add the mapping to
+ * @page: The first page to add
+ * @vma: The vm area in which the mapping is added
+ * @address: The user virtual address of the first page to map
+ * @flags: The rmap flags
+ *
+ * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting.
+ */
+void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
+ struct vm_area_struct *vma, unsigned long address, rmap_t flags)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
+/**
* folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
* @folio: The folio to add the mapping to.
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * Like page_add_anon_rmap() but must only be called on *new* folios.
+ * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
* This means the inc-and-test can be bypassed.
* The folio does not have to be locked.
*
- * If the folio is large, it is accounted as a THP. As the folio
+ * If the folio is pmd-mappable, it is accounted as a THP. As the folio
* is new, it's assumed to be mapped exclusively by a single process.
*/
void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
unsigned long address)
{
- int nr;
+ int nr = folio_nr_pages(folio);
- VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ VM_BUG_ON_VMA(address < vma->vm_start ||
+ address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
__folio_set_swapbacked(folio);
+ __folio_set_anon(folio, vma, address, true);
- if (likely(!folio_test_pmd_mappable(folio))) {
+ if (likely(!folio_test_large(folio))) {
/* increment count (starts at -1) */
atomic_set(&folio->_mapcount, 0);
- nr = 1;
+ SetPageAnonExclusive(&folio->page);
+ } else if (!folio_test_pmd_mappable(folio)) {
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct page *page = folio_page(folio, i);
+
+ /* increment count (starts at -1) */
+ atomic_set(&page->_mapcount, 0);
+ SetPageAnonExclusive(page);
+ }
+
+ atomic_set(&folio->_nr_pages_mapped, nr);
} else {
/* increment count (starts at -1) */
atomic_set(&folio->_entire_mapcount, 0);
- atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
- nr = folio_nr_pages(folio);
+ atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
+ SetPageAnonExclusive(&folio->page);
__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
}
__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
- __folio_set_anon(folio, vma, address, true);
- SetPageAnonExclusive(&folio->page);
}
-/**
- * folio_add_file_rmap_range - add pte mapping to page range of a folio
- * @folio: The folio to add the mapping to
- * @page: The first page to add
- * @nr_pages: The number of pages which will be mapped
- * @vma: the vm area in which the mapping is added
- * @compound: charge the page as compound or small page
- *
- * The page range of folio is defined by [first_page, first_page + nr_pages)
- *
- * The caller needs to hold the pte lock.
- */
-void folio_add_file_rmap_range(struct folio *folio, struct page *page,
- unsigned int nr_pages, struct vm_area_struct *vma,
- bool compound)
+static __always_inline void __folio_add_file_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *vma,
+ enum rmap_level level)
{
- atomic_t *mapped = &folio->_nr_pages_mapped;
- unsigned int nr_pmdmapped = 0, first;
- int nr = 0;
-
- VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
-
- /* Is page being mapped by PTE? Is this its first map to be added? */
- if (likely(!compound)) {
- do {
- first = atomic_inc_and_test(&page->_mapcount);
- if (first && folio_test_large(folio)) {
- first = atomic_inc_return_relaxed(mapped);
- first = (first < COMPOUND_MAPPED);
- }
-
- if (first)
- nr++;
- } while (page++, --nr_pages > 0);
- } else if (folio_test_pmd_mappable(folio)) {
- /* That test is redundant: it's for safety or to optimize out */
+ int nr, nr_pmdmapped = 0;
- first = atomic_inc_and_test(&folio->_entire_mapcount);
- if (first) {
- nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
- if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
- nr_pmdmapped = folio_nr_pages(folio);
- nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
- /* Raced ahead of a remove and another add? */
- if (unlikely(nr < 0))
- nr = 0;
- } else {
- /* Raced ahead of a remove of COMPOUND_MAPPED */
- nr = 0;
- }
- }
- }
+ VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
+ nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
if (nr_pmdmapped)
__lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
if (nr)
__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
- /* See comments in page_add_anon_rmap() */
+ /* See comments in folio_add_anon_rmap_*() */
if (!folio_test_large(folio))
mlock_vma_folio(folio, vma);
}
/**
- * page_add_file_rmap - add pte mapping to a file page
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @compound: charge the page as compound or small page
+ * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio
+ * @folio: The folio to add the mappings to
+ * @page: The first page to add
+ * @nr_pages: The number of pages that will be mapped using PTEs
+ * @vma: The vm area in which the mappings are added
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the page table lock.
*/
-void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
- bool compound)
+void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
+ int nr_pages, struct vm_area_struct *vma)
{
- struct folio *folio = page_folio(page);
- unsigned int nr_pages;
-
- VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page);
-
- if (likely(!compound))
- nr_pages = 1;
- else
- nr_pages = folio_nr_pages(folio);
-
- folio_add_file_rmap_range(folio, page, nr_pages, vma, compound);
+ __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
}
/**
- * page_remove_rmap - take down pte mapping from a page
- * @page: page to remove mapping from
- * @vma: the vm area from which the mapping is removed
- * @compound: uncharge the page as compound or small page
+ * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
+ * @folio: The folio to add the mapping to
+ * @page: The first page to add
+ * @vma: The vm area in which the mapping is added
*
- * The caller needs to hold the pte lock.
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock.
*/
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
- bool compound)
+void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
+ struct vm_area_struct *vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
+static __always_inline void __folio_remove_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *vma,
+ enum rmap_level level)
{
- struct folio *folio = page_folio(page);
atomic_t *mapped = &folio->_nr_pages_mapped;
- int nr = 0, nr_pmdmapped = 0;
- bool last;
+ int last, nr = 0, nr_pmdmapped = 0;
enum node_stat_item idx;
- VM_BUG_ON_PAGE(compound && !PageHead(page), page);
-
- /* Hugetlb pages are not counted in NR_*MAPPED */
- if (unlikely(folio_test_hugetlb(folio))) {
- /* hugetlb pages are always mapped with pmds */
- atomic_dec(&folio->_entire_mapcount);
- return;
- }
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
- /* Is page being unmapped by PTE? Is this its last map to be removed? */
- if (likely(!compound)) {
- last = atomic_add_negative(-1, &page->_mapcount);
- nr = last;
- if (last && folio_test_large(folio)) {
- nr = atomic_dec_return_relaxed(mapped);
- nr = (nr < COMPOUND_MAPPED);
- }
- } else if (folio_test_pmd_mappable(folio)) {
- /* That test is redundant: it's for safety or to optimize out */
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ last = atomic_add_negative(-1, &page->_mapcount);
+ if (last && folio_test_large(folio)) {
+ last = atomic_dec_return_relaxed(mapped);
+ last = (last < ENTIRELY_MAPPED);
+ }
+ if (last)
+ nr++;
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
last = atomic_add_negative(-1, &folio->_entire_mapcount);
if (last) {
- nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
- if (likely(nr < COMPOUND_MAPPED)) {
+ nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
+ if (likely(nr < ENTIRELY_MAPPED)) {
nr_pmdmapped = folio_nr_pages(folio);
nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
/* Raced ahead of another remove and an add? */
if (unlikely(nr < 0))
nr = 0;
} else {
- /* An add of COMPOUND_MAPPED raced ahead */
+ /* An add of ENTIRELY_MAPPED raced ahead */
nr = 0;
}
}
+ break;
}
if (nr_pmdmapped) {
@@ -1488,18 +1553,18 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
__lruvec_stat_mod_folio(folio, idx, -nr);
/*
- * Queue anon THP for deferred split if at least one
+ * Queue anon large folio for deferred split if at least one
* page of the folio is unmapped and at least one page
* is still mapped.
*/
- if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
- if (!compound || nr < nr_pmdmapped)
+ if (folio_test_large(folio) && folio_test_anon(folio))
+ if (level == RMAP_LEVEL_PTE || nr < nr_pmdmapped)
deferred_split_folio(folio);
}
/*
* It would be tidy to reset folio_test_anon mapping when fully
- * unmapped, but that might overwrite a racing page_add_anon_rmap
+ * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
* which increments mapcount after us but sets mapping before us:
* so leave the reset to free_pages_prepare, and remember that
* it's only reliable while mapped.
@@ -1508,6 +1573,43 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
munlock_vma_folio(folio, vma);
}
+/**
+ * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio
+ * @folio: The folio to remove the mappings from
+ * @page: The first page to remove
+ * @nr_pages: The number of pages that will be removed from the mapping
+ * @vma: The vm area from which the mappings are removed
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
+ *
+ * The caller needs to hold the page table lock.
+ */
+void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
+ int nr_pages, struct vm_area_struct *vma)
+{
+ __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
+ * @folio: The folio to remove the mapping from
+ * @page: The first page to remove
+ * @vma: The vm area from which the mapping is removed
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock.
+ */
+void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
+ struct vm_area_struct *vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
/*
* @arg: enum ttu_flags will be passed to this argument
*/
@@ -1526,7 +1628,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
/*
* When racing against e.g. zap_pte_range() on another cpu,
- * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
* try_to_unmap() may return before page_mapped() has become false,
* if page table locking is skipped: use TTU_SYNC to wait for that.
*/
@@ -1764,9 +1866,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
break;
}
- /* See page_try_share_anon_rmap(): clear PTE first. */
+ /* See folio_try_share_anon_rmap(): clear PTE first. */
if (anon_exclusive &&
- page_try_share_anon_rmap(subpage)) {
+ folio_try_share_anon_rmap_pte(folio, subpage)) {
swap_free(entry);
set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
@@ -1804,7 +1906,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
dec_mm_counter(mm, mm_counter_file(&folio->page));
}
discard:
- page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+ if (unlikely(folio_test_hugetlb(folio)))
+ hugetlb_remove_rmap(folio);
+ else
+ folio_remove_rmap_pte(folio, subpage, vma);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
@@ -1872,7 +1977,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
/*
* When racing against e.g. zap_pte_range() on another cpu,
- * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+ * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
* try_to_migrate() may return before page_mapped() has become false,
* if page table locking is skipped: use TTU_SYNC to wait for that.
*/
@@ -2037,7 +2142,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
pte_t swp_pte;
if (anon_exclusive)
- BUG_ON(page_try_share_anon_rmap(subpage));
+ WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio,
+ subpage));
/*
* Store the pfn of the page in a special migration
@@ -2108,14 +2214,19 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
!anon_exclusive, subpage);
- /* See page_try_share_anon_rmap(): clear PTE first. */
- if (anon_exclusive &&
- page_try_share_anon_rmap(subpage)) {
- if (folio_test_hugetlb(folio))
+ /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
+ if (folio_test_hugetlb(folio)) {
+ if (anon_exclusive &&
+ hugetlb_try_share_anon_rmap(folio)) {
set_huge_pte_at(mm, address, pvmw.pte,
pteval, hsz);
- else
- set_pte_at(mm, address, pvmw.pte, pteval);
+ ret = false;
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ } else if (anon_exclusive &&
+ folio_try_share_anon_rmap_pte(folio, subpage)) {
+ set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
page_vma_mapped_walk_done(&pvmw);
break;
@@ -2157,7 +2268,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
*/
}
- page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+ if (unlikely(folio_test_hugetlb(folio)))
+ hugetlb_remove_rmap(folio);
+ else
+ folio_remove_rmap_pte(folio, subpage, vma);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
@@ -2296,7 +2410,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
* There is a reference on the page for the swap entry which has
* been removed, so shouldn't take another.
*/
- page_remove_rmap(subpage, vma, false);
+ folio_remove_rmap_pte(folio, subpage, vma);
}
mmu_notifier_invalidate_range_end(&range);
@@ -2580,12 +2694,11 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
* The following two functions are for anonymous (private mapped) hugepages.
* Unlike common anonymous pages, anonymous hugepages have no accounting code
* and no lru code, because we handle hugepages differently from common pages.
- *
- * RMAP_COMPOUND is ignored.
*/
-void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
- unsigned long address, rmap_t flags)
+void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
+ unsigned long address, rmap_t flags)
{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
atomic_inc(&folio->_entire_mapcount);
@@ -2595,9 +2708,11 @@ void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
PageAnonExclusive(&folio->page), folio);
}
-void hugepage_add_new_anon_rmap(struct folio *folio,
- struct vm_area_struct *vma, unsigned long address)
+void hugetlb_add_new_anon_rmap(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long address)
{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
/* increment count (starts at -1) */
atomic_set(&folio->_entire_mapcount, 0);
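
With the compound flag gone, callers pick the PTE or PMD flavour of the rmap helpers explicitly. A hedged sketch of how a file-backed mapping path might use the new interface (the helper and its bool parameter are illustrative only; real callers sit in the fault and THP paths and hold the page table lock):

/* Illustrative only: add rmap for either nr small PTE mappings or one PMD mapping. */
static void add_file_rmap(struct folio *folio, struct page *page, int nr,
			  struct vm_area_struct *vma, bool pmd_mapped)
{
	if (pmd_mapped)
		folio_add_file_rmap_pmd(folio, page, vma);	/* [page, page + HPAGE_PMD_NR) */
	else
		folio_add_file_rmap_ptes(folio, page, nr, vma);	/* [page, page + nr) */
}
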
diff --git a/mm/shmem.c b/mm/shmem.c
index 0d1ce70bce38..928aa2304932 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1514,8 +1514,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(folio_mapped(folio));
- swap_writepage(&folio->page, wbc);
- return 0;
+ return swap_writepage(&folio->page, wbc);
}
mutex_unlock(&shmem_swaplist_mutex);
@@ -1570,15 +1569,13 @@ static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
{
struct mempolicy *mpol;
pgoff_t ilx;
- struct page *page;
+ struct folio *folio;
mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
- page = swap_cluster_readahead(swap, gfp, mpol, ilx);
+ folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
mpol_cond_put(mpol);
- if (!page)
- return NULL;
- return page_folio(page);
+ return folio;
}
/*
@@ -4462,8 +4459,8 @@ static void __init shmem_destroy_inodecache(void)
}
/* Keep the page in page cache instead of truncating it */
-static int shmem_error_remove_page(struct address_space *mapping,
- struct page *page)
+static int shmem_error_remove_folio(struct address_space *mapping,
+ struct folio *folio)
{
return 0;
}
@@ -4478,7 +4475,7 @@ const struct address_space_operations shmem_aops = {
#ifdef CONFIG_MIGRATION
.migrate_folio = migrate_folio,
#endif
- .error_remove_page = shmem_error_remove_page,
+ .error_remove_folio = shmem_error_remove_folio,
};
EXPORT_SYMBOL(shmem_aops);
diff --git a/mm/show_mem.c b/mm/show_mem.c
index ba0808d6917f..8dcfafbd283c 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -352,8 +352,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
for_each_populated_zone(zone) {
unsigned int order;
- unsigned long nr[MAX_ORDER + 1], flags, total = 0;
- unsigned char types[MAX_ORDER + 1];
+ unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
+ unsigned char types[NR_PAGE_ORDERS];
if (zone_idx(zone) > max_zone_idx)
continue;
@@ -363,7 +363,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
printk(KERN_CONT "%s: ", zone->name);
spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &zone->free_area[order];
int type;
@@ -377,7 +377,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
}
}
spin_unlock_irqrestore(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
printk(KERN_CONT "%lu*%lukB ",
nr[order], K(1UL) << order);
if (nr[order])
diff --git a/mm/shuffle.h b/mm/shuffle.h
index a6bdf54f96f1..61bbcddeeee6 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -4,7 +4,7 @@
#define _MM_SHUFFLE_H
#include <linux/jump_label.h>
-#define SHUFFLE_ORDER MAX_ORDER
+#define SHUFFLE_ORDER MAX_PAGE_ORDER
#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
diff --git a/mm/slub.c b/mm/slub.c
index fac07382d3a6..2ef88bbf56a3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -975,20 +975,20 @@ static inline void set_orig_size(struct kmem_cache *s,
void *object, unsigned int orig_size)
{
void *p = kasan_reset_tag(object);
+ unsigned int kasan_meta_size;
if (!slub_debug_orig_size(s))
return;
-#ifdef CONFIG_KASAN_GENERIC
/*
- * KASAN could save its free meta data in object's data area at
- * offset 0, if the size is larger than 'orig_size', it will
- * overlap the data redzone in [orig_size+1, object_size], and
- * the check should be skipped.
+ * KASAN can save its free meta data inside of the object at offset 0.
+ * If this meta data size is larger than 'orig_size', it will overlap
+ * the data redzone in [orig_size+1, object_size]. Thus, we adjust
+ * 'orig_size' to be at least as big as KASAN's meta data.
*/
- if (kasan_metadata_size(s, true) > orig_size)
- orig_size = s->object_size;
-#endif
+ kasan_meta_size = kasan_metadata_size(s, true);
+ if (kasan_meta_size > orig_size)
+ orig_size = kasan_meta_size;
p += get_info_end(s);
p += sizeof(struct track) * 2;
@@ -1297,7 +1297,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
{
u8 *p = object;
u8 *endobject = object + s->object_size;
- unsigned int orig_size;
+ unsigned int orig_size, kasan_meta_size;
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1327,12 +1327,23 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
}
if (s->flags & SLAB_POISON) {
- if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
- (!check_bytes_and_report(s, slab, p, "Poison", p,
- POISON_FREE, s->object_size - 1) ||
- !check_bytes_and_report(s, slab, p, "End Poison",
- p + s->object_size - 1, POISON_END, 1)))
- return 0;
+ if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
+ /*
+ * KASAN can save its free meta data inside of the
+ * object at offset 0. Thus, skip checking the part of
+ * the redzone that overlaps with the meta data.
+ */
+ kasan_meta_size = kasan_metadata_size(s, true);
+ if (kasan_meta_size < s->object_size - 1 &&
+ !check_bytes_and_report(s, slab, p, "Poison",
+ p + kasan_meta_size, POISON_FREE,
+ s->object_size - kasan_meta_size - 1))
+ return 0;
+ if (kasan_meta_size < s->object_size &&
+ !check_bytes_and_report(s, slab, p, "End Poison",
+ p + s->object_size - 1, POISON_END, 1))
+ return 0;
+ }
/*
* check_pad_bytes cleans up on its own.
*/
@@ -2159,9 +2170,9 @@ static void *setup_object(struct kmem_cache *s, void *object)
setup_object_debug(s, object);
object = kasan_init_slab_obj(s, object);
if (unlikely(s->ctor)) {
- kasan_unpoison_object_data(s, object);
+ kasan_unpoison_new_object(s, object);
s->ctor(object);
- kasan_poison_object_data(s, object);
+ kasan_poison_new_object(s, object);
}
return object;
}
@@ -2176,11 +2187,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
struct slab *slab;
unsigned int order = oo_order(oo);
- if (node == NUMA_NO_NODE)
- folio = (struct folio *)alloc_pages(flags, order);
- else
- folio = (struct folio *)__alloc_pages_node(node, flags, order);
-
+ folio = (struct folio *)alloc_pages_node(node, flags, order);
if (!folio)
return NULL;
@@ -3908,7 +3915,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
*/
static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
{
- struct page *page;
+ struct folio *folio;
void *ptr = NULL;
unsigned int order = get_order(size);
@@ -3916,10 +3923,10 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
flags = kmalloc_fix_flags(flags);
flags |= __GFP_COMP;
- page = alloc_pages_node(node, flags, order);
- if (page) {
- ptr = page_address(page);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
+ folio = (struct folio *)alloc_pages_node(node, flags, order);
+ if (folio) {
+ ptr = folio_address(folio);
+ lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
PAGE_SIZE << order);
}
@@ -4368,9 +4375,9 @@ static void free_large_kmalloc(struct folio *folio, void *object)
kasan_kfree_large(object);
kmsan_kfree_large(object);
- mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+ lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
-(PAGE_SIZE << order));
- __free_pages(folio_page(folio, 0), order);
+ folio_put(folio);
}
/**
@@ -4783,7 +4790,7 @@ static inline int calculate_order(unsigned int size)
* Doh this slab cannot be placed using slub_max_order.
*/
order = get_order(size);
- if (order <= MAX_ORDER)
+ if (order <= MAX_PAGE_ORDER)
return order;
return -ENOSYS;
}
@@ -5311,7 +5318,7 @@ __setup("slub_min_order=", setup_slub_min_order);
static int __init setup_slub_max_order(char *str)
{
get_option(&str, (int *)&slub_max_order);
- slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER);
+ slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
if (slub_min_order > slub_max_order)
slub_min_order = slub_max_order;
diff --git a/mm/sparse.c b/mm/sparse.c
index 77d91e565045..338cf946dee8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -792,6 +792,13 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
unsigned long section_nr = pfn_to_section_nr(pfn);
/*
+ * Mark the section invalid so that valid_section()
+ * returns false. This prevents code from dereferencing
+ * the ms->usage array.
+ */
+ ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
+
+ /*
* When removing an early section, the usage map is kept (as the
* usage maps of other sections fall into the same page). It
* will be re-used when re-adding the section - which is then no
@@ -799,16 +806,10 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
* was allocated during boot.
*/
if (!PageReserved(virt_to_page(ms->usage))) {
- kfree(ms->usage);
- ms->usage = NULL;
+ kfree_rcu(ms->usage, rcu);
+ WRITE_ONCE(ms->usage, NULL);
}
memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
- /*
- * Mark the section invalid so that valid_section()
- * return false. This prevents code from dereferencing
- * ms->usage array.
- */
- ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
}
/*
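
Clearing SECTION_HAS_MEM_MAP first, publishing the NULL pointer with WRITE_ONCE() and deferring the free with kfree_rcu() lets lockless readers keep dereferencing ms->usage safely until a grace period elapses. A minimal sketch of the reader side this protects (hypothetical helper; subsection_map exists only with CONFIG_SPARSEMEM_VMEMMAP):

/* Hypothetical RCU reader pattern that the kfree_rcu() conversion protects. */
static bool subsection_is_present(struct mem_section *ms, unsigned long idx)
{
	struct mem_section_usage *usage;
	bool present = false;

	rcu_read_lock();
	usage = READ_ONCE(ms->usage);
	if (usage)
		present = test_bit(idx, usage->subsection_map);
	rcu_read_unlock();

	return present;
}
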
diff --git a/mm/swap.h b/mm/swap.h
index 73c332ee4d91..758c46ca671e 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -10,7 +10,8 @@ struct mempolicy;
/* linux/mm/page_io.c */
int sio_pool_init(void);
struct swap_iocb;
-void swap_readpage(struct page *page, bool do_poll, struct swap_iocb **plug);
+void swap_read_folio(struct folio *folio, bool do_poll,
+ struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
static inline void swap_read_unplug(struct swap_iocb *plug)
{
@@ -19,7 +20,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
}
void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc);
-void __swap_writepage(struct page *page, struct writeback_control *wbc);
+void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
@@ -45,25 +46,24 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
struct folio *filemap_get_incore_folio(struct address_space *mapping,
pgoff_t index);
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma,
- unsigned long addr,
- struct swap_iocb **plug);
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct mempolicy *mpol, pgoff_t ilx,
- bool *new_page_allocated);
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
- struct mempolicy *mpol, pgoff_t ilx);
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr,
+ struct swap_iocb **plug);
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
+ struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+ bool skip_if_exists);
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+ struct mempolicy *mpol, pgoff_t ilx);
struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
static inline unsigned int folio_swap_flags(struct folio *folio)
{
- return page_swap_info(&folio->page)->flags;
+ return swp_swap_info(folio->swap)->flags;
}
#else /* CONFIG_SWAP */
struct swap_iocb;
-static inline void swap_readpage(struct page *page, bool do_poll,
+static inline void swap_read_folio(struct folio *folio, bool do_poll,
struct swap_iocb **plug)
{
}
@@ -80,7 +80,7 @@ static inline void show_swap_cache_info(void)
{
}
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
{
return NULL;
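Since folio->swap now feeds swp_swap_info() directly (see folio_swap_flags() above), backing-device properties can be read straight off a folio. A small illustrative helper, assuming SWP_SYNCHRONOUS_IO is the flag of interest (the helper name is ours, not from this patch):

static bool folio_swap_is_synchronous(struct folio *folio)
{
	return folio_swap_flags(folio) & SWP_SYNCHRONOUS_IO;
}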
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 85d9e5806a6a..e671266ad772 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -410,13 +410,12 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
return folio;
}
-struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct mempolicy *mpol, pgoff_t ilx,
- bool *new_page_allocated)
+struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+ bool skip_if_exists)
{
struct swap_info_struct *si;
struct folio *folio;
- struct page *page;
void *shadow = NULL;
*new_page_allocated = false;
@@ -433,10 +432,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
*/
folio = filemap_get_folio(swap_address_space(entry),
swp_offset(entry));
- if (!IS_ERR(folio)) {
- page = folio_file_page(folio, swp_offset(entry));
- goto got_page;
- }
+ if (!IS_ERR(folio))
+ goto got_folio;
/*
* Just skip read ahead for unused swap slot.
@@ -450,7 +447,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
goto fail_put_swap;
/*
- * Get a new page to read into from swap. Allocate it now,
+ * Get a new folio to read into from swap. Allocate it now,
* before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
* cause any racers to loop around until we add it to cache.
*/
@@ -471,17 +468,28 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
goto fail_put_swap;
/*
+ * Protect against a recursive call to __read_swap_cache_async()
+ * on the same entry waiting forever here because SWAP_HAS_CACHE
+ * is set but the folio is not in the swap cache yet. This can
+ * happen today if mem_cgroup_swapin_charge_folio() below
+ * triggers reclaim through zswap, which may call
+ * __read_swap_cache_async() in the writeback path.
+ */
+ if (skip_if_exists)
+ goto fail_put_swap;
+
+ /*
* We might race against __delete_from_swap_cache(), and
* stumble across a swap_map entry whose SWAP_HAS_CACHE
* has not yet been cleared. Or race against another
* __read_swap_cache_async(), which has set SWAP_HAS_CACHE
- * in swap_map, but not yet added its page to swap cache.
+ * in swap_map, but not yet added its folio to swap cache.
*/
schedule_timeout_uninterruptible(1);
}
/*
- * The swap entry is ours to swap in. Prepare the new page.
+ * The swap entry is ours to swap in. Prepare the new folio.
*/
__folio_set_locked(folio);
@@ -502,10 +510,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/* Caller will initiate read into locked folio */
folio_add_lru(folio);
*new_page_allocated = true;
- page = &folio->page;
-got_page:
+got_folio:
put_swap_device(si);
- return page;
+ return folio;
fail_unlock:
put_swap_folio(folio, entry);
@@ -523,26 +530,26 @@ fail_put_swap:
* the swap entry is no longer in use.
*
* get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_readpage() holds the
+ * __read_swap_cache_async() calls them and swap_read_folio() holds the
* swap cache folio lock.
*/
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma,
- unsigned long addr, struct swap_iocb **plug)
+struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr,
+ struct swap_iocb **plug)
{
bool page_allocated;
struct mempolicy *mpol;
pgoff_t ilx;
- struct page *page;
+ struct folio *folio;
mpol = get_vma_policy(vma, addr, 0, &ilx);
- page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
- &page_allocated);
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
mpol_cond_put(mpol);
if (page_allocated)
- swap_readpage(page, false, plug);
- return page;
+ swap_read_folio(folio, false, plug);
+ return folio;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
@@ -613,7 +620,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
* @mpol: NUMA memory allocation policy to be applied
* @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
*
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
@@ -624,10 +631,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
* are used for every page of the readahead: neighbouring pages on swap
* are fairly likely to have been swapped out from the same node.
*/
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx)
{
- struct page *page;
+ struct folio *folio;
unsigned long entry_offset = swp_offset(entry);
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
@@ -652,30 +659,31 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
/* Ok, do the async read-ahead now */
- page = __read_swap_cache_async(
+ folio = __read_swap_cache_async(
swp_entry(swp_type(entry), offset),
- gfp_mask, mpol, ilx, &page_allocated);
- if (!page)
+ gfp_mask, mpol, ilx, &page_allocated, false);
+ if (!folio)
continue;
if (page_allocated) {
- swap_readpage(page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (offset != entry_offset) {
- SetPageReadahead(page);
+ folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
}
- put_page(page);
+ folio_put(folio);
}
blk_finish_plug(&plug);
swap_read_unplug(splug);
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
/* The page was likely read above, so no need for plugging here */
- page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
- &page_allocated);
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
if (unlikely(page_allocated))
- swap_readpage(page, false, NULL);
- return page;
+ swap_read_folio(folio, false, NULL);
+ zswap_folio_swapin(folio);
+ return folio;
}
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -779,7 +787,7 @@ static void swap_ra_info(struct vm_fault *vmf,
* @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
* @vmf: fault information
*
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read in a few pages whose
* virtual addresses are around the fault address in the same vma.
@@ -787,13 +795,12 @@ static void swap_ra_info(struct vm_fault *vmf,
* Caller must hold read mmap_lock if vmf->vma is not NULL.
*
*/
-static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
- struct mempolicy *mpol, pgoff_t targ_ilx,
- struct vm_fault *vmf)
+static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
+ struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
struct blk_plug plug;
struct swap_iocb *splug = NULL;
- struct page *page;
+ struct folio *folio;
pte_t *pte = NULL, pentry;
unsigned long addr;
swp_entry_t entry;
@@ -826,18 +833,18 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
continue;
pte_unmap(pte);
pte = NULL;
- page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
- &page_allocated);
- if (!page)
+ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ &page_allocated, false);
+ if (!folio)
continue;
if (page_allocated) {
- swap_readpage(page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (i != ra_info.offset) {
- SetPageReadahead(page);
+ folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
}
- put_page(page);
+ folio_put(folio);
}
if (pte)
pte_unmap(pte);
@@ -845,12 +852,13 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
swap_read_unplug(splug);
lru_add_drain();
skip:
- /* The page was likely read above, so no need for plugging here */
- page = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
- &page_allocated);
+ /* The folio was likely read above, so no need for plugging here */
+ folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
+ &page_allocated, false);
if (unlikely(page_allocated))
- swap_readpage(page, false, NULL);
- return page;
+ swap_read_folio(folio, false, NULL);
+ zswap_folio_swapin(folio);
+ return folio;
}
/**
@@ -870,14 +878,17 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
{
struct mempolicy *mpol;
pgoff_t ilx;
- struct page *page;
+ struct folio *folio;
mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
- page = swap_use_vma_readahead() ?
+ folio = swap_use_vma_readahead() ?
swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
mpol_cond_put(mpol);
- return page;
+
+ if (!folio)
+ return NULL;
+ return folio_file_page(folio, swp_offset(entry));
}
#ifdef CONFIG_SYSFS
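To make the new skip_if_exists argument concrete, here is an illustrative caller in the spirit of a zswap-writeback path (helper name, gfp flags and error handling are our assumptions, not taken from this patch): instead of sleeping in the SWAP_HAS_CACHE retry loop, it backs off when another thread is still installing its folio.

static struct folio *swapcache_get_nowait(swp_entry_t entry,
					  struct mempolicy *mpol, pgoff_t ilx)
{
	bool folio_was_new;
	struct folio *folio;

	folio = __read_swap_cache_async(entry, GFP_KERNEL, mpol, ilx,
					&folio_was_new, true);
	if (!folio)
		return NULL;	/* raced with an installer, or allocation failed */
	if (folio_was_new)
		swap_read_folio(folio, true, NULL);	/* caller starts the read */
	return folio;
}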
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4bc70f459164..3eec686484ef 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -227,14 +227,14 @@ offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
BUG();
}
-sector_t swap_page_sector(struct page *page)
+sector_t swap_folio_sector(struct folio *folio)
{
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
struct swap_extent *se;
sector_t sector;
pgoff_t offset;
- offset = __page_file_index(page);
+ offset = swp_offset(folio->swap);
se = offset_to_swap_extent(sis, offset);
sector = se->start_block + (offset - se->start_page);
return sector << (PAGE_SHIFT - 9);
@@ -1495,9 +1495,9 @@ int swp_swapcount(swp_entry_t entry)
do {
page = list_next_entry(page, lru);
- map = kmap_atomic(page);
+ map = kmap_local_page(page);
tmp_count = map[offset];
- kunmap_atomic(map);
+ kunmap_local(map);
count += (tmp_count & ~COUNT_CONTINUED) * n;
n *= (SWAP_CONT_MAX + 1);
@@ -1741,18 +1741,24 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct folio *folio)
{
- struct page *page = folio_file_page(folio, swp_offset(entry));
- struct page *swapcache;
+ struct page *page;
+ struct folio *swapcache;
spinlock_t *ptl;
pte_t *pte, new_pte, old_pte;
- bool hwpoisoned = PageHWPoison(page);
+ bool hwpoisoned = false;
int ret = 1;
- swapcache = page;
- page = ksm_might_need_to_copy(page, vma, addr);
- if (unlikely(!page))
+ swapcache = folio;
+ folio = ksm_might_need_to_copy(folio, vma, addr);
+ if (unlikely(!folio))
return -ENOMEM;
- else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+ else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
+ hwpoisoned = true;
+ folio = swapcache;
+ }
+
+ page = folio_file_page(folio, swp_offset(entry));
+ if (PageHWPoison(page))
hwpoisoned = true;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -1764,13 +1770,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
old_pte = ptep_get(pte);
- if (unlikely(hwpoisoned || !PageUptodate(page))) {
+ if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
swp_entry_t swp_entry;
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
if (hwpoisoned) {
- swp_entry = make_hwpoison_entry(swapcache);
- page = swapcache;
+ swp_entry = make_hwpoison_entry(page);
} else {
swp_entry = make_poisoned_swp_entry();
}
@@ -1784,31 +1789,27 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
* when reading from swap. This metadata may be indexed by swap entry
* so this must be called before swap_free().
*/
- arch_swap_restore(entry, page_folio(page));
-
- /* See do_swap_page() */
- BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
- BUG_ON(PageAnon(page) && PageAnonExclusive(page));
+ arch_swap_restore(entry, folio);
dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
- get_page(page);
- if (page == swapcache) {
+ folio_get(folio);
+ if (folio == swapcache) {
rmap_t rmap_flags = RMAP_NONE;
/*
- * See do_swap_page(): PageWriteback() would be problematic.
- * However, we do a wait_on_page_writeback() just before this
- * call and have the page locked.
+ * See do_swap_page(): writeback would be problematic.
+ * However, we do a folio_wait_writeback() just before this
+ * call and have the folio locked.
*/
- VM_BUG_ON_PAGE(PageWriteback(page), page);
+ VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
if (pte_swp_exclusive(old_pte))
rmap_flags |= RMAP_EXCLUSIVE;
- page_add_anon_rmap(page, vma, addr, rmap_flags);
+ folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
} else { /* ksm created a completely new copy */
- page_add_new_anon_rmap(page, vma, addr);
- lru_cache_add_inactive_or_unevictable(page, vma);
+ folio_add_new_anon_rmap(folio, vma, addr);
+ folio_add_lru_vma(folio, vma);
}
new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
if (pte_swp_soft_dirty(old_pte))
@@ -1821,9 +1822,9 @@ setpte:
out:
if (pte)
pte_unmap_unlock(pte, ptl);
- if (page != swapcache) {
- unlock_page(page);
- put_page(page);
+ if (folio != swapcache) {
+ folio_unlock(folio);
+ folio_put(folio);
}
return ret;
}
@@ -2224,7 +2225,7 @@ EXPORT_SYMBOL_GPL(add_swap_extent);
/*
* A `swap extent' is a simple thing which maps a contiguous range of pages
* onto a contiguous range of disk blocks. A rbtree of swap extents is
- * built at swapon time and is then used at swap_writepage/swap_readpage
+ * built at swapon time and is then used at swap_writepage/swap_read_folio
* time for locating where on disk a page belongs.
*
* If the swapfile is an S_ISBLK block device, a single extent is installed.
@@ -3368,18 +3369,12 @@ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
return swap_type_to_swap_info(swp_type(entry));
}
-struct swap_info_struct *page_swap_info(struct page *page)
-{
- swp_entry_t entry = page_swap_entry(page);
- return swp_swap_info(entry);
-}
-
/*
* out-of-line methods to avoid include hell.
*/
struct address_space *swapcache_mapping(struct folio *folio)
{
- return page_swap_info(&folio->page)->swap_file->f_mapping;
+ return swp_swap_info(folio->swap)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(swapcache_mapping);
@@ -3477,9 +3472,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
if (!(count & COUNT_CONTINUED))
goto out_unlock_cont;
- map = kmap_atomic(list_page) + offset;
+ map = kmap_local_page(list_page) + offset;
count = *map;
- kunmap_atomic(map);
+ kunmap_local(map);
/*
* If this continuation count now has some space in it,
@@ -3529,7 +3524,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
spin_lock(&si->cont_lock);
offset &= ~PAGE_MASK;
page = list_next_entry(head, lru);
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
goto init_map; /* jump over SWAP_CONT_MAX checks */
@@ -3539,27 +3534,27 @@ static bool swap_count_continued(struct swap_info_struct *si,
* Think of how you add 1 to 999
*/
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
- kunmap_atomic(map);
+ kunmap_local(map);
page = list_next_entry(page, lru);
BUG_ON(page == head);
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
}
if (*map == SWAP_CONT_MAX) {
- kunmap_atomic(map);
+ kunmap_local(map);
page = list_next_entry(page, lru);
if (page == head) {
ret = false; /* add count continuation */
goto out;
}
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
*map += 1;
- kunmap_atomic(map);
+ kunmap_local(map);
while ((page = list_prev_entry(page, lru)) != head) {
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
*map = COUNT_CONTINUED;
- kunmap_atomic(map);
+ kunmap_local(map);
}
ret = true; /* incremented */
@@ -3569,21 +3564,21 @@ init_map: *map = 0; /* we didn't zero the page */
*/
BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
- kunmap_atomic(map);
+ kunmap_local(map);
page = list_next_entry(page, lru);
BUG_ON(page == head);
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
}
BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
- kunmap_atomic(map);
+ kunmap_local(map);
while ((page = list_prev_entry(page, lru)) != head) {
- map = kmap_atomic(page) + offset;
+ map = kmap_local_page(page) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
- kunmap_atomic(map);
+ kunmap_local(map);
}
ret = count == COUNT_CONTINUED;
}
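The kmap_atomic()/kunmap_atomic() pairs above all follow the same conversion pattern. A self-contained sketch of that pattern (not taken from the patch): kmap_local_page() creates a CPU-local, strictly nested mapping and, unlike kmap_atomic(), does not disable preemption or page faults.

static int compare_page_prefixes(struct page *a, struct page *b)
{
	unsigned char *pa = kmap_local_page(a);
	unsigned char *pb = kmap_local_page(b);
	int ret = memcmp(pa, pb, 64);

	kunmap_local(pb);	/* unmap in reverse (nested) order */
	kunmap_local(pa);
	return ret;
}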
diff --git a/mm/truncate.c b/mm/truncate.c
index 8e3aa9e8618e..725b150e47ac 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -250,10 +250,9 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
/*
* Used to get rid of pages on hardware memory corruption.
*/
-int generic_error_remove_page(struct address_space *mapping, struct page *page)
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
-
if (!mapping)
return -EINVAL;
/*
@@ -262,13 +261,26 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
*/
if (!S_ISREG(mapping->host->i_mode))
return -EIO;
- return truncate_inode_folio(mapping, page_folio(page));
+ return truncate_inode_folio(mapping, folio);
}
-EXPORT_SYMBOL(generic_error_remove_page);
+EXPORT_SYMBOL(generic_error_remove_folio);
-static long mapping_evict_folio(struct address_space *mapping,
- struct folio *folio)
+/**
+ * mapping_evict_folio() - Remove an unused folio from the page-cache.
+ * @mapping: The mapping this folio belongs to.
+ * @folio: The folio to remove.
+ *
+ * Safely remove one folio from the page cache.
+ * It only drops clean, unused folios.
+ *
+ * Context: Folio must be locked.
+ * Return: The number of pages successfully removed.
+ */
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
{
+ /* The page may have been truncated before it was locked */
+ if (!mapping)
+ return 0;
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return 0;
/* The refcount will be elevated if any page in the folio is mapped */
@@ -282,27 +294,6 @@ static long mapping_evict_folio(struct address_space *mapping,
}
/**
- * invalidate_inode_page() - Remove an unused page from the pagecache.
- * @page: The page to remove.
- *
- * Safely invalidate one page from its pagecache mapping.
- * It only drops clean, unused pages.
- *
- * Context: Page must be locked.
- * Return: The number of pages successfully removed.
- */
-long invalidate_inode_page(struct page *page)
-{
- struct folio *folio = page_folio(page);
- struct address_space *mapping = folio_mapping(folio);
-
- /* The page may have been truncated before it was locked */
- if (!mapping)
- return 0;
- return mapping_evict_folio(mapping, folio);
-}
-
-/**
* truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
@@ -560,9 +551,9 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
EXPORT_SYMBOL(invalidate_mapping_pages);
/*
- * This is like invalidate_inode_page(), except it ignores the page's
+ * This is like mapping_evict_folio(), except it ignores the folio's
* refcount. We do this because invalidate_inode_pages2() needs stronger
- * invalidation guarantees, and cannot afford to leave pages behind because
+ * invalidation guarantees, and cannot afford to leave folios behind because
* shrink_page_list() has a temp ref on them, or because they're transiently
* sitting in the folio_add_lru() caches.
*/
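With invalidate_inode_page() gone, callers that relied on its NULL-mapping check now pass the mapping to mapping_evict_folio() themselves. An illustrative wrapper (hypothetical, mirroring the removed helper):

static long evict_unused_folio(struct folio *folio)
{
	/* mapping_evict_folio() tolerates a NULL mapping from a racing truncation */
	return mapping_evict_folio(folio_mapping(folio), folio);
}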
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 0b6ca553bebe..216ab4c8621f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -114,9 +114,9 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
/* Usually, cache pages are already added to LRU */
if (newly_allocated)
folio_add_lru(folio);
- page_add_file_rmap(page, dst_vma, false);
+ folio_add_file_rmap_pte(folio, page, dst_vma);
} else {
- page_add_new_anon_rmap(page, dst_vma, dst_addr);
+ folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
folio_add_lru_vma(folio, dst_vma);
}
@@ -842,3 +842,626 @@ out_unlock:
mmap_read_unlock(dst_mm);
return err;
}
+
+
+void double_pt_lock(spinlock_t *ptl1,
+ spinlock_t *ptl2)
+ __acquires(ptl1)
+ __acquires(ptl2)
+{
+ spinlock_t *ptl_tmp;
+
+ if (ptl1 > ptl2) {
+ /* exchange ptl1 and ptl2 */
+ ptl_tmp = ptl1;
+ ptl1 = ptl2;
+ ptl2 = ptl_tmp;
+ }
+ /* lock in virtual address order to avoid lock inversion */
+ spin_lock(ptl1);
+ if (ptl1 != ptl2)
+ spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
+ else
+ __acquire(ptl2);
+}
+
+void double_pt_unlock(spinlock_t *ptl1,
+ spinlock_t *ptl2)
+ __releases(ptl1)
+ __releases(ptl2)
+{
+ spin_unlock(ptl1);
+ if (ptl1 != ptl2)
+ spin_unlock(ptl2);
+ else
+ __release(ptl2);
+}
+
+
+static int move_present_pte(struct mm_struct *mm,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr,
+ pte_t *dst_pte, pte_t *src_pte,
+ pte_t orig_dst_pte, pte_t orig_src_pte,
+ spinlock_t *dst_ptl, spinlock_t *src_ptl,
+ struct folio *src_folio)
+{
+ int err = 0;
+
+ double_pt_lock(dst_ptl, src_ptl);
+
+ if (!pte_same(*src_pte, orig_src_pte) ||
+ !pte_same(*dst_pte, orig_dst_pte)) {
+ err = -EAGAIN;
+ goto out;
+ }
+ if (folio_test_large(src_folio) ||
+ folio_maybe_dma_pinned(src_folio) ||
+ !PageAnonExclusive(&src_folio->page)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ folio_move_anon_rmap(src_folio, dst_vma);
+ WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
+ orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
+ /* Folio got pinned from under us. Put it back and fail the move. */
+ if (folio_maybe_dma_pinned(src_folio)) {
+ set_pte_at(mm, src_addr, src_pte, orig_src_pte);
+ err = -EBUSY;
+ goto out;
+ }
+
+ orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
+ /* Follow mremap() behavior and treat the entry as dirty after the move */
+ orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
+
+ set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
+out:
+ double_pt_unlock(dst_ptl, src_ptl);
+ return err;
+}
+
+static int move_swap_pte(struct mm_struct *mm,
+ unsigned long dst_addr, unsigned long src_addr,
+ pte_t *dst_pte, pte_t *src_pte,
+ pte_t orig_dst_pte, pte_t orig_src_pte,
+ spinlock_t *dst_ptl, spinlock_t *src_ptl)
+{
+ if (!pte_swp_exclusive(orig_src_pte))
+ return -EBUSY;
+
+ double_pt_lock(dst_ptl, src_ptl);
+
+ if (!pte_same(*src_pte, orig_src_pte) ||
+ !pte_same(*dst_pte, orig_dst_pte)) {
+ double_pt_unlock(dst_ptl, src_ptl);
+ return -EAGAIN;
+ }
+
+ orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
+ set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
+ double_pt_unlock(dst_ptl, src_ptl);
+
+ return 0;
+}
+
+/*
+ * The mmap_lock for reading is held by the caller. Just move the page
+ * from src_pmd to dst_pmd if possible, and return zero if the page was
+ * moved, or a negative error code otherwise.
+ */
+static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr,
+ __u64 mode)
+{
+ swp_entry_t entry;
+ pte_t orig_src_pte, orig_dst_pte;
+ pte_t src_folio_pte;
+ spinlock_t *src_ptl, *dst_ptl;
+ pte_t *src_pte = NULL;
+ pte_t *dst_pte = NULL;
+
+ struct folio *src_folio = NULL;
+ struct anon_vma *src_anon_vma = NULL;
+ struct mmu_notifier_range range;
+ int err = 0;
+
+ flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
+ src_addr, src_addr + PAGE_SIZE);
+ mmu_notifier_invalidate_range_start(&range);
+retry:
+ dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl);
+
+ /* Retry if a huge pmd materialized from under us */
+ if (unlikely(!dst_pte)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl);
+
+ /*
+ * We held the mmap_lock for reading so MADV_DONTNEED
+ * can zap transparent huge pages under us, or the
+ * transparent huge page fault can establish new
+ * transparent huge pages under us.
+ */
+ if (unlikely(!src_pte)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ /* Sanity checks before the operation */
+ if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
+ WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ spin_lock(dst_ptl);
+ orig_dst_pte = *dst_pte;
+ spin_unlock(dst_ptl);
+ if (!pte_none(orig_dst_pte)) {
+ err = -EEXIST;
+ goto out;
+ }
+
+ spin_lock(src_ptl);
+ orig_src_pte = *src_pte;
+ spin_unlock(src_ptl);
+ if (pte_none(orig_src_pte)) {
+ if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
+ err = -ENOENT;
+ else /* nothing to do to move a hole */
+ err = 0;
+ goto out;
+ }
+
+ /* If PTE changed after we locked the folio then start over */
+ if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ if (pte_present(orig_src_pte)) {
+ /*
+ * Pin and lock both source folio and anon_vma. Since we are in
+ * RCU read section, we can't block, so on contention have to
+ * unmap the ptes, obtain the lock and retry.
+ */
+ if (!src_folio) {
+ struct folio *folio;
+
+ /*
+ * Pin the page while holding the lock to be sure the
+ * page isn't freed under us
+ */
+ spin_lock(src_ptl);
+ if (!pte_same(orig_src_pte, *src_pte)) {
+ spin_unlock(src_ptl);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
+ if (!folio || !PageAnonExclusive(&folio->page)) {
+ spin_unlock(src_ptl);
+ err = -EBUSY;
+ goto out;
+ }
+
+ folio_get(folio);
+ src_folio = folio;
+ src_folio_pte = orig_src_pte;
+ spin_unlock(src_ptl);
+
+ if (!folio_trylock(src_folio)) {
+ pte_unmap(&orig_src_pte);
+ pte_unmap(&orig_dst_pte);
+ src_pte = dst_pte = NULL;
+ /* now we can block and wait */
+ folio_lock(src_folio);
+ goto retry;
+ }
+
+ if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
+ err = -EBUSY;
+ goto out;
+ }
+ }
+
+ /* at this point we have src_folio locked */
+ if (folio_test_large(src_folio)) {
+ /* split_folio() can block */
+ pte_unmap(&orig_src_pte);
+ pte_unmap(&orig_dst_pte);
+ src_pte = dst_pte = NULL;
+ err = split_folio(src_folio);
+ if (err)
+ goto out;
+ /* have to reacquire the folio after it got split */
+ folio_unlock(src_folio);
+ folio_put(src_folio);
+ src_folio = NULL;
+ goto retry;
+ }
+
+ if (!src_anon_vma) {
+ /*
+ * folio_referenced walks the anon_vma chain
+ * without the folio lock. Serialize against it with
+ * the anon_vma lock; the folio lock is not enough.
+ */
+ src_anon_vma = folio_get_anon_vma(src_folio);
+ if (!src_anon_vma) {
+ /* page was unmapped from under us */
+ err = -EAGAIN;
+ goto out;
+ }
+ if (!anon_vma_trylock_write(src_anon_vma)) {
+ pte_unmap(&orig_src_pte);
+ pte_unmap(&orig_dst_pte);
+ src_pte = dst_pte = NULL;
+ /* now we can block and wait */
+ anon_vma_lock_write(src_anon_vma);
+ goto retry;
+ }
+ }
+
+ err = move_present_pte(mm, dst_vma, src_vma,
+ dst_addr, src_addr, dst_pte, src_pte,
+ orig_dst_pte, orig_src_pte,
+ dst_ptl, src_ptl, src_folio);
+ } else {
+ entry = pte_to_swp_entry(orig_src_pte);
+ if (non_swap_entry(entry)) {
+ if (is_migration_entry(entry)) {
+ pte_unmap(&orig_src_pte);
+ pte_unmap(&orig_dst_pte);
+ src_pte = dst_pte = NULL;
+ migration_entry_wait(mm, src_pmd, src_addr);
+ err = -EAGAIN;
+ } else
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = move_swap_pte(mm, dst_addr, src_addr,
+ dst_pte, src_pte,
+ orig_dst_pte, orig_src_pte,
+ dst_ptl, src_ptl);
+ }
+
+out:
+ if (src_anon_vma) {
+ anon_vma_unlock_write(src_anon_vma);
+ put_anon_vma(src_anon_vma);
+ }
+ if (src_folio) {
+ folio_unlock(src_folio);
+ folio_put(src_folio);
+ }
+ if (dst_pte)
+ pte_unmap(dst_pte);
+ if (src_pte)
+ pte_unmap(src_pte);
+ mmu_notifier_invalidate_range_end(&range);
+
+ return err;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline bool move_splits_huge_pmd(unsigned long dst_addr,
+ unsigned long src_addr,
+ unsigned long src_end)
+{
+ return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
+ src_end - src_addr < HPAGE_PMD_SIZE;
+}
+#else
+static inline bool move_splits_huge_pmd(unsigned long dst_addr,
+ unsigned long src_addr,
+ unsigned long src_end)
+{
+ /* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
+ return false;
+}
+#endif
+
+static inline bool vma_move_compatible(struct vm_area_struct *vma)
+{
+ return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
+ VM_MIXEDMAP | VM_SHADOW_STACK));
+}
+
+static int validate_move_areas(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *src_vma,
+ struct vm_area_struct *dst_vma)
+{
+ /* Only allow moving if both have the same access and protection */
+ if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
+ pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
+ return -EINVAL;
+
+ /* Only allow moving if both are mlocked or both aren't */
+ if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
+ return -EINVAL;
+
+ /*
+ * For now, we keep it simple and only move between writable VMAs.
+ * Access flags are equal, therefore checking only the source is enough.
+ */
+ if (!(src_vma->vm_flags & VM_WRITE))
+ return -EINVAL;
+
+ /* Check if vma flags indicate content which can be moved */
+ if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
+ return -EINVAL;
+
+ /* Ensure dst_vma is registered in uffd we are operating on */
+ if (!dst_vma->vm_userfaultfd_ctx.ctx ||
+ dst_vma->vm_userfaultfd_ctx.ctx != ctx)
+ return -EINVAL;
+
+ /* Only allow moving across anonymous vmas */
+ if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
+ return -EINVAL;
+
+ /*
+ * Ensure the dst_vma has an anon_vma, or this page
+ * would get a NULL anon_vma when moved into the
+ * dst_vma.
+ */
+ if (unlikely(anon_vma_prepare(dst_vma)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * move_pages - move arbitrary anonymous pages of an existing vma
+ * @ctx: pointer to the userfaultfd context
+ * @mm: the address space to move pages
+ * @dst_start: start of the destination virtual memory range
+ * @src_start: start of the source virtual memory range
+ * @len: length of the virtual memory range
+ * @mode: flags from uffdio_move.mode
+ *
+ * Must be called with mmap_lock held for read.
+ *
+ * move_pages() remaps arbitrary anonymous pages atomically in zero
+ * copy. It only works on non shared anonymous pages because those can
+ * be relocated without generating non linear anon_vmas in the rmap
+ * code.
+ *
+ * It provides a zero copy mechanism to handle userspace page faults.
+ * The source vma pages should have mapcount == 1, which can be
+ * enforced by using madvise(MADV_DONTFORK) on src vma.
+ *
+ * The thread receiving the page during the userland page fault
+ * will receive the faulting page in the source vma through the network,
+ * storage or any other I/O device (MADV_DONTFORK in the source vma
+ * prevents move_pages() from failing with -EBUSY if the process forks before
+ * move_pages() is called), then it will call move_pages() to map the
+ * page in the faulting address in the destination vma.
+ *
+ * This userfaultfd command works purely via pagetables, so it's the
+ * most efficient way to move physical non shared anonymous pages
+ * across different virtual addresses. Unlike mremap()/mmap()/munmap()
+ * it does not create any new vmas. The mapping in the destination
+ * address is atomic.
+ *
+ * It only works if the vma protection bits are identical between the
+ * source and destination vma.
+ *
+ * It can remap non shared anonymous pages within the same vma too.
+ *
+ * If the source virtual memory range has any unmapped holes, or if
+ * the destination virtual memory range is not a whole unmapped hole,
+ * move_pages() will fail respectively with -ENOENT or -EEXIST. This
+ * provides a very strict behavior to avoid any chance of memory
+ * corruption going unnoticed if there are userland race conditions.
+ * Only one thread should resolve the userland page fault at any given
+ * time for any given faulting address. This means that if two threads
+ * try to both call move_pages() on the same destination address at the
+ * same time, the second thread will get an explicit error from this
+ * command.
+ *
+ * The command retval will return "len" if successful. The command
+ * however can be interrupted by fatal signals or errors. If
+ * interrupted it will return the number of bytes successfully
+ * remapped before the interruption if any, or the negative error if
+ * none. It will never return zero. Either it will return an error or
+ * an amount of bytes successfully moved. If the retval reports a
+ * "short" remap, the move_pages() command should be repeated by
+ * userland with src+retval, dst+retval, len-retval if it wants to know
+ * about the error that interrupted it.
+ *
+ * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
+ * prevent -ENOENT errors from materializing if there are holes in the
+ * source virtual range that is being remapped. The holes will be
+ * accounted as successfully remapped in the retval of the
+ * command. This is mostly useful to remap hugepage naturally aligned
+ * virtual regions without knowing if there are transparent hugepages
+ * in the regions or not, but preventing the risk of having to split
+ * the hugepmd during the remap.
+ *
+ * If there's any rmap walk that is taking the anon_vma locks without
+ * first obtaining the folio lock (the only current instance is
+ * folio_referenced), they will have to verify if the folio->mapping
+ * has changed after taking the anon_vma lock. If it changed they
+ * should release the lock and retry obtaining a new anon_vma, because
+ * it means the anon_vma was changed by move_pages() before the lock
+ * could be obtained. This is the only additional complexity added to
+ * the rmap code to provide this anonymous page remapping functionality.
+ */
+ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
+ unsigned long dst_start, unsigned long src_start,
+ unsigned long len, __u64 mode)
+{
+ struct vm_area_struct *src_vma, *dst_vma;
+ unsigned long src_addr, dst_addr;
+ pmd_t *src_pmd, *dst_pmd;
+ long err = -EINVAL;
+ ssize_t moved = 0;
+
+ /* Sanitize the command parameters. */
+ if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
+ WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
+ WARN_ON_ONCE(len & ~PAGE_MASK))
+ goto out;
+
+ /* Does the address range wrap, or is the span zero-sized? */
+ if (WARN_ON_ONCE(src_start + len <= src_start) ||
+ WARN_ON_ONCE(dst_start + len <= dst_start))
+ goto out;
+
+ /*
+ * Make sure the vma is not shared, that the src and dst remap
+ * ranges are both valid and fully within a single existing
+ * vma.
+ */
+ src_vma = find_vma(mm, src_start);
+ if (!src_vma || (src_vma->vm_flags & VM_SHARED))
+ goto out;
+ if (src_start < src_vma->vm_start ||
+ src_start + len > src_vma->vm_end)
+ goto out;
+
+ dst_vma = find_vma(mm, dst_start);
+ if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+ goto out;
+ if (dst_start < dst_vma->vm_start ||
+ dst_start + len > dst_vma->vm_end)
+ goto out;
+
+ err = validate_move_areas(ctx, src_vma, dst_vma);
+ if (err)
+ goto out;
+
+ for (src_addr = src_start, dst_addr = dst_start;
+ src_addr < src_start + len;) {
+ spinlock_t *ptl;
+ pmd_t dst_pmdval;
+ unsigned long step_size;
+
+ /*
+ * Below works because an anonymous area would not have a
+ * transparent huge PUD. If file-backed support is added,
+ * that case would need to be handled here.
+ */
+ src_pmd = mm_find_pmd(mm, src_addr);
+ if (unlikely(!src_pmd)) {
+ if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
+ err = -ENOENT;
+ break;
+ }
+ src_pmd = mm_alloc_pmd(mm, src_addr);
+ if (unlikely(!src_pmd)) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+ dst_pmd = mm_alloc_pmd(mm, dst_addr);
+ if (unlikely(!dst_pmd)) {
+ err = -ENOMEM;
+ break;
+ }
+
+ dst_pmdval = pmdp_get_lockless(dst_pmd);
+ /*
+ * If the dst_pmd is mapped as THP, don't override it and just
+ * be strict. If dst_pmd changes into THP after this check, the
+ * move_pages_huge_pmd() will detect the change and retry
+ * while move_pages_pte() will detect the change and fail.
+ */
+ if (unlikely(pmd_trans_huge(dst_pmdval))) {
+ err = -EEXIST;
+ break;
+ }
+
+ ptl = pmd_trans_huge_lock(src_pmd, src_vma);
+ if (ptl) {
+ if (pmd_devmap(*src_pmd)) {
+ spin_unlock(ptl);
+ err = -ENOENT;
+ break;
+ }
+
+ /* Check if we can move the pmd without splitting it. */
+ if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
+ !pmd_none(dst_pmdval)) {
+ struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
+
+ if (!folio || !PageAnonExclusive(&folio->page)) {
+ spin_unlock(ptl);
+ err = -EBUSY;
+ break;
+ }
+
+ spin_unlock(ptl);
+ split_huge_pmd(src_vma, src_pmd, src_addr);
+ /* The folio will be split by move_pages_pte() */
+ continue;
+ }
+
+ err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
+ dst_pmdval, dst_vma, src_vma,
+ dst_addr, src_addr);
+ step_size = HPAGE_PMD_SIZE;
+ } else {
+ if (pmd_none(*src_pmd)) {
+ if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
+ err = -ENOENT;
+ break;
+ }
+ if (unlikely(__pte_alloc(mm, src_pmd))) {
+ err = -ENOMEM;
+ break;
+ }
+ }
+
+ if (unlikely(pte_alloc(mm, dst_pmd))) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = move_pages_pte(mm, dst_pmd, src_pmd,
+ dst_vma, src_vma,
+ dst_addr, src_addr, mode);
+ step_size = PAGE_SIZE;
+ }
+
+ cond_resched();
+
+ if (fatal_signal_pending(current)) {
+ /* Do not override an error */
+ if (!err || err == -EAGAIN)
+ err = -EINTR;
+ break;
+ }
+
+ if (err) {
+ if (err == -EAGAIN)
+ continue;
+ break;
+ }
+
+ /* Proceed to the next page */
+ dst_addr += step_size;
+ src_addr += step_size;
+ moved += step_size;
+ }
+
+out:
+ VM_WARN_ON(moved < 0);
+ VM_WARN_ON(err > 0);
+ VM_WARN_ON(!moved && !err);
+ return moved ? moved : err;
+}
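From userspace, move_pages() is driven through the UFFDIO_MOVE ioctl added by this series. A minimal usage sketch, assuming the uAPI definitions in <linux/userfaultfd.h> (struct uffdio_move and UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES); error handling is trimmed and the wrapper name is ours:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static long uffd_move(int uffd, unsigned long src, unsigned long dst,
		      unsigned long len)
{
	struct uffdio_move move = {
		.dst  = dst,
		.src  = src,
		.len  = len,
		.mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES,
	};

	if (ioctl(uffd, UFFDIO_MOVE, &move) && move.move <= 0)
		return -1;	/* nothing was moved */
	/* on a short move, retry with src + move.move, dst + move.move, len - move.move */
	return move.move;
}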
diff --git a/mm/util.c b/mm/util.c
index 744b4d7e3fae..5a6a9802583b 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1047,11 +1047,11 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
char *addr1, *addr2;
int ret;
- addr1 = kmap_atomic(page1);
- addr2 = kmap_atomic(page2);
+ addr1 = kmap_local_page(page1);
+ addr2 = kmap_local_page(page2);
ret = memcmp(addr1, addr2, PAGE_SIZE);
- kunmap_atomic(addr2);
- kunmap_atomic(addr1);
+ kunmap_local(addr2);
+ kunmap_local(addr1);
return ret;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bba207f41b14..4f9c854ce6cc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -411,10 +411,10 @@ static int reclaimer_offset(void)
{
BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
- BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
- PGSCAN_DIRECT - PGSCAN_KSWAPD);
BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
+ BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
+ PGSCAN_DIRECT - PGSCAN_KSWAPD);
BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);
@@ -977,7 +977,8 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
&nr_succeeded);
- __count_vm_events(PGDEMOTE_KSWAPD + reclaimer_offset(), nr_succeeded);
+ mod_node_page_state(pgdat, PGDEMOTE_KSWAPD + reclaimer_offset(),
+ nr_succeeded);
return nr_succeeded;
}
@@ -2222,7 +2223,7 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
* Flush the memory cgroup stats, so that we read accurate per-memcg
* lruvec stats for heuristics.
*/
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats(sc->target_mem_cgroup);
/*
* Determine the scan balance between anon and file LRUs.
@@ -2667,13 +2668,14 @@ static void get_item_key(void *item, int *key)
key[1] = hash >> BLOOM_FILTER_SHIFT;
}
-static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
+static bool test_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
+ void *item)
{
int key[2];
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
- filter = READ_ONCE(lruvec->mm_state.filters[gen]);
+ filter = READ_ONCE(mm_state->filters[gen]);
if (!filter)
return true;
@@ -2682,13 +2684,14 @@ static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *it
return test_bit(key[0], filter) && test_bit(key[1], filter);
}
-static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
+static void update_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq,
+ void *item)
{
int key[2];
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
- filter = READ_ONCE(lruvec->mm_state.filters[gen]);
+ filter = READ_ONCE(mm_state->filters[gen]);
if (!filter)
return;
@@ -2700,12 +2703,12 @@ static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *
set_bit(key[1], filter);
}
-static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
+static void reset_bloom_filter(struct lru_gen_mm_state *mm_state, unsigned long seq)
{
unsigned long *filter;
int gen = filter_gen_from_seq(seq);
- filter = lruvec->mm_state.filters[gen];
+ filter = mm_state->filters[gen];
if (filter) {
bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
return;
@@ -2713,13 +2716,15 @@ static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
- WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
+ WRITE_ONCE(mm_state->filters[gen], filter);
}
/******************************************************************************
* mm_struct list
******************************************************************************/
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+
static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
{
static struct lru_gen_mm_list mm_list = {
@@ -2736,6 +2741,29 @@ static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
return &mm_list;
}
+static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
+{
+ return &lruvec->mm_state;
+}
+
+static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
+{
+ int key;
+ struct mm_struct *mm;
+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
+ struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
+
+ mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
+ key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
+
+ if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
+ return NULL;
+
+ clear_bit(key, &mm->lru_gen.bitmap);
+
+ return mmget_not_zero(mm) ? mm : NULL;
+}
+
void lru_gen_add_mm(struct mm_struct *mm)
{
int nid;
@@ -2751,10 +2779,11 @@ void lru_gen_add_mm(struct mm_struct *mm)
for_each_node_state(nid, N_MEMORY) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/* the first addition since the last iteration */
- if (lruvec->mm_state.tail == &mm_list->fifo)
- lruvec->mm_state.tail = &mm->lru_gen.list;
+ if (mm_state->tail == &mm_list->fifo)
+ mm_state->tail = &mm->lru_gen.list;
}
list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
@@ -2780,14 +2809,15 @@ void lru_gen_del_mm(struct mm_struct *mm)
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/* where the current iteration continues after */
- if (lruvec->mm_state.head == &mm->lru_gen.list)
- lruvec->mm_state.head = lruvec->mm_state.head->prev;
+ if (mm_state->head == &mm->lru_gen.list)
+ mm_state->head = mm_state->head->prev;
/* where the last iteration ended before */
- if (lruvec->mm_state.tail == &mm->lru_gen.list)
- lruvec->mm_state.tail = lruvec->mm_state.tail->next;
+ if (mm_state->tail == &mm->lru_gen.list)
+ mm_state->tail = mm_state->tail->next;
}
list_del_init(&mm->lru_gen.list);
@@ -2830,10 +2860,30 @@ void lru_gen_migrate_mm(struct mm_struct *mm)
}
#endif
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
+
+static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
+{
+ return NULL;
+}
+
+static struct lru_gen_mm_state *get_mm_state(struct lruvec *lruvec)
+{
+ return NULL;
+}
+
+static struct mm_struct *get_next_mm(struct lru_gen_mm_walk *walk)
+{
+ return NULL;
+}
+
+#endif
+
static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
{
int i;
int hist;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
@@ -2841,44 +2891,20 @@ static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
hist = lru_hist_from_seq(walk->max_seq);
for (i = 0; i < NR_MM_STATS; i++) {
- WRITE_ONCE(lruvec->mm_state.stats[hist][i],
- lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
+ WRITE_ONCE(mm_state->stats[hist][i],
+ mm_state->stats[hist][i] + walk->mm_stats[i]);
walk->mm_stats[i] = 0;
}
}
if (NR_HIST_GENS > 1 && last) {
- hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
+ hist = lru_hist_from_seq(mm_state->seq + 1);
for (i = 0; i < NR_MM_STATS; i++)
- WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
+ WRITE_ONCE(mm_state->stats[hist][i], 0);
}
}
-static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
-{
- int type;
- unsigned long size = 0;
- struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
- int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
-
- if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
- return true;
-
- clear_bit(key, &mm->lru_gen.bitmap);
-
- for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
- size += type ? get_mm_counter(mm, MM_FILEPAGES) :
- get_mm_counter(mm, MM_ANONPAGES) +
- get_mm_counter(mm, MM_SHMEMPAGES);
- }
-
- if (size < MIN_LRU_BATCH)
- return true;
-
- return !mmget_not_zero(mm);
-}
-
static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
struct mm_struct **iter)
{
@@ -2887,7 +2913,7 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
struct mm_struct *mm = NULL;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
- struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
/*
* mm_state->seq is incremented after each iteration of mm_list. There
@@ -2925,11 +2951,7 @@ static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
mm_state->tail = mm_state->head->next;
walk->force_scan = true;
}
-
- mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
- if (should_skip_mm(mm, walk))
- mm = NULL;
- } while (!mm);
+ } while (!(mm = get_next_mm(walk)));
done:
if (*iter || last)
reset_mm_stats(lruvec, walk, last);
@@ -2937,7 +2959,7 @@ done:
spin_unlock(&mm_list->lock);
if (mm && first)
- reset_bloom_filter(lruvec, walk->max_seq + 1);
+ reset_bloom_filter(mm_state, walk->max_seq + 1);
if (*iter)
mmput_async(*iter);
@@ -2952,7 +2974,7 @@ static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
bool success = false;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
- struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
spin_lock(&mm_list->lock);
@@ -3248,7 +3270,6 @@ static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned
return pfn;
}
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
{
unsigned long pfn = pmd_pfn(pmd);
@@ -3266,7 +3287,6 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned
return pfn;
}
-#endif
static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
struct pglist_data *pgdat, bool can_swap)
@@ -3369,7 +3389,6 @@ restart:
return suitable_to_scan(total, young);
}
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
{
@@ -3447,12 +3466,6 @@ next:
done:
*first = -1;
}
-#else
-static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma,
- struct mm_walk *args, unsigned long *bitmap, unsigned long *first)
-{
-}
-#endif
static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
struct mm_walk *args)
@@ -3465,6 +3478,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
DECLARE_BITMAP(bitmap, MIN_LRU_BATCH);
unsigned long first = -1;
struct lru_gen_mm_walk *walk = args->private;
+ struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec);
VM_WARN_ON_ONCE(pud_leaf(*pud));
@@ -3487,7 +3501,6 @@ restart:
continue;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pmd_trans_huge(val)) {
unsigned long pfn = pmd_pfn(val);
struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
@@ -3506,7 +3519,7 @@ restart:
walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
continue;
}
-#endif
+
walk->mm_stats[MM_NONLEAF_TOTAL]++;
if (should_clear_pmd_young()) {
@@ -3516,7 +3529,7 @@ restart:
walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first);
}
- if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
+ if (!walk->force_scan && !test_bloom_filter(mm_state, walk->max_seq, pmd + i))
continue;
walk->mm_stats[MM_NONLEAF_FOUND]++;
@@ -3527,7 +3540,7 @@ restart:
walk->mm_stats[MM_NONLEAF_ADDED]++;
/* carry over to the next generation */
- update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
+ update_bloom_filter(mm_state, walk->max_seq + 1, pmd + i);
}
walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first);
@@ -3734,16 +3747,25 @@ next:
return success;
}
-static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
+static bool inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+ bool can_swap, bool force_scan)
{
+ bool success;
int prev, next;
int type, zone;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
restart:
+ if (max_seq < READ_ONCE(lrugen->max_seq))
+ return false;
+
spin_lock_irq(&lruvec->lru_lock);
VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
+ success = max_seq == lrugen->max_seq;
+ if (!success)
+ goto unlock;
+
for (type = ANON_AND_FILE - 1; type >= 0; type--) {
if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
continue;
@@ -3787,8 +3809,10 @@ restart:
WRITE_ONCE(lrugen->timestamps[next], jiffies);
/* make sure preceding modifications appear */
smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
-
+unlock:
spin_unlock_irq(&lruvec->lru_lock);
+
+ return success;
}
static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
@@ -3798,14 +3822,16 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
struct lru_gen_mm_walk *walk;
struct mm_struct *mm = NULL;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
+ if (!mm_state)
+ return inc_max_seq(lruvec, max_seq, can_swap, force_scan);
+
/* see the comment in iterate_mm_list() */
- if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
- success = false;
- goto done;
- }
+ if (max_seq <= READ_ONCE(mm_state->seq))
+ return false;
/*
* If the hardware doesn't automatically set the accessed bit, fallback
@@ -3835,8 +3861,10 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
walk_mm(lruvec, mm, walk);
} while (mm);
done:
- if (success)
- inc_max_seq(lruvec, can_swap, force_scan);
+ if (success) {
+ success = inc_max_seq(lruvec, max_seq, can_swap, force_scan);
+ WARN_ON_ONCE(!success);
+ }
return success;
}
@@ -3961,6 +3989,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
struct mem_cgroup *memcg = folio_memcg(folio);
struct pglist_data *pgdat = folio_pgdat(folio);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
DEFINE_MAX_SEQ(lruvec);
int old_gen, new_gen = lru_gen_from_seq(max_seq);
@@ -4043,8 +4072,8 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
mem_cgroup_unlock_pages();
/* feedback from rmap walkers to page table walkers */
- if (suitable_to_scan(i, young))
- update_bloom_filter(lruvec, max_seq, pvmw->pmd);
+ if (mm_state && suitable_to_scan(i, young))
+ update_bloom_filter(mm_state, max_seq, pvmw->pmd);
}
/******************************************************************************
@@ -4060,13 +4089,6 @@ enum {
MEMCG_LRU_YOUNG,
};
-#ifdef CONFIG_MEMCG
-
-static int lru_gen_memcg_seg(struct lruvec *lruvec)
-{
- return READ_ONCE(lruvec->lrugen.seg);
-}
-
static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
{
int seg;
@@ -4113,6 +4135,8 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
}
+#ifdef CONFIG_MEMCG
+
void lru_gen_online_memcg(struct mem_cgroup *memcg)
{
int gen;
@@ -4180,18 +4204,11 @@ void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
struct lruvec *lruvec = get_lruvec(memcg, nid);
/* see the comment on MEMCG_NR_GENS */
- if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
+ if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD)
lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
}
-#else /* !CONFIG_MEMCG */
-
-static int lru_gen_memcg_seg(struct lruvec *lruvec)
-{
- return 0;
-}
-
-#endif
+#endif /* CONFIG_MEMCG */
/******************************************************************************
* the eviction
@@ -4739,7 +4756,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
if (mem_cgroup_below_low(NULL, memcg)) {
/* see the comment on MEMCG_NR_GENS */
- if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL)
+ if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL)
return MEMCG_LRU_TAIL;
memcg_memory_event(memcg, MEMCG_LOW);
@@ -4762,12 +4779,10 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
return 0;
/* one retry if offlined or too small */
- return lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL ?
+ return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ?
MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG;
}
-#ifdef CONFIG_MEMCG
-
static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
{
int op;
@@ -4859,20 +4874,6 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
blk_finish_plug(&plug);
}
-#else /* !CONFIG_MEMCG */
-
-static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
-{
- BUILD_BUG();
-}
-
-static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-{
- BUILD_BUG();
-}
-
-#endif
-
static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
{
int priority;
@@ -5220,6 +5221,7 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
int type, tier;
int hist = lru_hist_from_seq(seq);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
for (tier = 0; tier < MAX_NR_TIERS; tier++) {
seq_printf(m, " %10d", tier);
@@ -5245,6 +5247,9 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
seq_putc(m, '\n');
}
+ if (!mm_state)
+ return;
+
seq_puts(m, " ");
for (i = 0; i < NR_MM_STATS; i++) {
const char *s = " ";
@@ -5252,10 +5257,10 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
if (seq == max_seq && NR_HIST_GENS == 1) {
s = "LOYNFA";
- n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+ n = READ_ONCE(mm_state->stats[hist][i]);
} else if (seq != max_seq && NR_HIST_GENS > 1) {
s = "loynfa";
- n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+ n = READ_ONCE(mm_state->stats[hist][i]);
}
seq_printf(m, " %10lu%c", n, s[i]);
@@ -5519,11 +5524,24 @@ static const struct file_operations lru_gen_ro_fops = {
* initialization
******************************************************************************/
+void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+ int i, j;
+
+ spin_lock_init(&pgdat->memcg_lru.lock);
+
+ for (i = 0; i < MEMCG_NR_GENS; i++) {
+ for (j = 0; j < MEMCG_NR_BINS; j++)
+ INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
+ }
+}
+
void lru_gen_init_lruvec(struct lruvec *lruvec)
{
int i;
int gen, type, zone;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
lrugen->max_seq = MIN_NR_GENS + 1;
lrugen->enabled = lru_gen_enabled();
@@ -5534,47 +5552,46 @@ void lru_gen_init_lruvec(struct lruvec *lruvec)
for_each_gen_type_zone(gen, type, zone)
INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
- lruvec->mm_state.seq = MIN_NR_GENS;
+ if (mm_state)
+ mm_state->seq = MIN_NR_GENS;
}
#ifdef CONFIG_MEMCG
-void lru_gen_init_pgdat(struct pglist_data *pgdat)
+void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
- int i, j;
+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
- spin_lock_init(&pgdat->memcg_lru.lock);
+ if (!mm_list)
+ return;
- for (i = 0; i < MEMCG_NR_GENS; i++) {
- for (j = 0; j < MEMCG_NR_BINS; j++)
- INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
- }
-}
-
-void lru_gen_init_memcg(struct mem_cgroup *memcg)
-{
- INIT_LIST_HEAD(&memcg->mm_list.fifo);
- spin_lock_init(&memcg->mm_list.lock);
+ INIT_LIST_HEAD(&mm_list->fifo);
+ spin_lock_init(&mm_list->lock);
}
void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
int i;
int nid;
+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
- VM_WARN_ON_ONCE(!list_empty(&memcg->mm_list.fifo));
+ VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo));
for_each_node(nid) {
struct lruvec *lruvec = get_lruvec(memcg, nid);
+ struct lru_gen_mm_state *mm_state = get_mm_state(lruvec);
VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
sizeof(lruvec->lrugen.nr_pages)));
lruvec->lrugen.list.next = LIST_POISON1;
+ if (!mm_state)
+ continue;
+
for (i = 0; i < NR_BLOOM_FILTERS; i++) {
- bitmap_free(lruvec->mm_state.filters[i]);
- lruvec->mm_state.filters[i] = NULL;
+ bitmap_free(mm_state->filters[i]);
+ mm_state->filters[i] = NULL;
}
}
}
@@ -5600,14 +5617,17 @@ late_initcall(init_lru_gen);
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
+ BUILD_BUG();
}
static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
+ BUILD_BUG();
}
static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
{
+ BUILD_BUG();
}
#endif /* CONFIG_LRU_GEN */
@@ -6400,7 +6420,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
* scan_control uses s8 fields for order, priority, and reclaim_idx.
* Confirm they are large enough for max values.
*/
- BUILD_BUG_ON(MAX_ORDER >= S8_MAX);
+ BUILD_BUG_ON(MAX_PAGE_ORDER >= S8_MAX);
BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
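Editor's note: the BUILD_BUG() bodies added to the !CONFIG_LRU_GEN stubs above rely on the usual kernel convention that such calls must be proven unreachable at compile time; a minimal sketch of that pattern (not part of the patch, names illustrative) is below.

/*
 * Sketch: BUILD_BUG() only fires if a call site survives dead-code
 * elimination, so keeping every caller behind a compile-time-false
 * predicate proves at build time that the stub is never reached.
 */
#include <linux/build_bug.h>

static inline bool feature_enabled(void)
{
	return false;	/* constant-folded when the feature is compiled out */
}

static void feature_run(void)
{
	BUILD_BUG();	/* must never survive optimization */
}

static void feature_caller(void)
{
	if (feature_enabled())	/* branch removed at compile time */
		feature_run();
}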
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 359460deb377..db79935e4a54 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1059,7 +1059,7 @@ static void fill_contig_page_info(struct zone *zone,
info->free_blocks_total = 0;
info->free_blocks_suitable = 0;
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
unsigned long blocks;
/*
@@ -1092,7 +1092,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in
{
unsigned long requested = 1UL << order;
- if (WARN_ON_ONCE(order > MAX_ORDER))
+ if (WARN_ON_ONCE(order > MAX_PAGE_ORDER))
return 0;
if (!info->free_blocks_total)
@@ -1249,6 +1249,9 @@ const char * const vmstat_text[] = {
"pgpromote_success",
"pgpromote_candidate",
#endif
+ "pgdemote_kswapd",
+ "pgdemote_direct",
+ "pgdemote_khugepaged",
/* enum writeback_stat_item counters */
"nr_dirty_threshold",
@@ -1279,9 +1282,6 @@ const char * const vmstat_text[] = {
"pgsteal_kswapd",
"pgsteal_direct",
"pgsteal_khugepaged",
- "pgdemote_kswapd",
- "pgdemote_direct",
- "pgdemote_khugepaged",
"pgscan_kswapd",
"pgscan_direct",
"pgscan_khugepaged",
@@ -1401,6 +1401,7 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_ZSWAP
"zswpin",
"zswpout",
+ "zswpwb",
#endif
#ifdef CONFIG_X86
"direct_map_level2_splits",
@@ -1475,7 +1476,7 @@ static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
int order;
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
- for (order = 0; order <= MAX_ORDER; ++order)
+ for (order = 0; order < NR_PAGE_ORDERS; ++order)
/*
* Access to nr_free is lockless as nr_free is used only for
* printing purposes. Use data_race to avoid KCSAN warning.
@@ -1504,7 +1505,7 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
pgdat->node_id,
zone->name,
migratetype_names[mtype]);
- for (order = 0; order <= MAX_ORDER; ++order) {
+ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
unsigned long freecount = 0;
struct free_area *area;
struct list_head *curr;
@@ -1544,7 +1545,7 @@ static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
/* Print header */
seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
- for (order = 0; order <= MAX_ORDER; ++order)
+ for (order = 0; order < NR_PAGE_ORDERS; ++order)
seq_printf(m, "%6d ", order);
seq_putc(m, '\n');
@@ -2180,7 +2181,7 @@ static void unusable_show_print(struct seq_file *m,
seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id,
zone->name);
- for (order = 0; order <= MAX_ORDER; ++order) {
+ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
fill_contig_page_info(zone, order, &info);
index = unusable_free_index(order, &info);
seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
@@ -2232,7 +2233,7 @@ static void extfrag_show_print(struct seq_file *m,
seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id,
zone->name);
- for (order = 0; order <= MAX_ORDER; ++order) {
+ for (order = 0; order < NR_PAGE_ORDERS; ++order) {
fill_contig_page_info(zone, order, &info);
index = __fragmentation_index(order, &info);
seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
diff --git a/mm/workingset.c b/mm/workingset.c
index 33baad203277..226012974328 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -425,8 +425,16 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
struct pglist_data *pgdat;
unsigned long eviction;
- if (lru_gen_enabled())
- return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset);
+ rcu_read_lock();
+
+ if (lru_gen_enabled()) {
+ bool recent = lru_gen_test_recent(shadow, file,
+ &eviction_lruvec, &eviction, workingset);
+
+ rcu_read_unlock();
+ return recent;
+ }
+
unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
eviction <<= bucket_order;
@@ -448,8 +456,20 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
* configurations instead.
*/
eviction_memcg = mem_cgroup_from_id(memcgid);
- if (!mem_cgroup_disabled() && !eviction_memcg)
+ if (!mem_cgroup_disabled() &&
+ (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) {
+ rcu_read_unlock();
return false;
+ }
+
+ rcu_read_unlock();
+
+ /*
+ * Flush stats (and potentially sleep) outside the RCU read section.
+ * XXX: With per-memcg flushing and thresholding, is ratelimiting
+ * still needed here?
+ */
+ mem_cgroup_flush_stats_ratelimited(eviction_memcg);
eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
refault = atomic_long_read(&eviction_lruvec->nonresident_age);
@@ -493,6 +513,7 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset)
}
}
+ mem_cgroup_put(eviction_memcg);
return refault_distance <= workingset_size;
}
@@ -519,19 +540,16 @@ void workingset_refault(struct folio *folio, void *shadow)
return;
}
- /* Flush stats (and potentially sleep) before holding RCU read lock */
- mem_cgroup_flush_stats_ratelimited();
-
- rcu_read_lock();
-
/*
* The activation decision for this folio is made at the level
* where the eviction occurred, as that is where the LRU order
* during folio reclaim is being determined.
*
* However, the cgroup that will own the folio is the one that
- * is actually experiencing the refault event.
+ * is actually experiencing the refault event. Make sure the folio is
+ * locked to guarantee folio_memcg() stability throughout.
*/
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
nr = folio_nr_pages(folio);
memcg = folio_memcg(folio);
pgdat = folio_pgdat(folio);
@@ -540,7 +558,7 @@ void workingset_refault(struct folio *folio, void *shadow)
mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
if (!workingset_test_recent(shadow, file, &workingset))
- goto out;
+ return;
folio_set_active(folio);
workingset_age_nonresident(lruvec, nr);
@@ -556,8 +574,6 @@ void workingset_refault(struct folio *folio, void *shadow)
lru_note_cost_refault(folio);
mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
}
-out:
- rcu_read_unlock();
}
/**
@@ -615,12 +631,12 @@ void workingset_update_node(struct xa_node *node)
if (node->count && node->count == node->nr_values) {
if (list_empty(&node->private_list)) {
- list_lru_add(&shadow_nodes, &node->private_list);
+ list_lru_add_obj(&shadow_nodes, &node->private_list);
__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
}
} else {
if (!list_empty(&node->private_list)) {
- list_lru_del(&shadow_nodes, &node->private_list);
+ list_lru_del_obj(&shadow_nodes, &node->private_list);
__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
}
}
@@ -664,7 +680,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
struct lruvec *lruvec;
int i;
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats_ratelimited(sc->memcg);
lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
pages += lruvec_page_state_local(lruvec,
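Editor's note: the workingset_test_recent() change above pins the eviction memcg before leaving the RCU read section so the stats flush can sleep safely; a hedged sketch of that reference pattern (not part of the patch, helper name illustrative, error handling elided) follows.

/*
 * Take a tryget reference while still under rcu_read_lock(), leave the RCU
 * section, do any sleeping work, then drop the reference when done.
 */
static struct mem_cgroup *pin_eviction_memcg(unsigned short memcgid)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(memcgid);
	if (memcg && !mem_cgroup_tryget(memcg))
		memcg = NULL;
	rcu_read_unlock();

	return memcg;	/* caller calls mem_cgroup_put() when finished */
}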
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b1c0dad7f4cf..c937635e0ad1 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1364,9 +1364,12 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
int newfg;
struct zspage *zspage;
- if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
+ if (unlikely(!size))
return (unsigned long)ERR_PTR(-EINVAL);
+ if (unlikely(size > ZS_MAX_ALLOC_SIZE))
+ return (unsigned long)ERR_PTR(-ENOSPC);
+
handle = cache_alloc_handle(pool, gfp);
if (!handle)
return (unsigned long)ERR_PTR(-ENOMEM);
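Editor's note: with the split above, a caller can tell an oversized allocation (-ENOSPC) apart from a zero-size one (-EINVAL); a hedged usage sketch (not part of the patch, surrounding function illustrative) follows.

static int store_object(struct zs_pool *pool, size_t len, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, len, gfp);

	/* -ENOSPC: too large, -EINVAL: zero size, -ENOMEM: handle alloc failed */
	if (IS_ERR_VALUE(*handle))
		return PTR_ERR((void *)*handle);
	return 0;
}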
diff --git a/mm/zswap.c b/mm/zswap.c
index 74411dfdad92..ca25b676048e 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -35,6 +35,7 @@
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
+#include <linux/list_lru.h>
#include "swap.h"
#include "internal.h"
@@ -147,6 +148,16 @@ module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32
+/* Enable/disable memory pressure-based shrinker. */
+static bool zswap_shrinker_enabled = IS_ENABLED(
+ CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
+module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
+
+bool is_zswap_enabled(void)
+{
+ return zswap_enabled;
+}
+
/*********************************
* data structures
**********************************/
@@ -155,8 +166,8 @@ struct crypto_acomp_ctx {
struct crypto_acomp *acomp;
struct acomp_req *req;
struct crypto_wait wait;
- u8 *dstmem;
- struct mutex *mutex;
+ u8 *buffer;
+ struct mutex mutex;
};
/*
@@ -174,8 +185,10 @@ struct zswap_pool {
struct work_struct shrink_work;
struct hlist_node node;
char tfm_name[CRYPTO_MAX_ALG_NAME];
- struct list_head lru;
- spinlock_t lru_lock;
+ struct list_lru list_lru;
+ struct mem_cgroup *next_shrink;
+ struct shrinker *shrinker;
+ atomic_t nr_stored;
};
/*
@@ -274,32 +287,72 @@ static bool zswap_can_accept(void)
DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}
+static u64 get_zswap_pool_size(struct zswap_pool *pool)
+{
+ u64 pool_size = 0;
+ int i;
+
+ for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
+ pool_size += zpool_get_total_size(pool->zpools[i]);
+
+ return pool_size;
+}
+
static void zswap_update_total_size(void)
{
struct zswap_pool *pool;
u64 total = 0;
- int i;
rcu_read_lock();
list_for_each_entry_rcu(pool, &zswap_pools, list)
- for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
- total += zpool_get_total_size(pool->zpools[i]);
+ total += get_zswap_pool_size(pool);
rcu_read_unlock();
zswap_pool_total_size = total;
}
+/* should be called under RCU */
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
+{
+ return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
+}
+#else
+static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
+{
+ return NULL;
+}
+#endif
+
+static inline int entry_to_nid(struct zswap_entry *entry)
+{
+ return page_to_nid(virt_to_page(entry));
+}
+
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
+{
+ struct zswap_pool *pool;
+
+ /* lock out zswap pools list modification */
+ spin_lock(&zswap_pools_lock);
+ list_for_each_entry(pool, &zswap_pools, list) {
+ if (pool->next_shrink == memcg)
+ pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
+ }
+ spin_unlock(&zswap_pools_lock);
+}
+
/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;
-static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
+static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
struct zswap_entry *entry;
- entry = kmem_cache_alloc(zswap_entry_cache, gfp);
+ entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
if (!entry)
return NULL;
entry->refcount = 1;
@@ -313,6 +366,100 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
}
/*********************************
+* zswap lruvec functions
+**********************************/
+void zswap_lruvec_state_init(struct lruvec *lruvec)
+{
+ atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
+}
+
+void zswap_folio_swapin(struct folio *folio)
+{
+ struct lruvec *lruvec;
+
+ if (folio) {
+ lruvec = folio_lruvec(folio);
+ atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ }
+}
+
+/*********************************
+* lru functions
+**********************************/
+static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
+{
+ atomic_long_t *nr_zswap_protected;
+ unsigned long lru_size, old, new;
+ int nid = entry_to_nid(entry);
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ /*
+ * Note that it is safe to use rcu_read_lock() here, even in the face of
+ * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
+ * used in list_lru lookup, only two scenarios are possible:
+ *
+ * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
+ * new entry will be reparented to memcg's parent's list_lru.
+ * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
+ * new entry will be added directly to memcg's parent's list_lru.
+ *
+ * Similar reasoning holds for list_lru_del() and list_lru_putback().
+ */
+ rcu_read_lock();
+ memcg = mem_cgroup_from_entry(entry);
+ /* will always succeed */
+ list_lru_add(list_lru, &entry->lru, nid, memcg);
+
+ /* Update the protection area */
+ lru_size = list_lru_count_one(list_lru, nid, memcg);
+ lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
+ nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
+ old = atomic_long_inc_return(nr_zswap_protected);
+ /*
+ * Decay to avoid overflow and adapt to changing workloads.
+ * This is based on LRU reclaim cost decaying heuristics.
+ */
+ do {
+ new = old > lru_size / 4 ? old / 2 : old;
+ } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
+ rcu_read_unlock();
+}
+
+static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
+{
+ int nid = entry_to_nid(entry);
+ struct mem_cgroup *memcg;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_entry(entry);
+ /* will always succeed */
+ list_lru_del(list_lru, &entry->lru, nid, memcg);
+ rcu_read_unlock();
+}
+
+static void zswap_lru_putback(struct list_lru *list_lru,
+ struct zswap_entry *entry)
+{
+ int nid = entry_to_nid(entry);
+ spinlock_t *lock = &list_lru->node[nid].lock;
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_entry(entry);
+ spin_lock(lock);
+ /* we cannot use list_lru_add here, because it increments node's lru count */
+ list_lru_putback(list_lru, &entry->lru, nid, memcg);
+ spin_unlock(lock);
+
+ lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
+ /* increment the protection area to account for the LRU rotation. */
+ atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ rcu_read_unlock();
+}
+
+/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
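Editor's note: the decay applied in zswap_lru_add() above keeps nr_zswap_protected from growing without bound; a small sketch of just that heuristic (not part of the patch, function name illustrative) is below.

/*
 * Once the protected count exceeds a quarter of the LRU size it is halved,
 * so the protection level tracks the current LRU size instead of growing
 * forever.
 */
static long decay_protected(long protected, long lru_size)
{
	return protected > lru_size / 4 ? protected / 2 : protected;
}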
@@ -396,10 +543,9 @@ static void zswap_free_entry(struct zswap_entry *entry)
if (!entry->length)
atomic_dec(&zswap_same_filled_pages);
else {
- spin_lock(&entry->pool->lru_lock);
- list_del(&entry->lru);
- spin_unlock(&entry->pool->lru_lock);
+ zswap_lru_del(&entry->pool->list_lru, entry);
zpool_free(zswap_find_zpool(entry), entry->handle);
+ atomic_dec(&entry->pool->nr_stored);
zswap_pool_put(entry->pool);
}
zswap_entry_cache_free(entry);
@@ -442,65 +588,132 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
}
/*********************************
-* per-cpu code
+* shrinker functions
**********************************/
-static DEFINE_PER_CPU(u8 *, zswap_dstmem);
-/*
- * If users dynamically change the zpool type and compressor at runtime, i.e.
- * zswap is running, zswap can have more than one zpool on one cpu, but they
- * are sharing dtsmem. So we need this mutex to be per-cpu.
- */
-static DEFINE_PER_CPU(struct mutex *, zswap_mutex);
+static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
+ spinlock_t *lock, void *arg);
-static int zswap_dstmem_prepare(unsigned int cpu)
+static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct mutex *mutex;
- u8 *dst;
+ struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
+ unsigned long shrink_ret, nr_protected, lru_size;
+ struct zswap_pool *pool = shrinker->private_data;
+ bool encountered_page_in_swapcache = false;
+
+ if (!zswap_shrinker_enabled ||
+ !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
+ sc->nr_scanned = 0;
+ return SHRINK_STOP;
+ }
- dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
- if (!dst)
- return -ENOMEM;
+ nr_protected =
+ atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ lru_size = list_lru_shrink_count(&pool->list_lru, sc);
- mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
- if (!mutex) {
- kfree(dst);
- return -ENOMEM;
+ /*
+ * Abort if we are shrinking into the protected region.
+ *
+ * This short-circuiting is necessary because if we have too many
+ * concurrent reclaimers getting the freeable zswap object counts at the
+ * same time (before any of them has made reasonable progress), the total
+ * number of reclaimed objects might be more than the number of unprotected
+ * objects (i.e. the reclaimers will reclaim into the protected area of the
+ * zswap LRU).
+ */
+ if (nr_protected >= lru_size - sc->nr_to_scan) {
+ sc->nr_scanned = 0;
+ return SHRINK_STOP;
}
- mutex_init(mutex);
- per_cpu(zswap_dstmem, cpu) = dst;
- per_cpu(zswap_mutex, cpu) = mutex;
- return 0;
+ shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
+ &encountered_page_in_swapcache);
+
+ if (encountered_page_in_swapcache)
+ return SHRINK_STOP;
+
+ return shrink_ret ? shrink_ret : SHRINK_STOP;
}
-static int zswap_dstmem_dead(unsigned int cpu)
+static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct mutex *mutex;
- u8 *dst;
+ struct zswap_pool *pool = shrinker->private_data;
+ struct mem_cgroup *memcg = sc->memcg;
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
+ unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
- mutex = per_cpu(zswap_mutex, cpu);
- kfree(mutex);
- per_cpu(zswap_mutex, cpu) = NULL;
+ if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
+ return 0;
- dst = per_cpu(zswap_dstmem, cpu);
- kfree(dst);
- per_cpu(zswap_dstmem, cpu) = NULL;
+#ifdef CONFIG_MEMCG_KMEM
+ mem_cgroup_flush_stats(memcg);
+ nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
+ nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+#else
+ /* use pool stats instead of memcg stats */
+ nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
+ nr_stored = atomic_read(&pool->nr_stored);
+#endif
- return 0;
+ if (!nr_stored)
+ return 0;
+
+ nr_protected =
+ atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+ nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
+ /*
+ * Subtract from the lru size an estimate of the number of pages
+ * that should be protected.
+ */
+ nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
+
+ /*
+ * Scale the number of freeable pages by the memory saving factor.
+ * This ensures that the better zswap compresses memory, the fewer
+ * pages we will evict to swap (as it will otherwise incur IO for
+ * relatively small memory saving).
+ */
+ return mult_frac(nr_freeable, nr_backing, nr_stored);
+}
+
+static void zswap_alloc_shrinker(struct zswap_pool *pool)
+{
+ pool->shrinker =
+ shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
+ if (!pool->shrinker)
+ return;
+
+ pool->shrinker->private_data = pool;
+ pool->shrinker->scan_objects = zswap_shrinker_scan;
+ pool->shrinker->count_objects = zswap_shrinker_count;
+ pool->shrinker->batch = 0;
+ pool->shrinker->seeks = DEFAULT_SEEKS;
}
+/*********************************
+* per-cpu code
+**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
struct crypto_acomp *acomp;
struct acomp_req *req;
+ int ret;
+
+ mutex_init(&acomp_ctx->mutex);
+
+ acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+ if (!acomp_ctx->buffer)
+ return -ENOMEM;
acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
if (IS_ERR(acomp)) {
pr_err("could not alloc crypto acomp %s : %ld\n",
pool->tfm_name, PTR_ERR(acomp));
- return PTR_ERR(acomp);
+ ret = PTR_ERR(acomp);
+ goto acomp_fail;
}
acomp_ctx->acomp = acomp;
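Editor's note: zswap_shrinker_count() above scales the freeable count by the compression ratio (mult_frac(x, n, d) is essentially x * n / d); a hedged worked example (not part of the patch, function name illustrative) follows.

/*
 * With nr_stored = 1000 pages compressed into nr_backing = 250 backing
 * pages (4:1 ratio) and nr_freeable = 1000 unprotected LRU objects, the
 * shrinker reports 1000 * 250 / 1000 = 250 freeable objects: the better
 * the compression, the less writeback.
 */
static unsigned long scaled_freeable(unsigned long nr_freeable,
				     unsigned long nr_backing,
				     unsigned long nr_stored)
{
	return nr_stored ? nr_freeable * nr_backing / nr_stored : 0;
}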
@@ -508,8 +721,8 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
if (!req) {
pr_err("could not alloc crypto acomp_request %s\n",
pool->tfm_name);
- crypto_free_acomp(acomp_ctx->acomp);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto req_fail;
}
acomp_ctx->req = req;
@@ -522,10 +735,13 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &acomp_ctx->wait);
- acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
- acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);
-
return 0;
+
+req_fail:
+ crypto_free_acomp(acomp_ctx->acomp);
+acomp_fail:
+ kfree(acomp_ctx->buffer);
+ return ret;
}
static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
@@ -538,6 +754,7 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
acomp_request_free(acomp_ctx->req);
if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
crypto_free_acomp(acomp_ctx->acomp);
+ kfree(acomp_ctx->buffer);
}
return 0;
@@ -632,21 +849,16 @@ static void zswap_invalidate_entry(struct zswap_tree *tree,
zswap_entry_put(tree, entry);
}
-static int zswap_reclaim_entry(struct zswap_pool *pool)
+static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
+ spinlock_t *lock, void *arg)
{
- struct zswap_entry *entry;
+ struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
+ bool *encountered_page_in_swapcache = (bool *)arg;
struct zswap_tree *tree;
pgoff_t swpoffset;
- int ret;
+ enum lru_status ret = LRU_REMOVED_RETRY;
+ int writeback_result;
- /* Get an entry off the LRU */
- spin_lock(&pool->lru_lock);
- if (list_empty(&pool->lru)) {
- spin_unlock(&pool->lru_lock);
- return -EINVAL;
- }
- entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
- list_del_init(&entry->lru);
/*
* Once the lru lock is dropped, the entry might get freed. The
* swpoffset is copied to the stack, and entry isn't deref'd again
@@ -654,29 +866,48 @@ static int zswap_reclaim_entry(struct zswap_pool *pool)
*/
swpoffset = swp_offset(entry->swpentry);
tree = zswap_trees[swp_type(entry->swpentry)];
- spin_unlock(&pool->lru_lock);
+ list_lru_isolate(l, item);
+ /*
+ * It's safe to drop the lock here because we return either
+ * LRU_REMOVED_RETRY or LRU_RETRY.
+ */
+ spin_unlock(lock);
/* Check for invalidate() race */
spin_lock(&tree->lock);
- if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
- ret = -EAGAIN;
+ if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
goto unlock;
- }
+
/* Hold a reference to prevent a free during writeback */
zswap_entry_get(entry);
spin_unlock(&tree->lock);
- ret = zswap_writeback_entry(entry, tree);
+ writeback_result = zswap_writeback_entry(entry, tree);
spin_lock(&tree->lock);
- if (ret) {
- /* Writeback failed, put entry back on LRU */
- spin_lock(&pool->lru_lock);
- list_move(&entry->lru, &pool->lru);
- spin_unlock(&pool->lru_lock);
+ if (writeback_result) {
+ zswap_reject_reclaim_fail++;
+ zswap_lru_putback(&entry->pool->list_lru, entry);
+ ret = LRU_RETRY;
+
+ /*
+ * Encountering a page already in swap cache is a sign that we are shrinking
+ * into the warmer region. We should terminate shrinking (if we're in the dynamic
+ * shrinker context).
+ */
+ if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
+ ret = LRU_SKIP;
+ *encountered_page_in_swapcache = true;
+ }
+
goto put_unlock;
}
+ zswap_written_back_pages++;
+
+ if (entry->objcg)
+ count_objcg_event(entry->objcg, ZSWPWB);
+ count_vm_event(ZSWPWB);
/*
* Writeback started successfully, the page now belongs to the
* swapcache. Drop the entry from zswap - unless invalidate already
@@ -689,24 +920,94 @@ put_unlock:
zswap_entry_put(tree, entry);
unlock:
spin_unlock(&tree->lock);
- return ret ? -EAGAIN : 0;
+ spin_lock(lock);
+ return ret;
+}
+
+static int shrink_memcg(struct mem_cgroup *memcg)
+{
+ struct zswap_pool *pool;
+ int nid, shrunk = 0;
+
+ if (!mem_cgroup_zswap_writeback_enabled(memcg))
+ return -EINVAL;
+
+ /*
+ * Skip zombies because their LRUs are reparented and we would be
+ * reclaiming from the parent instead of the dead memcg.
+ */
+ if (memcg && !mem_cgroup_online(memcg))
+ return -ENOENT;
+
+ pool = zswap_pool_current_get();
+ if (!pool)
+ return -EINVAL;
+
+ for_each_node_state(nid, N_NORMAL_MEMORY) {
+ unsigned long nr_to_walk = 1;
+
+ shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
+ &shrink_memcg_cb, NULL, &nr_to_walk);
+ }
+ zswap_pool_put(pool);
+ return shrunk ? 0 : -EAGAIN;
}
static void shrink_worker(struct work_struct *w)
{
struct zswap_pool *pool = container_of(w, typeof(*pool),
shrink_work);
+ struct mem_cgroup *memcg;
int ret, failures = 0;
+ /* Global reclaim selects cgroups in a round-robin fashion. */
do {
- ret = zswap_reclaim_entry(pool);
- if (ret) {
- zswap_reject_reclaim_fail++;
- if (ret != -EAGAIN)
+ spin_lock(&zswap_pools_lock);
+ pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
+ memcg = pool->next_shrink;
+
+ /*
+ * We need to retry if we have gone through a full round trip, or if we
+ * got an offline memcg (or else we risk undoing the effect of the
+ * zswap memcg offlining cleanup callback). This is not catastrophic
+ * per se, but it will keep the now offlined memcg hostage for a while.
+ *
+ * Note that if we got an online memcg, we will keep the extra
+ * reference in case the original reference obtained by mem_cgroup_iter
+ * is dropped by the zswap memcg offlining callback, ensuring that the
+ * memcg is not killed when we are reclaiming.
+ */
+ if (!memcg) {
+ spin_unlock(&zswap_pools_lock);
+ if (++failures == MAX_RECLAIM_RETRIES)
break;
+
+ goto resched;
+ }
+
+ if (!mem_cgroup_tryget_online(memcg)) {
+ /* drop the reference from mem_cgroup_iter() */
+ mem_cgroup_iter_break(NULL, memcg);
+ pool->next_shrink = NULL;
+ spin_unlock(&zswap_pools_lock);
+
if (++failures == MAX_RECLAIM_RETRIES)
break;
+
+ goto resched;
}
+ spin_unlock(&zswap_pools_lock);
+
+ ret = shrink_memcg(memcg);
+ /* drop the extra reference */
+ mem_cgroup_put(memcg);
+
+ if (ret == -EINVAL)
+ break;
+ if (ret && ++failures == MAX_RECLAIM_RETRIES)
+ break;
+
+resched:
cond_resched();
} while (!zswap_can_accept());
zswap_pool_put(pool);
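Editor's note: the shrink_worker() loop above walks memcgs round-robin via mem_cgroup_iter() and only keeps online ones; a simplified, hedged sketch of that selection step (not part of the patch; helper name illustrative; the real code also records the position in pool->next_shrink under zswap_pools_lock) follows.

static struct mem_cgroup *next_reclaim_target(struct mem_cgroup *prev)
{
	struct mem_cgroup *memcg = mem_cgroup_iter(NULL, prev, NULL);

	/* keep the candidate only if it can be pinned while still online */
	if (memcg && !mem_cgroup_tryget_online(memcg)) {
		mem_cgroup_iter_break(NULL, memcg);
		memcg = NULL;
	}
	return memcg;	/* caller does mem_cgroup_put() when done */
}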
@@ -760,6 +1061,11 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
&pool->node);
if (ret)
goto error;
+
+ zswap_alloc_shrinker(pool);
+ if (!pool->shrinker)
+ goto error;
+
pr_debug("using %s compressor\n", pool->tfm_name);
/* being the current pool takes 1 ref; this func expects the
@@ -767,14 +1073,19 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
*/
kref_init(&pool->kref);
INIT_LIST_HEAD(&pool->list);
- INIT_LIST_HEAD(&pool->lru);
- spin_lock_init(&pool->lru_lock);
+ if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
+ goto lru_fail;
+ shrinker_register(pool->shrinker);
INIT_WORK(&pool->shrink_work, shrink_worker);
+ atomic_set(&pool->nr_stored, 0);
zswap_pool_debug("created", pool);
return pool;
+lru_fail:
+ list_lru_destroy(&pool->list_lru);
+ shrinker_free(pool->shrinker);
error:
if (pool->acomp_ctx)
free_percpu(pool->acomp_ctx);
@@ -832,8 +1143,16 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
zswap_pool_debug("destroying", pool);
+ shrinker_free(pool->shrinker);
cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
free_percpu(pool->acomp_ctx);
+ list_lru_destroy(&pool->list_lru);
+
+ spin_lock(&zswap_pools_lock);
+ mem_cgroup_iter_break(NULL, pool->next_shrink);
+ pool->next_shrink = NULL;
+ spin_unlock(&zswap_pools_lock);
+
for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
zpool_destroy_pool(pool->zpools[i]);
kfree(pool);
@@ -1040,18 +1359,47 @@ static int zswap_enabled_param_set(const char *val,
return ret;
}
+static void __zswap_load(struct zswap_entry *entry, struct page *page)
+{
+ struct zpool *zpool = zswap_find_zpool(entry);
+ struct scatterlist input, output;
+ struct crypto_acomp_ctx *acomp_ctx;
+ u8 *src;
+
+ acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+ mutex_lock(&acomp_ctx->mutex);
+
+ src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
+ if (!zpool_can_sleep_mapped(zpool)) {
+ memcpy(acomp_ctx->buffer, src, entry->length);
+ src = acomp_ctx->buffer;
+ zpool_unmap_handle(zpool, entry->handle);
+ }
+
+ sg_init_one(&input, src, entry->length);
+ sg_init_table(&output, 1);
+ sg_set_page(&output, page, PAGE_SIZE, 0);
+ acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
+ BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
+ BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
+ mutex_unlock(&acomp_ctx->mutex);
+
+ if (zpool_can_sleep_mapped(zpool))
+ zpool_unmap_handle(zpool, entry->handle);
+}
+
/*********************************
* writeback code
**********************************/
/*
- * Attempts to free an entry by adding a page to the swap cache,
- * decompressing the entry data into the page, and issuing a
- * bio write to write the page back to the swap device.
+ * Attempts to free an entry by adding a folio to the swap cache,
+ * decompressing the entry data into the folio, and issuing a
+ * bio write to write the folio back to the swap device.
*
- * This can be thought of as a "resumed writeback" of the page
+ * This can be thought of as a "resumed writeback" of the folio
* to the swap device. We are basically resuming the same swap
* writeback path that was intercepted with the zswap_store()
- * in the first place. After the page has been decompressed into
+ * in the first place. After the folio has been decompressed into
* the swap cache, the compressed version stored by zswap can be
* freed.
*/
@@ -1059,108 +1407,58 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
struct zswap_tree *tree)
{
swp_entry_t swpentry = entry->swpentry;
- struct page *page;
+ struct folio *folio;
struct mempolicy *mpol;
- struct scatterlist input, output;
- struct crypto_acomp_ctx *acomp_ctx;
- struct zpool *pool = zswap_find_zpool(entry);
- bool page_was_allocated;
- u8 *src, *tmp = NULL;
- unsigned int dlen;
- int ret;
+ bool folio_was_allocated;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
};
- if (!zpool_can_sleep_mapped(pool)) {
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- }
-
- /* try to allocate swap cache page */
+ /* try to allocate swap cache folio */
mpol = get_task_policy(current);
- page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
- NO_INTERLEAVE_INDEX, &page_was_allocated);
- if (!page) {
- ret = -ENOMEM;
- goto fail;
- }
+ folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
+ NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+ if (!folio)
+ return -ENOMEM;
- /* Found an existing page, we raced with load/swapin */
- if (!page_was_allocated) {
- put_page(page);
- ret = -EEXIST;
- goto fail;
+ /*
+ * Found an existing folio: we raced with load/swapin. We generally
+ * write back cold folios from zswap, and swapin means the folio just
+ * became hot. Skip this folio and let the caller find another one.
+ */
+ if (!folio_was_allocated) {
+ folio_put(folio);
+ return -EEXIST;
}
/*
- * Page is locked, and the swapcache is now secured against
+ * folio is locked, and the swapcache is now secured against
* concurrent swapping to and from the slot. Verify that the
* swap entry hasn't been invalidated and recycled behind our
* backs (our zswap_entry reference doesn't prevent that), to
- * avoid overwriting a new swap page with old compressed data.
+ * avoid overwriting a new swap folio with old compressed data.
*/
spin_lock(&tree->lock);
if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
spin_unlock(&tree->lock);
- delete_from_swap_cache(page_folio(page));
- ret = -ENOMEM;
- goto fail;
+ delete_from_swap_cache(folio);
+ return -ENOMEM;
}
spin_unlock(&tree->lock);
- /* decompress */
- acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
- dlen = PAGE_SIZE;
-
- src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
- if (!zpool_can_sleep_mapped(pool)) {
- memcpy(tmp, src, entry->length);
- src = tmp;
- zpool_unmap_handle(pool, entry->handle);
- }
-
- mutex_lock(acomp_ctx->mutex);
- sg_init_one(&input, src, entry->length);
- sg_init_table(&output, 1);
- sg_set_page(&output, page, PAGE_SIZE, 0);
- acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
- ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
- dlen = acomp_ctx->req->dlen;
- mutex_unlock(acomp_ctx->mutex);
+ __zswap_load(entry, &folio->page);
- if (!zpool_can_sleep_mapped(pool))
- kfree(tmp);
- else
- zpool_unmap_handle(pool, entry->handle);
-
- BUG_ON(ret);
- BUG_ON(dlen != PAGE_SIZE);
-
- /* page is up to date */
- SetPageUptodate(page);
+ /* folio is up to date */
+ folio_mark_uptodate(folio);
/* move it to the tail of the inactive list after end_writeback */
- SetPageReclaim(page);
+ folio_set_reclaim(folio);
/* start writeback */
- __swap_writepage(page, &wbc);
- put_page(page);
- zswap_written_back_pages++;
+ __swap_writepage(folio, &wbc);
+ folio_put(folio);
- return ret;
-
-fail:
- if (!zpool_can_sleep_mapped(pool))
- kfree(tmp);
-
- /*
- * If we get here because the page is already in swapcache, a
- * load may be happening concurrently. It is safe and okay to
- * not free the entry. It is also okay to return !0.
- */
- return ret;
+ return 0;
}
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
@@ -1204,6 +1502,7 @@ bool zswap_store(struct folio *folio)
struct scatterlist input, output;
struct crypto_acomp_ctx *acomp_ctx;
struct obj_cgroup *objcg = NULL;
+ struct mem_cgroup *memcg = NULL;
struct zswap_pool *pool;
struct zpool *zpool;
unsigned int dlen = PAGE_SIZE;
@@ -1235,15 +1534,15 @@ bool zswap_store(struct folio *folio)
zswap_invalidate_entry(tree, dupentry);
}
spin_unlock(&tree->lock);
-
- /*
- * XXX: zswap reclaim does not work with cgroups yet. Without a
- * cgroup-aware entry LRU, we will push out entries system-wide based on
- * local cgroup limits.
- */
objcg = get_obj_cgroup_from_folio(folio);
- if (objcg && !obj_cgroup_may_zswap(objcg))
- goto reject;
+ if (objcg && !obj_cgroup_may_zswap(objcg)) {
+ memcg = get_mem_cgroup_from_objcg(objcg);
+ if (shrink_memcg(memcg)) {
+ mem_cgroup_put(memcg);
+ goto reject;
+ }
+ mem_cgroup_put(memcg);
+ }
/* reclaim space if needed */
if (zswap_is_full()) {
@@ -1260,23 +1559,23 @@ bool zswap_store(struct folio *folio)
}
/* allocate entry */
- entry = zswap_entry_cache_alloc(GFP_KERNEL);
+ entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
if (!entry) {
zswap_reject_kmemcache_fail++;
goto reject;
}
if (zswap_same_filled_pages_enabled) {
- src = kmap_atomic(page);
+ src = kmap_local_page(page);
if (zswap_is_page_same_filled(src, &value)) {
- kunmap_atomic(src);
+ kunmap_local(src);
entry->swpentry = swp_entry(type, offset);
entry->length = 0;
entry->value = value;
atomic_inc(&zswap_same_filled_pages);
goto insert_entry;
}
- kunmap_atomic(src);
+ kunmap_local(src);
}
if (!zswap_non_same_filled_pages_enabled)
@@ -1287,16 +1586,29 @@ bool zswap_store(struct folio *folio)
if (!entry->pool)
goto freepage;
+ if (objcg) {
+ memcg = get_mem_cgroup_from_objcg(objcg);
+ if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
+ mem_cgroup_put(memcg);
+ goto put_pool;
+ }
+ mem_cgroup_put(memcg);
+ }
+
/* compress */
acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
- mutex_lock(acomp_ctx->mutex);
+ mutex_lock(&acomp_ctx->mutex);
- dst = acomp_ctx->dstmem;
+ dst = acomp_ctx->buffer;
sg_init_table(&input, 1);
- sg_set_page(&input, page, PAGE_SIZE, 0);
+ sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
- /* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
+ /*
+ * We need PAGE_SIZE * 2 here since there may be an over-compression case,
+ * and hardware accelerators may not check the dst buffer size, so
+ * give the dst buffer enough length to avoid a buffer overflow.
+ */
sg_init_one(&output, dst, PAGE_SIZE * 2);
acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
/*
@@ -1336,7 +1648,7 @@ bool zswap_store(struct folio *folio)
buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
memcpy(buf, dst, dlen);
zpool_unmap_handle(zpool, handle);
- mutex_unlock(acomp_ctx->mutex);
+ mutex_unlock(&acomp_ctx->mutex);
/* populate entry */
entry->swpentry = swp_entry(type, offset);
@@ -1365,9 +1677,9 @@ insert_entry:
zswap_invalidate_entry(tree, dupentry);
}
if (entry->length) {
- spin_lock(&entry->pool->lru_lock);
- list_add(&entry->lru, &entry->pool->lru);
- spin_unlock(&entry->pool->lru_lock);
+ INIT_LIST_HEAD(&entry->lru);
+ zswap_lru_add(&entry->pool->list_lru, entry);
+ atomic_inc(&entry->pool->nr_stored);
}
spin_unlock(&tree->lock);
@@ -1379,7 +1691,8 @@ insert_entry:
return true;
put_dstmem:
- mutex_unlock(acomp_ctx->mutex);
+ mutex_unlock(&acomp_ctx->mutex);
+put_pool:
zswap_pool_put(entry->pool);
freepage:
zswap_entry_cache_free(entry);
@@ -1403,12 +1716,7 @@ bool zswap_load(struct folio *folio)
struct page *page = &folio->page;
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry;
- struct scatterlist input, output;
- struct crypto_acomp_ctx *acomp_ctx;
- u8 *src, *dst, *tmp;
- struct zpool *zpool;
- unsigned int dlen;
- bool ret;
+ u8 *dst;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
@@ -1421,67 +1729,30 @@ bool zswap_load(struct folio *folio)
}
spin_unlock(&tree->lock);
- if (!entry->length) {
- dst = kmap_atomic(page);
+ if (entry->length)
+ __zswap_load(entry, page);
+ else {
+ dst = kmap_local_page(page);
zswap_fill_page(dst, entry->value);
- kunmap_atomic(dst);
- ret = true;
- goto stats;
- }
-
- zpool = zswap_find_zpool(entry);
- if (!zpool_can_sleep_mapped(zpool)) {
- tmp = kmalloc(entry->length, GFP_KERNEL);
- if (!tmp) {
- ret = false;
- goto freeentry;
- }
- }
-
- /* decompress */
- dlen = PAGE_SIZE;
- src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
-
- if (!zpool_can_sleep_mapped(zpool)) {
- memcpy(tmp, src, entry->length);
- src = tmp;
- zpool_unmap_handle(zpool, entry->handle);
+ kunmap_local(dst);
}
- acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
- mutex_lock(acomp_ctx->mutex);
- sg_init_one(&input, src, entry->length);
- sg_init_table(&output, 1);
- sg_set_page(&output, page, PAGE_SIZE, 0);
- acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
- if (crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait))
- WARN_ON(1);
- mutex_unlock(acomp_ctx->mutex);
-
- if (zpool_can_sleep_mapped(zpool))
- zpool_unmap_handle(zpool, entry->handle);
- else
- kfree(tmp);
-
- ret = true;
-stats:
count_vm_event(ZSWPIN);
if (entry->objcg)
count_objcg_event(entry->objcg, ZSWPIN);
-freeentry:
+
spin_lock(&tree->lock);
- if (ret && zswap_exclusive_loads_enabled) {
+ if (zswap_exclusive_loads_enabled) {
zswap_invalidate_entry(tree, entry);
folio_mark_dirty(folio);
} else if (entry->length) {
- spin_lock(&entry->pool->lru_lock);
- list_move(&entry->lru, &entry->pool->lru);
- spin_unlock(&entry->pool->lru_lock);
+ zswap_lru_del(&entry->pool->list_lru, entry);
+ zswap_lru_add(&entry->pool->list_lru, entry);
}
zswap_entry_put(tree, entry);
spin_unlock(&tree->lock);
- return ret;
+ return true;
}
void zswap_invalidate(int type, pgoff_t offset)
@@ -1595,13 +1866,6 @@ static int zswap_setup(void)
goto cache_fail;
}
- ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
- zswap_dstmem_prepare, zswap_dstmem_dead);
- if (ret) {
- pr_err("dstmem alloc failed\n");
- goto dstmem_fail;
- }
-
ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
"mm/zswap_pool:prepare",
zswap_cpu_comp_prepare,
@@ -1633,8 +1897,6 @@ fallback_fail:
if (pool)
zswap_pool_destroy(pool);
hp_fail:
- cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
-dstmem_fail:
kmem_cache_destroy(zswap_entry_cache);
cache_fail:
/* if built-in, we aren't unloaded on failure; don't allow use */