path: root/mm/damon/reclaim.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2022-05-26 21:32:41 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-05-26 21:32:41 +0200
commit     98931dd95fd489fcbfa97da563505a6f071d7c77 (patch)
tree       44683fc4a92efa614acdca2742a7ff19d26da1e3 /mm/damon/reclaim.c
parent     Merge tag 'kbuild-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/mas... (diff)
parent     mm: kfence: use PAGE_ALIGNED helper (diff)
Merge tag 'mm-stable-2022-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
 "Almost all of MM here. A few things are still getting finished off, reviewed, etc.

  - Yang Shi has improved the behaviour of khugepaged collapsing of readonly file-backed transparent hugepages.
  - Johannes Weiner has arranged for zswap memory use to be tracked and managed on a per-cgroup basis.
  - Muchun Song adds a /proc knob ("hugetlb_optimize_vmemmap") for runtime enablement of the recent huge page vmemmap optimization feature.
  - Baolin Wang contributes a series to fix some issues around hugetlb pagetable invalidation.
  - Zhenwei Pi has fixed some interactions between hwpoisoned pages and virtualization.
  - Tong Tiangen has enabled the use of the presently x86-only page_table_check debugging feature on arm64 and riscv.
  - David Vernet has done some fixup work on the memcg selftests.
  - Peter Xu has taught userfaultfd to handle write protection faults against shmem- and hugetlbfs-backed files.
  - More DAMON development from SeongJae Park - adding online tuning of the feature and support for monitoring of fixed virtual address ranges. Also easier discovery of which monitoring operations are available.
  - Nadav Amit has done some optimization of TLB flushing during mprotect().
  - Neil Brown continues to labor away at improving our swap-over-NFS support.
  - David Hildenbrand has some fixes to anon page COWing versus get_user_pages().
  - Peng Liu fixed some errors in the core hugetlb code.
  - Joao Martins has reduced the amount of memory consumed by device-dax's compound devmaps.
  - Some cleanups of the arch-specific pagemap code from Anshuman Khandual.
  - Muchun Song has found and fixed some errors in the TLB flushing of transparent hugepages.
  - Roman Gushchin has done more work on the memcg selftests.

  ... and, of course, many smaller fixes and cleanups. Notably, the customary million cleanup series from Miaohe Lin"

* tag 'mm-stable-2022-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (381 commits)
  mm: kfence: use PAGE_ALIGNED helper
  selftests: vm: add the "settings" file with timeout variable
  selftests: vm: add "test_hmm.sh" to TEST_FILES
  selftests: vm: check numa_available() before operating "merge_across_nodes" in ksm_tests
  selftests: vm: add migration to the .gitignore
  selftests/vm/pkeys: fix typo in comment
  ksm: fix typo in comment
  selftests: vm: add process_mrelease tests
  Revert "mm/vmscan: never demote for memcg reclaim"
  mm/kfence: print disabling or re-enabling message
  include/trace/events/percpu.h: cleanup for "percpu: improve percpu_alloc_percpu event trace"
  include/trace/events/mmflags.h: cleanup for "tracing: incorrect gfp_t conversion"
  mm: fix a potential infinite loop in start_isolate_page_range()
  MAINTAINERS: add Muchun as co-maintainer for HugeTLB
  zram: fix Kconfig dependency warning
  mm/shmem: fix shmem folio swapoff hang
  cgroup: fix an error handling path in alloc_pagecache_max_30M()
  mm: damon: use HPAGE_PMD_SIZE
  tracing: incorrect isolate_mote_t cast in mm_vmscan_lru_isolate
  nodemask.h: fix compilation error with GCC12
  ...
Diffstat (limited to 'mm/damon/reclaim.c')
-rw-r--r--  mm/damon/reclaim.c  124
1 file changed, 88 insertions, 36 deletions
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index e34c4d0c4d93..8efbfb24f3a1 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -28,7 +28,18 @@
* this.
*/
static bool enabled __read_mostly;
-module_param(enabled, bool, 0600);
+
+/*
+ * Make DAMON_RECLAIM read the input parameters again, except ``enabled``.
+ *
+ * Input parameters that are updated while DAMON_RECLAIM is running are not
+ * applied by default. Once this parameter is set to ``Y``, DAMON_RECLAIM
+ * reads the values of the parameters except ``enabled`` again. Once the
+ * re-reading is done, this parameter is set to ``N``. If invalid parameters
+ * are found during the re-reading, DAMON_RECLAIM will be disabled.
+ */
+static bool commit_inputs __read_mostly;
+module_param(commit_inputs, bool, 0600);
/*
* Time threshold for cold memory regions identification in microseconds.
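The ``commit_inputs`` knob introduced in the hunk above is what provides the "online tuning" mentioned in the pull message: parameters of an already running DAMON_RECLAIM instance can be rewritten and then applied without toggling ``enabled``. A minimal userspace sketch of that flow is below; it assumes the usual module-parameter layout under /sys/module/damon_reclaim/parameters/ and uses ``min_age`` only as an example of a tunable, so treat the exact sequence as illustrative rather than a documented interface.

    /*
     * Illustrative sketch: retune a running DAMON_RECLAIM instance, then
     * ask it to re-read its inputs via the commit_inputs parameter.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static void write_param(const char *name, const char *val)
    {
    	char path[256];
    	FILE *f;

    	snprintf(path, sizeof(path),
    		 "/sys/module/damon_reclaim/parameters/%s", name);
    	f = fopen(path, "w");
    	if (!f) {
    		perror(path);
    		exit(1);
    	}
    	fprintf(f, "%s\n", val);
    	fclose(f);
    }

    int main(void)
    {
    	/* Update a parameter while DAMON_RECLAIM is already enabled ... */
    	write_param("min_age", "10000000");
    	/* ... then request a re-read of everything except 'enabled'. */
    	write_param("commit_inputs", "Y");
    	return 0;
    }
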
@@ -227,7 +238,7 @@ static int walk_system_ram(struct resource *res, void *arg)
{
struct damon_reclaim_ram_walk_arg *a = arg;
- if (a->end - a->start < res->end - res->start) {
+ if (a->end - a->start < resource_size(res)) {
a->start = res->start;
a->end = res->end;
}
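The one-line change above is not purely cosmetic: struct resource ranges are inclusive, and resource_size() accounts for that by returning ``end - start + 1``, so the open-coded ``res->end - res->start`` under-counted the region by one byte. A toy, userspace-only sketch of the arithmetic (kernel types replaced for illustration):

    /* Toy illustration of the inclusive-range size; not kernel code. */
    #include <stdio.h>

    struct toy_resource {
    	unsigned long long start;
    	unsigned long long end;	/* inclusive, as in struct resource */
    };

    /* Mirrors the kernel's resource_size(): end - start + 1. */
    static unsigned long long toy_resource_size(const struct toy_resource *res)
    {
    	return res->end - res->start + 1;
    }

    int main(void)
    {
    	struct toy_resource r = { .start = 0x1000, .end = 0x1fff };

    	printf("open-coded:          %llu\n", r.end - r.start);          /* 4095 */
    	printf("resource_size-style: %llu\n", toy_resource_size(&r));    /* 4096 */
    	return 0;
    }
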
@@ -290,57 +301,56 @@ static struct damos *damon_reclaim_new_scheme(void)
return scheme;
}
-static int damon_reclaim_turn(bool on)
+static int damon_reclaim_apply_parameters(void)
{
- struct damon_region *region;
struct damos *scheme;
- int err;
-
- if (!on) {
- err = damon_stop(&ctx, 1);
- if (!err)
- kdamond_pid = -1;
- return err;
- }
+ struct damon_addr_range addr_range;
+ int err = 0;
err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
min_nr_regions, max_nr_regions);
if (err)
return err;
+ /* Will be freed by next 'damon_set_schemes()' below */
+ scheme = damon_reclaim_new_scheme();
+ if (!scheme)
+ return -ENOMEM;
+ err = damon_set_schemes(ctx, &scheme, 1);
+ if (err)
+ return err;
+
if (monitor_region_start > monitor_region_end)
return -EINVAL;
if (!monitor_region_start && !monitor_region_end &&
!get_monitoring_region(&monitor_region_start,
&monitor_region_end))
return -EINVAL;
- /* DAMON will free this on its own when finish monitoring */
- region = damon_new_region(monitor_region_start, monitor_region_end);
- if (!region)
- return -ENOMEM;
- damon_add_region(region, target);
+ addr_range.start = monitor_region_start;
+ addr_range.end = monitor_region_end;
+ return damon_set_regions(target, &addr_range, 1);
+}
- /* Will be freed by 'damon_set_schemes()' below */
- scheme = damon_reclaim_new_scheme();
- if (!scheme) {
- err = -ENOMEM;
- goto free_region_out;
+static int damon_reclaim_turn(bool on)
+{
+ int err;
+
+ if (!on) {
+ err = damon_stop(&ctx, 1);
+ if (!err)
+ kdamond_pid = -1;
+ return err;
}
- err = damon_set_schemes(ctx, &scheme, 1);
+
+ err = damon_reclaim_apply_parameters();
if (err)
- goto free_scheme_out;
+ return err;
err = damon_start(&ctx, 1, true);
- if (!err) {
- kdamond_pid = ctx->kdamond->pid;
- return 0;
- }
-
-free_scheme_out:
- damon_destroy_scheme(scheme);
-free_region_out:
- damon_destroy_region(region, target);
- return err;
+ if (err)
+ return err;
+ kdamond_pid = ctx->kdamond->pid;
+ return 0;
}
#define ENABLE_CHECK_INTERVAL_MS 1000
@@ -358,14 +368,39 @@ static void damon_reclaim_timer_fn(struct work_struct *work)
enabled = last_enabled;
}
- schedule_delayed_work(&damon_reclaim_timer,
+ if (enabled)
+ schedule_delayed_work(&damon_reclaim_timer,
msecs_to_jiffies(ENABLE_CHECK_INTERVAL_MS));
}
static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);
+static int enabled_store(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc = param_set_bool(val, kp);
+
+ if (rc < 0)
+ return rc;
+
+ if (enabled)
+ schedule_delayed_work(&damon_reclaim_timer, 0);
+
+ return 0;
+}
+
+static const struct kernel_param_ops enabled_param_ops = {
+ .set = enabled_store,
+ .get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
+MODULE_PARM_DESC(enabled,
+ "Enable or disable DAMON_RECLAIM (default: disabled)");
+
static int damon_reclaim_after_aggregation(struct damon_ctx *c)
{
struct damos *s;
+ int err = 0;
/* update the stats parameter */
damon_for_each_scheme(s, c) {
@@ -375,7 +410,23 @@ static int damon_reclaim_after_aggregation(struct damon_ctx *c)
bytes_reclaimed_regions = s->stat.sz_applied;
nr_quota_exceeds = s->stat.qt_exceeds;
}
- return 0;
+
+ if (commit_inputs) {
+ err = damon_reclaim_apply_parameters();
+ commit_inputs = false;
+ }
+ return err;
+}
+
+static int damon_reclaim_after_wmarks_check(struct damon_ctx *c)
+{
+ int err = 0;
+
+ if (commit_inputs) {
+ err = damon_reclaim_apply_parameters();
+ commit_inputs = false;
+ }
+ return err;
}
static int __init damon_reclaim_init(void)
@@ -387,6 +438,7 @@ static int __init damon_reclaim_init(void)
if (damon_select_ops(ctx, DAMON_OPS_PADDR))
return -EINVAL;
+ ctx->callback.after_wmarks_check = damon_reclaim_after_wmarks_check;
ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
target = damon_new_target();