Diffstat (limited to 'mm/ksm.c')
-rw-r--r--  mm/ksm.c | 388
1 file changed, 349 insertions(+), 39 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 6a831009b4cb..8c001819cf10 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
+#include <linux/sched/cputime.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
@@ -248,6 +249,9 @@ static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;
+/* Default number of pages to scan per batch */
+#define DEFAULT_PAGES_TO_SCAN 100
+
/* The number of pages scanned */
static unsigned long ksm_pages_scanned;
@@ -276,7 +280,7 @@ static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
static int ksm_max_page_sharing = 256;
/* Number of pages ksmd should scan in one batch */
-static unsigned int ksm_thread_pages_to_scan = 100;
+static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;
@@ -297,6 +301,172 @@ unsigned long ksm_zero_pages;
/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;
+/* Don't scan more than max pages per batch. */
+static unsigned long ksm_advisor_max_pages_to_scan = 30000;
+
+/* Min CPU percent for scanning pages per scan */
+#define KSM_ADVISOR_MIN_CPU 10
+
+/* Max CPU percent for scanning pages per scan */
+static unsigned int ksm_advisor_max_cpu = 70;
+
+/* Target scan time in seconds to analyze all KSM candidate pages. */
+static unsigned long ksm_advisor_target_scan_time = 200;
+
+/* Exponentially weighted moving average. */
+#define EWMA_WEIGHT 30
+
+/**
+ * struct advisor_ctx - metadata for KSM advisor
+ * @start_scan: start time of the current scan
+ * @scan_time: scan time of previous scan
+ * @change: percent change applied to the pages_to_scan parameter
+ * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
+ */
+struct advisor_ctx {
+ ktime_t start_scan;
+ unsigned long scan_time;
+ unsigned long change;
+ unsigned long long cpu_time;
+};
+static struct advisor_ctx advisor_ctx;
+
+/* The different advisor types */
+enum ksm_advisor_type {
+ KSM_ADVISOR_NONE,
+ KSM_ADVISOR_SCAN_TIME,
+};
+static enum ksm_advisor_type ksm_advisor;
+
+#ifdef CONFIG_SYSFS
+/*
+ * Only called through the sysfs control interface:
+ */
+
+/* At least scan this many pages per batch. */
+static unsigned long ksm_advisor_min_pages_to_scan = 500;
+
+static void set_advisor_defaults(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_NONE) {
+ ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
+ } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
+ advisor_ctx = (const struct advisor_ctx){ 0 };
+ ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
+ }
+}
+#endif /* CONFIG_SYSFS */
+
+static inline void advisor_start_scan(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ advisor_ctx.start_scan = ktime_get();
+}
+
+/*
+ * Use previous scan time if available, otherwise use current scan time as an
+ * approximation for the previous scan time.
+ */
+static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
+ unsigned long scan_time)
+{
+ return ctx->scan_time ? ctx->scan_time : scan_time;
+}
+
+/* Calculate exponential weighted moving average */
+static unsigned long ewma(unsigned long prev, unsigned long curr)
+{
+ return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
+}
+
+/*
+ * The scan time advisor adjusts the scan rate based on the ratio of the
+ * actual scan time to the target scan time:
+ *
+ * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
+ *
+ * To avoid perturbations, a change factor is computed for each iteration
+ * and smoothed with an exponentially weighted moving average of the
+ * previous changes. The new pages_to_scan value is multiplied by that
+ * change factor:
+ *
+ * new_pages_to_scan *= change factor
+ *
+ * The new pages_to_scan value is further limited by CPU consumption: the
+ * CPU percentage of the last scan yields an estimated per-page cost, and
+ * the projected CPU cost of the next scan is capped between the CPU min
+ * and max settings.
+ *
+ * Finally, the new pages_to_scan value is capped by the absolute max
+ * pages limit.
+ */
+static void scan_time_advisor(void)
+{
+ unsigned int cpu_percent;
+ unsigned long cpu_time;
+ unsigned long cpu_time_diff;
+ unsigned long cpu_time_diff_ms;
+ unsigned long pages;
+ unsigned long per_page_cost;
+ unsigned long factor;
+ unsigned long change;
+ unsigned long last_scan_time;
+ unsigned long scan_time;
+
+ /* Convert scan time to seconds */
+ scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
+ MSEC_PER_SEC);
+ scan_time = scan_time ? scan_time : 1;
+
+ /* Calculate CPU consumption of ksmd background thread */
+ cpu_time = task_sched_runtime(current);
+ cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
+ cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
+
+ cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
+ cpu_percent = cpu_percent ? cpu_percent : 1;
+ last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
+
+ /* Calculate target scan time as a percentage of the actual scan time */
+ factor = ksm_advisor_target_scan_time * 100 / scan_time;
+ factor = factor ? factor : 1;
+
+ /*
+ * Calculate scan time as percentage of last scan time and use
+ * exponentially weighted moving average to smooth it
+ */
+ change = scan_time * 100 / last_scan_time;
+ change = change ? change : 1;
+ change = ewma(advisor_ctx.change, change);
+
+ /* Calculate new scan rate based on target scan rate. */
+ pages = ksm_thread_pages_to_scan * 100 / factor;
+ /* Update pages_to_scan by weighted change percentage. */
+ pages = pages * change / 100;
+
+ /* Cap new pages_to_scan value */
+ per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
+ per_page_cost = per_page_cost ? per_page_cost : 1;
+
+ pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
+ pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
+ pages = min(pages, ksm_advisor_max_pages_to_scan);
+
+ /* Update advisor context */
+ advisor_ctx.change = change;
+ advisor_ctx.scan_time = scan_time;
+ advisor_ctx.cpu_time = cpu_time;
+
+ ksm_thread_pages_to_scan = pages;
+ trace_ksm_advisor(scan_time, pages, cpu_percent);
+}
+
+static void advisor_stop_scan(void)
+{
+ if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ scan_time_advisor();
+}
+
#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
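The advisor arithmetic above is all integer math and can be traced by hand. The following standalone userspace sketch replays one scan_time_advisor() iteration; every input value is an illustrative assumption, not taken from the patch:

#include <stdio.h>

#define EWMA_WEIGHT		30
#define KSM_ADVISOR_MIN_CPU	10

/* Same smoothing as the kernel's ewma(): 70% history, 30% new sample. */
static unsigned long ewma(unsigned long prev, unsigned long curr)
{
	return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
}

int main(void)
{
	/* Sample state: all of these values are illustrative assumptions. */
	unsigned long pages_to_scan = 500;	/* current batch size */
	unsigned long scan_time = 400;		/* this scan took 400s */
	unsigned long last_scan_time = 500;	/* previous scan time */
	unsigned long prev_change = 100;	/* previous change factor */
	unsigned long target = 200;		/* advisor_target_scan_time */
	unsigned long max_cpu = 70;		/* advisor_max_cpu */
	unsigned long max_pages = 30000;	/* advisor_max_pages_to_scan */
	unsigned long cpu_percent = 20;		/* measured ksmd CPU share */

	/* Target scan time as a percentage of the actual scan time. */
	unsigned long factor = target * 100 / scan_time;		/* 50 */

	/* Smoothed change of this scan time versus the last one. */
	unsigned long change = ewma(prev_change,
				    scan_time * 100 / last_scan_time);	/* 94 */

	/* Scale the batch toward the target, damped by the change factor. */
	unsigned long pages = pages_to_scan * 100 / factor;	/* 1000 */

	pages = pages * change / 100;				/* 940 */

	/* Cap by the projected CPU cost and the absolute maximum. */
	unsigned long per_page_cost = pages_to_scan / cpu_percent; /* 25 */

	if (pages > per_page_cost * max_cpu)		/* cap at 1750 */
		pages = per_page_cost * max_cpu;
	if (pages < per_page_cost * KSM_ADVISOR_MIN_CPU) /* floor at 250 */
		pages = per_page_cost * KSM_ADVISOR_MIN_CPU;
	if (pages > max_pages)
		pages = max_pages;

	printf("new pages_to_scan: %lu\n", pages);	/* prints 940 */
	return 0;
}

Here a 400s scan against the 200s target roughly doubles the batch (factor 50 yields 1000 pages), the EWMA-damped change factor of 94% pulls it back to 940, and the CPU caps (250..1750 pages at the measured 20% CPU) leave it untouched. With EWMA_WEIGHT at 30, a new observation contributes 30% and history 70%, so pages_to_scan converges over several scans instead of oscillating.
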
@@ -1099,9 +1269,9 @@ error:
static u32 calc_checksum(struct page *page)
{
u32 checksum;
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
checksum = xxhash(addr, PAGE_SIZE, 0);
- kunmap_atomic(addr);
+ kunmap_local(addr);
return checksum;
}
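kmap_local_page() replaces kmap_atomic() here because the checksum needs only a short-lived, thread-local mapping: unlike kmap_atomic(), kmap_local_page() does not disable pagefaults or preemption. On 64-bit kernels both resolve to the direct map, so the change mainly matters for 32-bit HIGHMEM configurations.
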
@@ -1161,8 +1331,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
goto out_unlock;
}
- /* See page_try_share_anon_rmap(): clear PTE first. */
- if (anon_exclusive && page_try_share_anon_rmap(page)) {
+ /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
+ if (anon_exclusive &&
+ folio_try_share_anon_rmap_pte(page_folio(page), page)) {
set_pte_at(mm, pvmw.address, pvmw.pte, entry);
goto out_unlock;
}
@@ -1199,6 +1370,7 @@ out:
static int replace_page(struct vm_area_struct *vma, struct page *page,
struct page *kpage, pte_t orig_pte)
{
+ struct folio *kfolio = page_folio(kpage);
struct mm_struct *mm = vma->vm_mm;
struct folio *folio;
pmd_t *pmd;
@@ -1238,15 +1410,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
goto out_mn;
}
VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
- VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage);
+ VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
+ kfolio);
/*
* No need to check ksm_use_zero_pages here: we can only have a
* zero_page here if ksm_use_zero_pages was enabled already.
*/
if (!is_zero_pfn(page_to_pfn(kpage))) {
- get_page(kpage);
- page_add_anon_rmap(kpage, vma, addr, RMAP_NONE);
+ folio_get(kfolio);
+ folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
newpte = mk_pte(kpage, vma->vm_page_prot);
} else {
/*
@@ -1277,7 +1450,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
set_pte_at_notify(mm, addr, ptep, newpte);
folio = page_folio(page);
- page_remove_rmap(page, vma, false);
+ folio_remove_rmap_pte(folio, page, vma);
if (!folio_mapped(folio))
folio_free_swap(folio);
folio_put(folio);
@@ -2401,6 +2574,7 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
mm_slot = ksm_scan.mm_slot;
if (mm_slot == &ksm_mm_head) {
+ advisor_start_scan();
trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
/*
@@ -2558,6 +2732,8 @@ no_vmas:
if (mm_slot != &ksm_mm_head)
goto next_mm;
+ advisor_stop_scan();
+
trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
ksm_scan.seqnr++;
return NULL;
@@ -2604,11 +2780,9 @@ static int ksm_scan_thread(void *nothing)
ksm_do_scan(ksm_thread_pages_to_scan);
mutex_unlock(&ksm_thread_mutex);
- try_to_freeze();
-
if (ksmd_should_run()) {
sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
- wait_event_interruptible_timeout(ksm_iter_wait,
+ wait_event_freezable_timeout(ksm_iter_wait,
sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
msecs_to_jiffies(sleep_ms));
} else {
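The freezer change folds the old try_to_freeze() call into the wait itself: wait_event_freezable_timeout() behaves like wait_event_interruptible_timeout() but also lets the freezer suspend ksmd while it sleeps between batches.
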
@@ -2875,49 +3049,53 @@ void __ksm_exit(struct mm_struct *mm)
trace_ksm_exit(mm);
}
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- struct folio *folio = page_folio(page);
+ struct page *page = folio_page(folio, 0);
struct anon_vma *anon_vma = folio_anon_vma(folio);
- struct page *new_page;
+ struct folio *new_folio;
- if (PageKsm(page)) {
- if (page_stable_node(page) &&
+ if (folio_test_large(folio))
+ return folio;
+
+ if (folio_test_ksm(folio)) {
+ if (folio_stable_node(folio) &&
!(ksm_run & KSM_RUN_UNMERGE))
- return page; /* no need to copy it */
+ return folio; /* no need to copy it */
} else if (!anon_vma) {
- return page; /* no need to copy it */
- } else if (page->index == linear_page_index(vma, address) &&
+ return folio; /* no need to copy it */
+ } else if (folio->index == linear_page_index(vma, addr) &&
anon_vma->root == vma->anon_vma->root) {
- return page; /* still no need to copy it */
+ return folio; /* still no need to copy it */
}
if (PageHWPoison(page))
return ERR_PTR(-EHWPOISON);
- if (!PageUptodate(page))
- return page; /* let do_swap_page report the error */
-
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
- if (new_page &&
- mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
- put_page(new_page);
- new_page = NULL;
- }
- if (new_page) {
- if (copy_mc_user_highpage(new_page, page, address, vma)) {
- put_page(new_page);
- memory_failure_queue(page_to_pfn(page), 0);
+ if (!folio_test_uptodate(folio))
+ return folio; /* let do_swap_page report the error */
+
+ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+ if (new_folio &&
+ mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
+ folio_put(new_folio);
+ new_folio = NULL;
+ }
+ if (new_folio) {
+ if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
+ addr, vma)) {
+ folio_put(new_folio);
+ memory_failure_queue(folio_pfn(folio), 0);
return ERR_PTR(-EHWPOISON);
}
- SetPageDirty(new_page);
- __SetPageUptodate(new_page);
- __SetPageLocked(new_page);
+ folio_set_dirty(new_folio);
+ __folio_mark_uptodate(new_folio);
+ __folio_set_locked(new_folio);
#ifdef CONFIG_SWAP
count_vm_event(KSM_SWPIN_COPY);
#endif
}
- return new_page;
+ return new_folio;
}
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
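ksm_might_need_to_copy() now takes and returns a folio, and the new early return hands large folios back untouched: the copy path below allocates an order-0 folio and copies exactly one page, so it could not service a large folio anyway.
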
@@ -3244,6 +3422,9 @@ static ssize_t pages_to_scan_store(struct kobject *kobj,
unsigned int nr_pages;
int err;
+ if (ksm_advisor != KSM_ADVISOR_NONE)
+ return -EINVAL;
+
err = kstrtouint(buf, 10, &nr_pages);
if (err)
return -EINVAL;
@@ -3563,6 +3744,130 @@ static ssize_t smart_scan_store(struct kobject *kobj,
}
KSM_ATTR(smart_scan);
+static ssize_t advisor_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const char *output;
+
+ if (ksm_advisor == KSM_ADVISOR_NONE)
+ output = "[none] scan-time";
+ else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
+ output = "none [scan-time]";
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t advisor_mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ enum ksm_advisor_type curr_advisor = ksm_advisor;
+
+ if (sysfs_streq("scan-time", buf))
+ ksm_advisor = KSM_ADVISOR_SCAN_TIME;
+ else if (sysfs_streq("none", buf))
+ ksm_advisor = KSM_ADVISOR_NONE;
+ else
+ return -EINVAL;
+
+ /* Set advisor default values */
+ if (curr_advisor != ksm_advisor)
+ set_advisor_defaults();
+
+ return count;
+}
+KSM_ATTR(advisor_mode);
+
+static ssize_t advisor_max_cpu_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
+}
+
+static ssize_t advisor_max_cpu_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_max_cpu = value;
+ return count;
+}
+KSM_ATTR(advisor_max_cpu);
+
+static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
+}
+
+static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_min_pages_to_scan = value;
+ return count;
+}
+KSM_ATTR(advisor_min_pages_to_scan);
+
+static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
+}
+
+static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+
+ ksm_advisor_max_pages_to_scan = value;
+ return count;
+}
+KSM_ATTR(advisor_max_pages_to_scan);
+
+static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
+}
+
+static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ unsigned long value;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return -EINVAL;
+ if (value < 1)
+ return -EINVAL;
+
+ ksm_advisor_target_scan_time = value;
+ return count;
+}
+KSM_ATTR(advisor_target_scan_time);
+
static struct attribute *ksm_attrs[] = {
&sleep_millisecs_attr.attr,
&pages_to_scan_attr.attr,
@@ -3585,6 +3890,11 @@ static struct attribute *ksm_attrs[] = {
&use_zero_pages_attr.attr,
&general_profit_attr.attr,
&smart_scan_attr.attr,
+ &advisor_mode_attr.attr,
+ &advisor_max_cpu_attr.attr,
+ &advisor_min_pages_to_scan_attr.attr,
+ &advisor_max_pages_to_scan_attr.attr,
+ &advisor_target_scan_time_attr.attr,
NULL,
};
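
For completeness, a minimal userspace sketch of driving the new knobs. It assumes only the attribute names registered above and the standard /sys/kernel/mm/ksm/ sysfs location; the tuning values are illustrative, error reporting is trimmed, and it must run as root:

#include <stdio.h>

/* Write one value to a KSM sysfs attribute; returns 0 on success. */
static int ksm_write(const char *attr, const char *val)
{
	char path[96];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/*
	 * Aim for a full pass over all candidate pages every 180s while
	 * budgeting at most ~50% of one CPU (illustrative values).
	 */
	if (ksm_write("advisor_target_scan_time", "180") ||
	    ksm_write("advisor_max_cpu", "50") ||
	    ksm_write("advisor_mode", "scan-time"))
		return 1;
	return 0;
}

While an advisor is active, writes to pages_to_scan fail with -EINVAL (see the pages_to_scan_store() hunk above); writing "none" to advisor_mode hands control back, and set_advisor_defaults() restores DEFAULT_PAGES_TO_SCAN.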