author		Dennis Zhou <dennis@kernel.org>	2020-01-02 22:26:35 +0100
committer	David Sterba <dsterba@suse.com>	2020-01-20 16:40:59 +0100
commit		a2309300841207de28307ecd2f0e031fccde37a3 (patch)
tree		8f84e7b531380c5330ac456baaedd5956998682d /fs/btrfs
parent		btrfs: keep track of discardable_bytes for async discard (diff)
btrfs: calculate discard delay based on number of extents
An earlier patch keeps track of discardable_extents. These are undiscarded extents managed by the free space cache. Here, we will use this to dynamically calculate the discard delay interval.

There are 3 rates to consider. The first is the target convergence rate, the rate to discard all discardable_extents over the BTRFS_DISCARD_TARGET_MSEC time frame. This is clamped by the lower limit, the iops limit or BTRFS_DISCARD_MIN_DELAY (1ms), and the upper limit, BTRFS_DISCARD_MAX_DELAY (1s). We reevaluate this delay every transaction commit.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
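To make the clamping behavior concrete, here is a minimal standalone C sketch (userspace, not kernel code) that mirrors the delay calculation added in btrfs_discard_calc_delay() below. The calc_delay_msec() and clamp_ul() helpers and the assumption that MSEC_PER_SEC is 1000 are illustrative only; the constants are the ones this patch defines in discard.c.

/* Sketch of the discard delay calculation: the delay targets discarding all
 * pending extents within BTRFS_DISCARD_TARGET_MSEC, bounded below by the
 * iops limit (or 1ms) and above by 1s. Not kernel code. */
#include <stdio.h>

#define MSEC_PER_SEC			1000UL	/* assumed, as in the kernel */
#define BTRFS_DISCARD_TARGET_MSEC	(6 * 60 * 60UL * MSEC_PER_SEC)
#define BTRFS_DISCARD_MIN_DELAY_MSEC	(1UL)
#define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
#define BTRFS_DISCARD_MAX_IOPS		(10U)

static unsigned long clamp_ul(unsigned long val, unsigned long lo, unsigned long hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

/* Hypothetical mirror of the clamping logic in btrfs_discard_calc_delay() */
static unsigned long calc_delay_msec(unsigned long discardable_extents,
				     unsigned int iops_limit)
{
	unsigned long lower_limit = BTRFS_DISCARD_MIN_DELAY_MSEC;
	unsigned long delay;

	if (!discardable_extents)
		return 0;	/* the kernel simply keeps the previous delay */

	/* An iops limit of N allows at most N discards/sec, i.e. >= 1000/N ms apart */
	if (iops_limit && MSEC_PER_SEC / iops_limit > lower_limit)
		lower_limit = MSEC_PER_SEC / iops_limit;

	delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
	return clamp_ul(delay, lower_limit, BTRFS_DISCARD_MAX_DELAY_MSEC);
}

int main(void)
{
	/* Few extents: 21600000/100 = 216000ms, capped at the 1s upper limit */
	printf("%lu ms\n", calc_delay_msec(100, BTRFS_DISCARD_MAX_IOPS));
	/* Many extents: 21600000/1000000 = 21ms, raised to the 100ms iops floor */
	printf("%lu ms\n", calc_delay_msec(1000000, BTRFS_DISCARD_MAX_IOPS));
	/* In between: the 6h convergence target sets the delay (216ms) */
	printf("%lu ms\n", calc_delay_msec(100000, BTRFS_DISCARD_MAX_IOPS));
	return 0;
}

With the default BTRFS_DISCARD_MAX_IOPS of 10, the iops floor works out to 100ms, so in practice the delay moves between 100ms and 1s depending on how many extents are pending.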
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/ctree.h	2
-rw-r--r--	fs/btrfs/discard.c	55
-rw-r--r--	fs/btrfs/discard.h	1
-rw-r--r--	fs/btrfs/extent-tree.c	4
-rw-r--r--	fs/btrfs/sysfs.c	31
5 files changed, 88 insertions, 5 deletions
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7c1c236d13ae..451dade0a4bb 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -468,6 +468,8 @@ struct btrfs_discard_ctl {
struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
atomic_t discardable_extents;
atomic64_t discardable_bytes;
+ unsigned long delay;
+ u32 iops_limit;
};
/* delayed seq elem */
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 62298a438fa5..29645676427b 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -15,6 +15,12 @@
#define BTRFS_DISCARD_DELAY (120ULL * NSEC_PER_SEC)
#define BTRFS_DISCARD_UNUSED_DELAY (10ULL * NSEC_PER_SEC)
+/* Target completion latency of discarding all discardable extents */
+#define BTRFS_DISCARD_TARGET_MSEC (6 * 60 * 60UL * MSEC_PER_SEC)
+#define BTRFS_DISCARD_MIN_DELAY_MSEC (1UL)
+#define BTRFS_DISCARD_MAX_DELAY_MSEC (1000UL)
+#define BTRFS_DISCARD_MAX_IOPS (10U)
+
static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
struct btrfs_block_group *block_group)
{
@@ -235,11 +241,17 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
block_group = find_next_block_group(discard_ctl, now);
if (block_group) {
- u64 delay = 0;
+ unsigned long delay = discard_ctl->delay;
+
+ /*
+ * This timeout is to hopefully prevent immediate discarding
+ * in a recently allocated block group.
+ */
+ if (now < block_group->discard_eligible_time) {
+ u64 bg_timeout = block_group->discard_eligible_time - now;
- if (now < block_group->discard_eligible_time)
- delay = nsecs_to_jiffies(
- block_group->discard_eligible_time - now);
+ delay = max(delay, nsecs_to_jiffies(bg_timeout));
+ }
mod_delayed_work(discard_ctl->discard_workers,
&discard_ctl->work, delay);
@@ -343,6 +355,39 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
}
/**
+ * btrfs_discard_calc_delay - recalculate the base delay
+ * @discard_ctl: discard control
+ *
+ * Recalculate the base delay which is based off the total number of
+ * discardable_extents. Clamp this between the lower_limit (iops_limit or 1ms)
+ * and the upper_limit (BTRFS_DISCARD_MAX_DELAY_MSEC).
+ */
+void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
+{
+ s32 discardable_extents;
+ u32 iops_limit;
+ unsigned long delay;
+ unsigned long lower_limit = BTRFS_DISCARD_MIN_DELAY_MSEC;
+
+ discardable_extents = atomic_read(&discard_ctl->discardable_extents);
+ if (!discardable_extents)
+ return;
+
+ spin_lock(&discard_ctl->lock);
+
+ iops_limit = READ_ONCE(discard_ctl->iops_limit);
+ if (iops_limit)
+ lower_limit = max_t(unsigned long, lower_limit,
+ MSEC_PER_SEC / iops_limit);
+
+ delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
+ delay = clamp(delay, lower_limit, BTRFS_DISCARD_MAX_DELAY_MSEC);
+ discard_ctl->delay = msecs_to_jiffies(delay);
+
+ spin_unlock(&discard_ctl->lock);
+}
+
+/**
* btrfs_discard_update_discardable - propagate discard counters
* @block_group: block_group of interest
* @ctl: free_space_ctl of @block_group
@@ -464,6 +509,8 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info)
atomic_set(&discard_ctl->discardable_extents, 0);
atomic64_set(&discard_ctl->discardable_bytes, 0);
+ discard_ctl->delay = BTRFS_DISCARD_MAX_DELAY_MSEC;
+ discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS;
}
void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/discard.h b/fs/btrfs/discard.h
index 0f2f89b1b0b9..5250fe178e49 100644
--- a/fs/btrfs/discard.h
+++ b/fs/btrfs/discard.h
@@ -17,6 +17,7 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl);
/* Update operations */
+void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl);
void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
struct btrfs_free_space_ctl *ctl);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2c12366cfde5..0163fdd59f8f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2935,8 +2935,10 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
cond_resched();
}
- if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
+ if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
+ btrfs_discard_calc_delay(&fs_info->discard_ctl);
btrfs_discard_schedule_work(&fs_info->discard_ctl, true);
+ }
/*
* Transaction is finished. We don't need the lock anymore. We
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index ce1da3fcd6c9..4155174245ff 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -366,9 +366,40 @@ static ssize_t btrfs_discardable_extents_show(struct kobject *kobj,
}
BTRFS_ATTR(discard, discardable_extents, btrfs_discardable_extents_show);
+static ssize_t btrfs_discard_iops_limit_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ READ_ONCE(fs_info->discard_ctl.iops_limit));
+}
+
+static ssize_t btrfs_discard_iops_limit_store(struct kobject *kobj,
+ struct kobj_attribute *a,
+ const char *buf, size_t len)
+{
+ struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
+ struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
+ u32 iops_limit;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &iops_limit);
+ if (ret)
+ return -EINVAL;
+
+ WRITE_ONCE(discard_ctl->iops_limit, iops_limit);
+
+ return len;
+}
+BTRFS_ATTR_RW(discard, iops_limit, btrfs_discard_iops_limit_show,
+ btrfs_discard_iops_limit_store);
+
static const struct attribute *discard_debug_attrs[] = {
BTRFS_ATTR_PTR(discard, discardable_bytes),
BTRFS_ATTR_PTR(discard, discardable_extents),
+ BTRFS_ATTR_PTR(discard, iops_limit),
NULL,
};
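
As a usage note, the new iops_limit knob is a plain u32 exposed through sysfs, so it can be inspected and tuned from userspace at runtime. Below is a minimal sketch; the exact sysfs path depends on where the discard attribute group ends up mounted (here it is registered among the debug attributes), so the file path is taken as a command-line argument rather than assumed.

/* Minimal sketch: read the current iops_limit and optionally set a new one.
 * Usage: ./iops_limit <path-to-iops_limit-file> [new-value]
 * The sysfs location (somewhere under /sys/fs/btrfs/<UUID>/) is an
 * assumption and therefore passed in rather than hard coded. */
#include <stdio.h>

int main(int argc, char **argv)
{
	FILE *f;
	unsigned int val;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <iops_limit file> [new value]\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "r");
	if (!f || fscanf(f, "%u", &val) != 1) {
		perror("read iops_limit");
		return 1;
	}
	fclose(f);
	printf("current iops_limit: %u\n", val);

	if (argc > 2) {
		f = fopen(argv[1], "w");
		if (!f || fprintf(f, "%s\n", argv[2]) < 0) {
			perror("write iops_limit");
			return 1;
		}
		fclose(f);
	}
	return 0;
}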