author	Kent Overstreet <kent.overstreet@gmail.com>	2019-11-07 21:00:08 +0100
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 23:08:31 +0200
commit	6baf2730ccaf0bbbe87f10fb34692441942b59ba (patch)
tree	67636e5fb157b4cbcb5a0e8d6fa06592ce958f88 /fs/bcachefs
parent	bcachefs: Avoid calling bch2_btree_iter_relock() in bch2_btree_iter_traverse() (diff)
bcachefs: Inline fast path of bch2_increment_clock()
Shaving more cycles.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
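The change follows a classic fast-path/slow-path split: the cheap, common case (bump a percpu buffer) is inlined into every caller, and only the rare overflow case takes a function call into __bch2_increment_clock(). Below is a minimal userspace sketch of the same pattern, assuming nothing beyond standard C11: _Thread_local stands in for the kernel's percpu counters, and all names here (increment_clock, BUF_THRESHOLD, clock_now) are invented for illustration, not taken from this commit.

/*
 * Illustrative userspace analogue of this commit's pattern -- not the
 * kernel code.  The inline fast path is one thread-local add and one
 * compare; the out-of-line slow path runs only when the buffer
 * crosses the threshold.
 */
#include <stdio.h>

#define BUF_THRESHOLD 2048		/* stand-in for IO_CLOCK_PCPU_SECTORS */

static _Thread_local unsigned pcpu_buf;	/* stand-in for clock->pcpu_buf */
static unsigned long clock_now;		/* stand-in for clock->now */

/* Slow path, out of line: flush the buffered sectors into the clock. */
static void __increment_clock(void)
{
	clock_now += pcpu_buf;
	pcpu_buf = 0;
}

/* Fast path, inlined into every caller. */
static inline void increment_clock(unsigned sectors)
{
	pcpu_buf += sectors;
	if (pcpu_buf >= BUF_THRESHOLD)
		__increment_clock();
}

int main(void)
{
	for (int i = 0; i < 10000; i++)
		increment_clock(8);
	__increment_clock();		/* final flush for the demo */
	printf("clock advanced by %lu sectors\n", clock_now);
	return 0;
}

The design point is that the fast path compiles down to a single add and compare, so callers on the hot IO path pay almost nothing until a flush is due.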
Diffstat (limited to 'fs/bcachefs')
-rw-r--r--	fs/bcachefs/clock.c	 7
-rw-r--r--	fs/bcachefs/clock.h	13
2 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index e4486fcbea19..e227753563ab 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -135,17 +135,16 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
 	return ret;
 }
 
-void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
+void __bch2_increment_clock(struct io_clock *clock)
 {
-	struct io_clock *clock = &c->io_clock[rw];
 	struct io_timer *timer;
 	unsigned long now;
+	unsigned sectors;
 
 	/* Buffer up one megabyte worth of IO in the percpu counter */
 	preempt_disable();
 
-	if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
-		   IO_CLOCK_PCPU_SECTORS)) {
+	if (this_cpu_read(*clock->pcpu_buf) < IO_CLOCK_PCPU_SECTORS) {
 		preempt_enable();
 		return;
 	}
diff --git a/fs/bcachefs/clock.h b/fs/bcachefs/clock.h
index 5cb043c579d8..bfbbca8a207b 100644
--- a/fs/bcachefs/clock.h
+++ b/fs/bcachefs/clock.h
@@ -6,7 +6,18 @@ void bch2_io_timer_add(struct io_clock *, struct io_timer *);
 void bch2_io_timer_del(struct io_clock *, struct io_timer *);
 void bch2_kthread_io_clock_wait(struct io_clock *, unsigned long,
 				unsigned long);
-void bch2_increment_clock(struct bch_fs *, unsigned, int);
+
+void __bch2_increment_clock(struct io_clock *);
+
+static inline void bch2_increment_clock(struct bch_fs *c, unsigned sectors,
+					int rw)
+{
+	struct io_clock *clock = &c->io_clock[rw];
+
+	if (unlikely(this_cpu_add_return(*clock->pcpu_buf, sectors) >=
+		     IO_CLOCK_PCPU_SECTORS))
+		__bch2_increment_clock(clock);
+}
 
 void bch2_io_clock_schedule_timeout(struct io_clock *, unsigned long);
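Why buffer in a percpu counter at all? A micro-benchmark sketch may make the cost model concrete (assumed, not part of this commit; pthreads and C11 atomics stand in for multiple CPUs and the shared clock, and every name here is invented). It compares a contended shared counter against the buffered scheme:

/*
 * Sketch only: contrast a shared atomic counter, written on every
 * "IO", with a thread-local buffer flushed every BUF_THRESHOLD
 * sectors.  Build with: cc -O2 -pthread bench.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NTHREADS	4
#define ITERS		10000000
#define BUF_THRESHOLD	2048

static atomic_ulong shared_clock;
static _Thread_local unsigned long local_buf;

static void *contended(void *arg)
{
	(void)arg;
	for (long i = 0; i < ITERS; i++)
		atomic_fetch_add(&shared_clock, 8);	/* every IO hits shared state */
	return NULL;
}

static void *buffered(void *arg)
{
	(void)arg;
	for (long i = 0; i < ITERS; i++) {
		local_buf += 8;				/* fast path: thread-local add */
		if (local_buf >= BUF_THRESHOLD) {
			atomic_fetch_add(&shared_clock, local_buf);	/* slow path */
			local_buf = 0;
		}
	}
	atomic_fetch_add(&shared_clock, local_buf);	/* final flush */
	return NULL;
}

static double run(void *(*fn)(void *))
{
	pthread_t tid[NTHREADS];
	struct timespec t0, t1;

	atomic_store(&shared_clock, 0);
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, fn, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	return (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
}

int main(void)
{
	printf("contended: %.3fs\n", run(contended));
	printf("buffered:  %.3fs\n", run(buffered));
	return 0;
}

On a multi-core machine the buffered variant usually wins by a wide margin, since the shared cache line is written once per BUF_THRESHOLD sectors rather than once per call; the kernel version gets the same effect with this_cpu_add_return(), dropping into __bch2_increment_clock() only on overflow.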