author     Kent Overstreet <kmo@daterainc.com>   2013-09-11 04:16:31 +0200
committer  Kent Overstreet <kmo@daterainc.com>   2014-01-08 22:05:08 +0100
commit     a5ae4300c15c778722c139953c825cd24d6ff517
tree       e9a633c4a4c5dde8e44b1226a0fbecf0f7c41b3c /drivers/md
parent     bcache: Don't touch bucket gen for dirty ptrs
bcache: Zero less memory
Another minor performance optimization: rather than zeroing all of struct search with memset() for every request, search_alloc() now initializes each field explicitly, and the data_insert_op bitfields are gathered into a union with a uint16_t flags member so they can all be cleared with a single store.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
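
The core of the change is easy to show outside the kernel. The sketch below uses a simplified, made-up struct (request_state is not the kernel's struct search); it only illustrates why assigning every live field once beats a struct-wide memset() that is then largely overwritten.

/*
 * Illustrative only: request_state is a simplified stand-in for
 * struct search, not the kernel code itself.
 */
#include <string.h>

struct request_state {
	void		*orig_bio;
	void		*cache_miss;
	unsigned	recoverable:1;
	unsigned	write:1;
	unsigned long	start_time;
};

/* Old pattern: zero the whole object, then overwrite most of it anyway. */
void init_with_memset(struct request_state *s, void *bio, unsigned long now)
{
	memset(s, 0, sizeof(*s));	/* every byte written once... */
	s->orig_bio	= bio;		/* ...and several of them a second time */
	s->recoverable	= 1;
	s->start_time	= now;
}

/* New pattern: each field is written exactly once, nothing is zeroed twice. */
void init_explicit(struct request_state *s, void *bio, unsigned long now)
{
	s->orig_bio	= bio;
	s->cache_miss	= NULL;
	s->recoverable	= 1;
	s->write	= 0;
	s->start_time	= now;
}

The trade-off is that every field added to the struct later must also be added to the initializer by hand, which is why the old layout carried a "/* Anything past this point won't get zeroed in search_alloc() */" marker.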
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/closure.h | 13
-rw-r--r--  drivers/md/bcache/request.c | 47
-rw-r--r--  drivers/md/bcache/request.h | 21
3 files changed, 41 insertions, 40 deletions
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 9762f1be3304..d29b773b863f 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -348,24 +348,13 @@ do {								\
 } while (0)
 
 /**
- * __closure_init() - Initialize a closure, skipping the memset()
- *
- * May be used instead of closure_init() when memory has already been zeroed.
- */
-#define __closure_init(cl, parent)				\
-	closure_init_type(cl, parent, true)
-
-/**
  * closure_init() - Initialize a closure, setting the refcount to 1
  * @cl:		closure to initialize
  * @parent:	parent of the new closure. cl will take a refcount on it for its
  *		lifetime; may be NULL.
  */
 #define closure_init(cl, parent)				\
-do {								\
-	memset((cl), 0, sizeof(*(cl)));				\
-	__closure_init(cl, parent);				\
-} while (0)
+	closure_init_type(cl, parent, true)
 
 static inline void closure_init_stack(struct closure *cl)
 {
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 59b3d6df100b..cce02f19e6c7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -597,14 +597,12 @@ struct search {
 	/* Stack frame for bio_complete */
 	struct closure		cl;
 
-	struct bcache_device	*d;
-
 	struct bbio		bio;
 	struct bio		*orig_bio;
 	struct bio		*cache_miss;
+	struct bcache_device	*d;
 
 	unsigned		insert_bio_sectors;
-
 	unsigned		recoverable:1;
 	unsigned		write:1;
 	unsigned		read_dirty_data:1;
@@ -712,10 +710,13 @@ static void cache_lookup(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, iop.cl);
 	struct bio *bio = &s->bio.bio;
+	int ret;
 
-	int ret = bch_btree_map_keys(&s->op, s->iop.c,
-				     &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
-				     cache_lookup_fn, MAP_END_KEY);
+	bch_btree_op_init(&s->op, -1);
+
+	ret = bch_btree_map_keys(&s->op, s->iop.c,
+				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
+				 cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
 
@@ -756,12 +757,12 @@ static void bio_complete(struct search *s)
 	}
 }
 
-static void do_bio_hook(struct search *s)
+static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
 	struct bio *bio = &s->bio.bio;
 
 	bio_init(bio);
-	__bio_clone_fast(bio, s->orig_bio);
+	__bio_clone_fast(bio, orig_bio);
 	bio->bi_end_io		= request_endio;
 	bio->bi_private		= &s->cl;
 
@@ -780,26 +781,32 @@ static void search_free(struct closure *cl)
 	mempool_free(s, s->d->c->search);
 }
 
-static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+static inline struct search *search_alloc(struct bio *bio,
+					  struct bcache_device *d)
 {
 	struct search *s;
 
 	s = mempool_alloc(d->c->search, GFP_NOIO);
-	memset(s, 0, offsetof(struct search, iop.insert_keys));
 
-	__closure_init(&s->cl, NULL);
+	closure_init(&s->cl, NULL);
+	do_bio_hook(s, bio);
 
-	s->iop.inode		= d->id;
-	s->iop.c		= d->c;
-	s->d			= d;
-	s->op.lock		= -1;
-	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->orig_bio		= bio;
-	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
-	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->cache_miss		= NULL;
+	s->d			= d;
 	s->recoverable		= 1;
+	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
+	s->read_dirty_data	= 0;
 	s->start_time		= jiffies;
-	do_bio_hook(s);
+
+	s->iop.c		= d->c;
+	s->iop.bio		= NULL;
+	s->iop.inode		= d->id;
+	s->iop.write_point	= hash_long((unsigned long) current, 16);
+	s->iop.write_prio	= 0;
+	s->iop.error		= 0;
+	s->iop.flags		= 0;
+	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
 
 	return s;
 }
@@ -845,7 +852,7 @@ static void cached_dev_read_error(struct closure *cl)
 		trace_bcache_read_retry(s->orig_bio);
 
 		s->iop.error = 0;
-		do_bio_hook(s);
+		do_bio_hook(s, s->orig_bio);
 
 		/* XXX: invalidate cache */
 
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 2cd65bf073c2..39f21dbedc38 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -13,17 +13,22 @@ struct data_insert_op {
 	uint16_t		write_prio;
 	short			error;
 
-	unsigned		bypass:1;
-	unsigned		writeback:1;
-	unsigned		flush_journal:1;
-	unsigned		csum:1;
+	union {
+		uint16_t	flags;
 
-	unsigned		replace:1;
-	unsigned		replace_collision:1;
+	struct {
+		unsigned	bypass:1;
+		unsigned	writeback:1;
+		unsigned	flush_journal:1;
+		unsigned	csum:1;
 
-	unsigned		insert_data_done:1;
+		unsigned	replace:1;
+		unsigned	replace_collision:1;
+
+		unsigned	insert_data_done:1;
+	};
+	};
 
-	/* Anything past this point won't get zeroed in search_alloc() */
 	struct keylist		insert_keys;
 	BKEY_PADDED(replace_key);
 };
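
The request.h hunk is what makes the single `s->iop.flags = 0;` assignment in search_alloc() work: overlaying the bitfields with a 16-bit word lets all of them be cleared in one store. Below is a minimal user-space sketch of the same trick; struct insert_flags is a hypothetical stand-in, it uses C11 anonymous unions/structs, and, as in the kernel code, it relies on the compiler packing the bitfields into the bytes that flags overlays.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the flag portion of struct data_insert_op. */
struct insert_flags {
	union {
		uint16_t	flags;		/* all of the bits, viewed at once */

		struct {
			unsigned	bypass:1;
			unsigned	writeback:1;
			unsigned	flush_journal:1;
			unsigned	csum:1;
			unsigned	replace:1;
			unsigned	replace_collision:1;
			unsigned	insert_data_done:1;
		};
	};
};

int main(void)
{
	struct insert_flags op;

	op.flags = 0;			/* clears every bitfield with one store */
	op.writeback = 1;
	op.flush_journal = 1;

	/* Bit order inside flags is implementation-defined; it is printed
	 * only to show that the two views alias the same storage. */
	printf("flags = 0x%04x\n", op.flags);
	return 0;
}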