author      Jens Axboe <axboe@fb.com>   2016-04-12 23:46:35 +0200
committer   Jens Axboe <axboe@fb.com>   2016-04-12 23:46:35 +0200
commit      2f9a0b33ac6c4a2accaf787456080af35f1cab0b (patch)
tree        f9fff8867f90c42f1ec70e0ba86cf7529f881ef7 /block
parent      blk-mq: Make blk_mq_all_tag_busy_iter static (diff)
parent      block: add ability to flag write back caching on a device (diff)
Merge branch 'for-4.7/core' into for-4.7/drivers
Diffstat (limited to 'block')
 block/blk-settings.c | 26
 block/blk-sysfs.c    | 39
 2 files changed, 65 insertions(+), 0 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 331e4eee0dda..c903bee43cf8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -846,6 +846,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q: the request queue for the device
+ * @wc: write back cache on or off
+ * @fua: device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+	spin_lock_irq(q->queue_lock);
+	if (wc) {
+		queue_flag_set(QUEUE_FLAG_WC, q);
+		q->flush_flags = REQ_FLUSH;
+	} else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	if (fua) {
+		if (wc)
+			q->flush_flags |= REQ_FUA;
+		queue_flag_set(QUEUE_FLAG_FUA, q);
+	} else
+		queue_flag_clear(QUEUE_FLAG_FUA, q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 995b58d46ed1..99205965f559 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -347,6 +347,38 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	return ret;
 }
 
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+		return sprintf(page, "write back\n");
+
+	return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+			      size_t count)
+{
+	int set = -1;
+
+	if (!strncmp(page, "write back", 10))
+		set = 1;
+	else if (!strncmp(page, "write through", 13) ||
+		 !strncmp(page, "none", 4))
+		set = 0;
+
+	if (set == -1)
+		return -EINVAL;
+
+	spin_lock_irq(q->queue_lock);
+	if (set)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return count;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -478,6 +510,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
 	.store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_wc_entry = {
+	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_wc_show,
+	.store = queue_wc_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -503,6 +541,7 @@ static struct attribute *default_attrs[] = {
 	&queue_iostats_entry.attr,
 	&queue_random_entry.attr,
 	&queue_poll_entry.attr,
+	&queue_wc_entry.attr,
 	NULL,
 };
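
Usage illustration (not part of this commit): a minimal sketch of how a block driver that has detected its device's cache capabilities might report them through the newly exported blk_queue_write_cache() helper. The mydev structure, its vwc/fua fields, and mydev_advertise_cache() are hypothetical names used for illustration only.

#include <linux/blkdev.h>

/* Hypothetical per-device state a driver might keep after probing. */
struct mydev {
	struct request_queue *queue;
	bool vwc;	/* device reports a volatile write back cache */
	bool fua;	/* device supports Force Unit Access writes */
};

static void mydev_advertise_cache(struct mydev *dev)
{
	/*
	 * Report the cache configuration to the block layer: with
	 * wc == true the queue gets QUEUE_FLAG_WC and REQ_FLUSH in
	 * flush_flags; fua == true additionally sets QUEUE_FLAG_FUA
	 * (and REQ_FUA when a write back cache is present).
	 */
	blk_queue_write_cache(dev->queue, dev->vwc, dev->fua);
}

The blk-sysfs.c side of the change exposes the same state to userspace: reading /sys/block/<dev>/queue/write_cache reports "write back" or "write through", and writing "write back", "write through", or "none" toggles QUEUE_FLAG_WC via queue_wc_store().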