author    Shaun McDowell <shaunjmcdowell@gmail.com>  2017-05-26 05:55:54 +0200
committer Jens Axboe <axboe@fb.com>                  2017-05-30 16:20:25 +0200
commit    685c9b24ad5090e7a74781c4784fc12e0a04a176
tree      76b566b20b4629798098ed02242dc0f61ff9a0eb  /drivers/block/nbd.c
parent    blk-mq: make per-sw-queue bio merge as default .bio_merge
nbd: add FUA op support
The NBD userland client and server already have FUA (forced unit access) support, with the relevant flags defined. Teach the NBD kernel module to recognize NBD_FLAG_SEND_FUA, enable FUA on the request queue, and forward FUA-flagged requests to the server.

Signed-off-by: Shaun McDowell <shaunjmcdowell@gmail.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
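For context on how the new flag reaches the wire: the driver ORs the per-command flag into the same 32-bit big-endian word that carries the command type, so the high 16 bits act as command flags. The userspace sketch below only illustrates that encoding and is not part of the patch; NBD_CMD_WRITE and NBD_CMD_FLAG_FUA mirror the values this series adds to include/uapi/linux/nbd.h (that header change is outside this diffstat-limited view), and req_is_fua simply stands in for the driver's req->cmd_flags & REQ_FUA test.

/* Illustrative userspace sketch, not part of the patch: shows how the
 * FUA command flag lands in the big-endian "type" word of an NBD
 * request header. Values mirror include/uapi/linux/nbd.h. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define NBD_CMD_WRITE		1
#define NBD_CMD_FLAG_FUA	(1 << 16)	/* high 16 bits carry per-command flags */

int main(void)
{
	uint32_t type = NBD_CMD_WRITE;
	uint32_t nbd_cmd_flags = 0;
	int req_is_fua = 1;	/* stands in for req->cmd_flags & REQ_FUA */

	if (req_is_fua)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* Same combination the patch performs in nbd_send_cmd(). */
	uint32_t wire_type = htonl(type | nbd_cmd_flags);

	printf("wire type word: 0x%08x\n", ntohl(wire_type));	/* prints 0x00010001 */
	return 0;
}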
Diffstat (limited to 'drivers/block/nbd.c')
-rw-r--r--  drivers/block/nbd.c  16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9a7bb2c29447..c5e52f66d3d4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -400,6 +400,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
 	u32 type;
+	u32 nbd_cmd_flags = 0;
 	u32 tag = blk_mq_unique_tag(req);
 	int sent = nsock->sent, skip = 0;
@@ -429,6 +430,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 		return -EIO;
 	}
 
+	if (req->cmd_flags & REQ_FUA)
+		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
+
 	/* We did a partial send previously, and we at least sent the whole
 	 * request struct, so just go and send the rest of the pages in the
 	 * request.
@@ -442,7 +446,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	}
 	cmd->index = index;
 	cmd->cookie = nsock->cookie;
-	request.type = htonl(type);
+	request.type = htonl(type | nbd_cmd_flags);
 	if (type != NBD_CMD_FLUSH) {
 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
 		request.len = htonl(size);
@@ -965,8 +969,12 @@ static void nbd_parse_flags(struct nbd_device *nbd)
 		set_disk_ro(nbd->disk, false);
 	if (config->flags & NBD_FLAG_SEND_TRIM)
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-	if (config->flags & NBD_FLAG_SEND_FLUSH)
-		blk_queue_write_cache(nbd->disk->queue, true, false);
+	if (config->flags & NBD_FLAG_SEND_FLUSH) {
+		if (config->flags & NBD_FLAG_SEND_FUA)
+			blk_queue_write_cache(nbd->disk->queue, true, true);
+		else
+			blk_queue_write_cache(nbd->disk->queue, true, false);
+	}
 	else
 		blk_queue_write_cache(nbd->disk->queue, false, false);
 }
@@ -1309,6 +1317,8 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
 		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
 	if (flags & NBD_FLAG_SEND_FLUSH)
 		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
+	if (flags & NBD_FLAG_SEND_FUA)
+		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
 	if (flags & NBD_FLAG_SEND_TRIM)
 		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");