author     Roland Dreier <roland@purestorage.com>        2012-02-14 01:18:17 +0100
committer  Nicholas Bellinger <nab@linux-iscsi.org>      2012-02-25 23:37:49 +0100
commit     015487b89f27d91d95a056cdc3c85e6c729bff12 (patch)
tree       0cecb2acc903154e25abb23e8f345f301fcd1ef5 /drivers/target
parent     target: Don't set WBUS16 or SYNC bits in INQUIRY response (diff)
target: Untangle front-end and back-end meanings of max_sectors attribute
se_dev_attrib.max_sectors currently has two independent meanings:

 - It is reported in the block limits VPD page as the maximum transfer
   length, i.e. the largest IO that the front-end (fabric) can handle.
   However, the target core does not enforce this maximum transfer length.

 - It is used to hold the size of the largest IO that the back-end can
   handle, so we know when to split SCSI commands into multiple tasks.

Fix this by adding a new se_dev_attrib.fabric_max_sectors to hold the
maximum transfer length, and checking incoming IOs against that limit.

Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
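To make the split concrete, here is a minimal user-space sketch of the intended behaviour. It is not taken from the patch: the constants, the queue_io() helper, and the numbers are illustrative assumptions. The fabric limit rejects an oversized command outright, while the back-end limit only decides how an accepted command is carved into tasks.

/*
 * Toy model (not kernel code) of the two limits this patch separates:
 * FABRIC_MAX_SECTORS caps what a single SCSI command may request, while
 * BACKEND_MAX_SECTORS only controls how the command is split into tasks.
 */
#include <stdio.h>

#define FABRIC_MAX_SECTORS	8192	/* reported in the block limits VPD page */
#define BACKEND_MAX_SECTORS	1024	/* largest IO the back-end device accepts */

static int queue_io(unsigned int sectors)
{
	unsigned int off;

	/* Front-end check: reject commands larger than the advertised limit. */
	if (sectors > FABRIC_MAX_SECTORS) {
		fprintf(stderr, "rejecting IO of %u sectors (> fabric max %u)\n",
			sectors, FABRIC_MAX_SECTORS);
		return -1;
	}

	/* Back-end split: carve the command into tasks the device can take. */
	for (off = 0; off < sectors; off += BACKEND_MAX_SECTORS) {
		unsigned int chunk = sectors - off;

		if (chunk > BACKEND_MAX_SECTORS)
			chunk = BACKEND_MAX_SECTORS;
		printf("task: %u sectors at offset %u\n", chunk, off);
	}
	return 0;
}

int main(void)
{
	queue_io(3000);		/* accepted, split into three tasks */
	queue_io(20000);	/* rejected: exceeds the fabric limit */
	return 0;
}

Running this prints three tasks for the 3000-sector IO and a rejection message for the 20000-sector one, which mirrors the new check added in target_core_transport.c below.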
Diffstat (limited to 'drivers/target')
-rw-r--r--   drivers/target/target_core_cdb.c          2
-rw-r--r--   drivers/target/target_core_configfs.c     4
-rw-r--r--   drivers/target/target_core_device.c      65
-rw-r--r--   drivers/target/target_core_internal.h     1
-rw-r--r--   drivers/target/target_core_transport.c    8
5 files changed, 73 insertions, 7 deletions
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 4f65b258cc25..41ca2d43377b 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -456,7 +456,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
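The hunk above only changes which attribute feeds the MAXIMUM TRANSFER LENGTH field of the Block Limits VPD page (B0h). As a rough illustration of what put_unaligned_be32() accomplishes there, this user-space program (the buffer size and the 8192 value are made-up examples, not from the commit) stores a 32-bit value big-endian at offset 8 of the page payload:

/*
 * Sketch of encoding MAXIMUM TRANSFER LENGTH into the Block Limits VPD
 * payload: a 32-bit big-endian value at byte offset 8.  put_be32() is an
 * open-coded stand-in for the kernel's put_unaligned_be32() helper.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void put_be32(uint32_t val, unsigned char *p)
{
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

int main(void)
{
	unsigned char buf[64];
	uint32_t fabric_max_sectors = 8192;	/* example value */

	memset(buf, 0, sizeof(buf));
	put_be32(fabric_max_sectors, &buf[8]);	/* MAXIMUM TRANSFER LENGTH */

	printf("bytes 8..11: %02x %02x %02x %02x\n",
	       buf[8], buf[9], buf[10], buf[11]);
	return 0;
}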
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0700d3b3d1c0..ac0ee5021c29 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -702,6 +702,9 @@ SE_DEV_ATTR_RO(hw_max_sectors);
DEF_DEV_ATTRIB(max_sectors);
SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(fabric_max_sectors);
+SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB(optimal_sectors);
SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
@@ -741,6 +744,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
&target_core_dev_attrib_max_sectors.attr,
+ &target_core_dev_attrib_fabric_max_sectors.attr,
&target_core_dev_attrib_optimal_sectors.attr,
&target_core_dev_attrib_hw_queue_depth.attr,
&target_core_dev_attrib_queue_depth.attr,
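With the DEF_DEV_ATTRIB()/SE_DEV_ATTR() lines added above, fabric_max_sectors becomes a writable file under each device's attrib/ directory in configfs. As a hedged sketch, assuming a typical target core layout (the iblock_0/disk0 path below is an example, not something this commit creates), a user-space tool could set the new limit like this:

/*
 * Sketch only: write a new fabric_max_sectors value through configfs.
 * The device path is a hypothetical example; adjust it to the HBA and
 * device names actually configured on the system.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
	    "/sys/kernel/config/target/core/iblock_0/disk0/attrib/fabric_max_sectors";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "8192\n");	/* new maximum transfer length, in sectors */
	fclose(f);
	return 0;
}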
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 0b25b50900e9..27da4e4e07c6 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -889,10 +889,15 @@ void se_dev_set_default_attribs(
limits->logical_block_size);
dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
/*
- * Set optimal_sectors from max_sectors, which can be lowered via
- * configfs.
+ * Set fabric_max_sectors, which is reported in block limits
+ * VPD page (B0h).
*/
- dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
+ dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+ /*
+ * Set optimal_sectors from fabric_max_sectors, which can be
+ * lowered via configfs.
+ */
+ dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
/*
* queue_depth is based on subsystem plugin dependent requirements.
*/
@@ -1224,6 +1229,54 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
return 0;
}
+int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
+{
+ if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+ pr_err("dev[%p]: Unable to change SE Device"
+ " fabric_max_sectors while dev_export_obj: %d count exists\n",
+ dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+ return -EINVAL;
+ }
+ if (!fabric_max_sectors) {
+ pr_err("dev[%p]: Illegal ZERO value for"
+ " fabric_max_sectors\n", dev);
+ return -EINVAL;
+ }
+ if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+ pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
+ " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
+ DA_STATUS_MAX_SECTORS_MIN);
+ return -EINVAL;
+ }
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+ pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+ " greater than TCM/SE_Device max_sectors:"
+ " %u\n", dev, fabric_max_sectors,
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ return -EINVAL;
+ }
+ } else {
+ if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+ pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+ " greater than DA_STATUS_MAX_SECTORS_MAX:"
+ " %u\n", dev, fabric_max_sectors,
+ DA_STATUS_MAX_SECTORS_MAX);
+ return -EINVAL;
+ }
+ }
+ /*
+ * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+ */
+ fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
+ dev->se_sub_dev->se_dev_attrib.block_size);
+
+ dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+ pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
+ dev, fabric_max_sectors);
+ return 0;
+}
+
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
@@ -1237,10 +1290,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
- if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
+ if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
- " greater than max_sectors: %u\n", dev,
- optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
+ " greater than fabric_max_sectors: %u\n", dev,
+ optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
return -EINVAL;
}
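se_dev_align_max_sectors() itself is not part of this diff, so the following is only a guess at the idea the in-code comment describes: round the sector count down so that the resulting byte length stays a multiple of PAGE_SIZE, keeping transport_allocate_data_tasks() page-aligned. The page size, block sizes, and example values are assumptions for illustration.

/*
 * User-space sketch of aligning a sector count down to a PAGE_SIZE
 * boundary.  Not the kernel helper, just the rounding idea.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int align_max_sectors(unsigned int max_sectors,
				      unsigned int block_size)
{
	unsigned int sectors_per_page = PAGE_SIZE / block_size;

	if (sectors_per_page == 0)	/* block larger than a page: leave as-is */
		return max_sectors;
	return max_sectors - (max_sectors % sectors_per_page);
}

int main(void)
{
	/* 1000 sectors of 512 bytes is already page-aligned, so unchanged. */
	printf("%u\n", align_max_sectors(1000, 512));
	/* 1001 sectors rounds down to 1000 (125 full 4 KiB pages). */
	printf("%u\n", align_max_sectors(1001, 512));
	return 0;
}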
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index b026dedb8184..21c05638f158 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -53,6 +53,7 @@ int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
+int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 19804b6fdbaa..b79c6a2824ee 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -37,6 +37,7 @@
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
+#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -3107,6 +3108,13 @@ static int transport_generic_cmd_sequencer(
cmd->data_length = size;
}
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
+ sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+ printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
+ cdb[0], sectors);
+ goto out_invalid_cdb_field;
+ }
+
/* reject any command that we don't have a handler for */
if (!(passthrough || cmd->execute_task ||
(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))