author     Linus Torvalds <torvalds@linux-foundation.org>   2022-03-25 03:37:53 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-03-25 03:37:53 +0100
commit     6f2689a7662809ff39f2b24e452d11569c21ea2f (patch)
tree       6dfbfd3feb4d77a66bf06f246640ae4ed321bbb2 /drivers/target
parent     Merge tag 'for-5.18/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent     scsi: scsi_ioctl: Drop needless assignment in sg_io() (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (qla2xxx, pm8001,
  libsas, smartpqi, scsi_debug, lpfc, iscsi, mpi3mr) plus minor updates
  and bug fixes.

  The high blast radius core update is the removal of write same, which
  affects block and several non-SCSI devices. The other big change,
  which is more local, is the removal of the SCSI pointer"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (281 commits)
  scsi: scsi_ioctl: Drop needless assignment in sg_io()
  scsi: bsg: Drop needless assignment in scsi_bsg_sg_io_fn()
  scsi: lpfc: Copyright updates for 14.2.0.0 patches
  scsi: lpfc: Update lpfc version to 14.2.0.0
  scsi: lpfc: SLI path split: Refactor BSG paths
  scsi: lpfc: SLI path split: Refactor Abort paths
  scsi: lpfc: SLI path split: Refactor SCSI paths
  scsi: lpfc: SLI path split: Refactor CT paths
  scsi: lpfc: SLI path split: Refactor misc ELS paths
  scsi: lpfc: SLI path split: Refactor VMID paths
  scsi: lpfc: SLI path split: Refactor FDISC paths
  scsi: lpfc: SLI path split: Refactor LS_RJT paths
  scsi: lpfc: SLI path split: Refactor LS_ACC paths
  scsi: lpfc: SLI path split: Refactor the RSCN/SCR/RDF/EDC/FARPR paths
  scsi: lpfc: SLI path split: Refactor PLOGI/PRLI/ADISC/LOGO paths
  scsi: lpfc: SLI path split: Refactor base ELS paths and the FLOGI path
  scsi: lpfc: SLI path split: Introduce lpfc_prep_wqe
  scsi: lpfc: SLI path split: Refactor fast and slow paths to native SLI4
  scsi: lpfc: SLI path split: Refactor lpfc_iocbq
  scsi: lpfc: Use kcalloc()
  ...
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/iscsi/iscsi_target.c            77
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c   32
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c       8
-rw-r--r--  drivers/target/target_core_pscsi.c             67
-rw-r--r--  drivers/target/target_core_pscsi.h              4
-rw-r--r--  drivers/target/target_core_user.c              73
6 files changed, 204 insertions, 57 deletions
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 2c54c5d8412d..6fe6a6bab3f4 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -702,13 +702,19 @@ static int __init iscsi_target_init_module(void)
if (!iscsit_global->ts_bitmap)
goto configfs_out;
+ if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
+ goto bitmap_out;
+ }
+ cpumask_setall(iscsit_global->allowed_cpumask);
+
lio_qr_cache = kmem_cache_create("lio_qr_cache",
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
pr_err("Unable to kmem_cache_create() for"
" lio_qr_cache\n");
- goto bitmap_out;
+ goto cpumask_out;
}
lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -753,6 +759,8 @@ dr_out:
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
+cpumask_out:
+ free_cpumask_var(iscsit_global->allowed_cpumask);
bitmap_out:
vfree(iscsit_global->ts_bitmap);
configfs_out:
@@ -782,6 +790,7 @@ static void __exit iscsi_target_cleanup_module(void)
target_unregister_template(&iscsi_ops);
+ free_cpumask_var(iscsit_global->allowed_cpumask);
vfree(iscsit_global->ts_bitmap);
kfree(iscsit_global);
}
@@ -3587,6 +3596,11 @@ static int iscsit_send_reject(
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
int ord, cpu;
+ cpumask_t conn_allowed_cpumask;
+
+ cpumask_and(&conn_allowed_cpumask, iscsit_global->allowed_cpumask,
+ cpu_online_mask);
+
/*
* bitmap_id is assigned from iscsit_global->ts_bitmap from
* within iscsit_start_kthreads()
@@ -3595,8 +3609,9 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
* iSCSI connection's RX/TX threads will be scheduled to
* execute upon.
*/
- ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
- for_each_online_cpu(cpu) {
+ cpumask_clear(conn->conn_cpumask);
+ ord = conn->bitmap_id % cpumask_weight(&conn_allowed_cpumask);
+ for_each_cpu(cpu, &conn_allowed_cpumask) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
return;
@@ -3609,6 +3624,62 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
cpumask_setall(conn->conn_cpumask);
}
+static void iscsit_thread_reschedule(struct iscsi_conn *conn)
+{
+ /*
+ * If iscsit_global->allowed_cpumask has been modified, reschedule the
+ * iSCSI connection's RX/TX threads and update conn->allowed_cpumask.
+ */
+ if (!cpumask_equal(iscsit_global->allowed_cpumask,
+ conn->allowed_cpumask)) {
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_tx_reset_cpumask = 1;
+ conn->conn_rx_reset_cpumask = 1;
+ cpumask_copy(conn->allowed_cpumask,
+ iscsit_global->allowed_cpumask);
+ }
+}
+
+void iscsit_thread_check_cpumask(
+ struct iscsi_conn *conn,
+ struct task_struct *p,
+ int mode)
+{
+ /*
+ * The TX and RX threads may call iscsit_thread_check_cpumask()
+ * at the same time. The RX thread might be faster and return from
+ * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
+ * Then the TX thread sets it back to 1.
+ * The next time the RX thread loops, it sees conn_rx_reset_cpumask
+ * set to 1, calls set_cpus_allowed_ptr() again and sets it back to 0.
+ */
+ iscsit_thread_reschedule(conn);
+
+ /*
+ * mode == 1 signals iscsi_target_tx_thread() usage.
+ * mode == 0 signals iscsi_target_rx_thread() usage.
+ */
+ if (mode == 1) {
+ if (!conn->conn_tx_reset_cpumask)
+ return;
+ } else {
+ if (!conn->conn_rx_reset_cpumask)
+ return;
+ }
+
+ /*
+ * Update the CPU mask for this single kthread so that
+ * both TX and RX kthreads are scheduled to run on the
+ * same CPU.
+ */
+ set_cpus_allowed_ptr(p, conn->conn_cpumask);
+ if (mode == 1)
+ conn->conn_tx_reset_cpumask = 0;
+ else
+ conn->conn_rx_reset_cpumask = 0;
+}
+EXPORT_SYMBOL(iscsit_thread_check_cpumask);
+
int
iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
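
The hunks above round-robin a connection's bitmap_id over the CPUs that are both online and present in the administrator-supplied allowed_cpumask. A minimal userspace sketch of that selection arithmetic, with plain 64-bit masks and __builtin_popcountll() standing in for the kernel's cpumask API (illustration only, not kernel code):

/* Userspace sketch of the round-robin CPU pick done by
 * iscsit_thread_get_cpumask(); assumes at most 64 CPUs. */
#include <stdint.h>
#include <stdio.h>

static int pick_cpu(uint64_t allowed, uint64_t online, unsigned int bitmap_id)
{
	uint64_t usable = allowed & online;         /* cpumask_and()    */
	int weight = __builtin_popcountll(usable);  /* cpumask_weight() */
	int ord, cpu;

	if (!weight)
		return -1;  /* the kernel falls back to cpumask_setall() here */

	ord = bitmap_id % weight;
	for (cpu = 0; cpu < 64; cpu++) {            /* for_each_cpu()   */
		if (!(usable & (1ULL << cpu)))
			continue;
		if (ord-- == 0)
			return cpu;
	}
	return -1;
}

int main(void)
{
	/* CPUs 2-5 allowed, CPUs 0-7 online, connection bitmap_id 9 */
	printf("RX/TX threads pinned to CPU %d\n",
	       pick_cpu(0x3cULL, 0xffULL, 9));
	return 0;
}

With bitmap_id 9 and four usable CPUs this prints CPU 3; consecutive connections thus spread their RX/TX thread pairs across the allowed set, and the kernel falls back to all CPUs when the intersection with cpu_online_mask is empty.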
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 2a9de24a8bbe..0cedcfe207b5 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1127,8 +1127,40 @@ static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
+static ssize_t lio_target_wwn_cpus_allowed_list_show(
+ struct config_item *item, char *page)
+{
+ return sprintf(page, "%*pbl\n",
+ cpumask_pr_args(iscsit_global->allowed_cpumask));
+}
+
+static ssize_t lio_target_wwn_cpus_allowed_list_store(
+ struct config_item *item, const char *page, size_t count)
+{
+ int ret;
+ char *orig;
+ cpumask_t new_allowed_cpumask;
+
+ orig = kstrdup(page, GFP_KERNEL);
+ if (!orig)
+ return -ENOMEM;
+
+ cpumask_clear(&new_allowed_cpumask);
+ ret = cpulist_parse(orig, &new_allowed_cpumask);
+
+ kfree(orig);
+ if (ret != 0)
+ return ret;
+
+ cpumask_copy(iscsit_global->allowed_cpumask, &new_allowed_cpumask);
+ return count;
+}
+
+CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
+
static struct configfs_attribute *lio_target_wwn_attrs[] = {
&lio_target_wwn_attr_lio_version,
+ &lio_target_wwn_attr_cpus_allowed_list,
NULL,
};
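
The new cpus_allowed_list attribute accepts a CPU list in cpulist_parse() syntax (e.g. "0-3,8") and copies the parsed mask into iscsit_global->allowed_cpumask; reads print the current mask in %*pbl list form. A hedged sketch of driving it from userspace, assuming the attribute appears alongside lio_version under /sys/kernel/config/target/iscsi/ (the path is an assumption based on lio_target_wwn_attrs, not confirmed by this diff):

/* Sketch: write a CPU list to the LIO cpus_allowed_list attribute and
 * read it back.  ATTR path is an assumption; adjust for your system. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define ATTR "/sys/kernel/config/target/iscsi/cpus_allowed_list"

int main(void)
{
	const char *cpus = "0-3\n";
	char buf[256];
	ssize_t n;
	int fd;

	fd = open(ATTR, O_WRONLY);
	if (fd < 0) { perror("open for write"); return 1; }
	if (write(fd, cpus, strlen(cpus)) < 0)
		perror("write");   /* a malformed list is rejected by cpulist_parse() */
	close(fd);

	fd = open(ATTR, O_RDONLY);
	if (fd < 0) { perror("open for read"); return 1; }
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("allowed_cpumask: %s", buf);  /* printed as a CPU list */
	}
	close(fd);
	return 0;
}

A failed parse returns the error from cpulist_parse() instead of silently clearing the mask; on success, new connections pick CPUs from the updated mask and existing connections re-pin their threads via iscsit_thread_check_cpumask().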
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 1a9c50401bdb..9c01fb864585 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1129,8 +1129,15 @@ static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
goto free_conn_ops;
}
+ if (!zalloc_cpumask_var(&conn->allowed_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->allowed_cpumask\n");
+ goto free_conn_cpumask;
+ }
+
return conn;
+free_conn_cpumask:
+ free_cpumask_var(conn->conn_cpumask);
free_conn_ops:
kfree(conn->conn_ops);
put_transport:
@@ -1142,6 +1149,7 @@ free_conn:
void iscsit_free_conn(struct iscsi_conn *conn)
{
+ free_cpumask_var(conn->allowed_cpumask);
free_cpumask_var(conn->conn_cpumask);
kfree(conn->conn_ops);
iscsit_put_transport(conn->conn_transport);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0fae71ac5cc8..ff292b75e23f 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -592,16 +592,14 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
- struct pscsi_plugin_task *pt = cmd->priv;
- unsigned char *cdb;
+ unsigned char *cdb = cmd->priv;
+
/*
- * Special case for REPORT_LUNs handling where pscsi_plugin_task has
- * not been allocated because TCM is handling the emulation directly.
+ * Special case for REPORT_LUNs which is emulated and not passed on.
*/
- if (!pt)
+ if (!cdb)
return;
- cdb = &pt->pscsi_cdb[0];
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
* forced.
@@ -962,30 +960,15 @@ pscsi_execute_cmd(struct se_cmd *cmd)
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
- struct pscsi_plugin_task *pt;
+ struct scsi_cmnd *scmd;
struct request *req;
sense_reason_t ret;
- /*
- * Dynamically alloc cdb space, since it may be larger than
- * TCM_MAX_COMMAND_SIZE
- */
- pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
- if (!pt) {
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
- cmd->priv = pt;
-
- memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
- scsi_command_size(cmd->t_task_cdb));
-
req = scsi_alloc_request(pdv->pdv_sd->request_queue,
cmd->data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(req)) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto fail;
- }
+ if (IS_ERR(req))
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (sgl) {
ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
@@ -995,14 +978,23 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->end_io = pscsi_req_done;
req->end_io_data = cmd;
- scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
- scsi_req(req)->cmd = &pt->pscsi_cdb[0];
+
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->cmd_len = scsi_command_size(cmd->t_task_cdb);
+ if (scmd->cmd_len > sizeof(scmd->cmnd)) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto fail_put_request;
+ }
+ memcpy(scmd->cmnd, cmd->t_task_cdb, scmd->cmd_len);
+
if (pdv->pdv_sd->type == TYPE_DISK ||
pdv->pdv_sd->type == TYPE_ZBC)
req->timeout = PS_TIMEOUT_DISK;
else
req->timeout = PS_TIMEOUT_OTHER;
- scsi_req(req)->retries = PS_RETRY;
+ scmd->allowed = PS_RETRY;
+
+ cmd->priv = scmd->cmnd;
blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
pscsi_req_done);
@@ -1011,8 +1003,6 @@ pscsi_execute_cmd(struct se_cmd *cmd)
fail_put_request:
blk_mq_free_request(req);
-fail:
- kfree(pt);
return ret;
}
@@ -1040,33 +1030,30 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
static void pscsi_req_done(struct request *req, blk_status_t status)
{
struct se_cmd *cmd = req->end_io_data;
- struct pscsi_plugin_task *pt = cmd->priv;
- int result = scsi_req(req)->result;
- enum sam_status scsi_status = result & 0xff;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
+ enum sam_status scsi_status = scmd->result & 0xff;
+ u8 *cdb = cmd->priv;
if (scsi_status != SAM_STAT_GOOD) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
- " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
- result);
+ " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
}
- pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense);
+ pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer);
- switch (host_byte(result)) {
+ switch (host_byte(scmd->result)) {
case DID_OK:
target_complete_cmd_with_length(cmd, scsi_status,
- cmd->data_length - scsi_req(req)->resid_len);
+ cmd->data_length - scmd->resid_len);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
- " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
- result);
+ " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
blk_mq_free_request(req);
- kfree(pt);
}
static const struct target_backend_ops pscsi_ops = {
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index e8458b5e85c9..23d9a6e340d4 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -23,10 +23,6 @@ struct block_device;
struct scsi_device;
struct Scsi_Host;
-struct pscsi_plugin_task {
- unsigned char pscsi_cdb[0];
-} ____cacheline_aligned;
-
#define PDF_HAS_CHANNEL_ID 0x01
#define PDF_HAS_TARGET_ID 0x02
#define PDF_HAS_LUN_ID 0x04
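
With struct pscsi_plugin_task removed in the two diffs above, the CDB is copied straight into the scsi_cmnd PDU returned by blk_mq_rq_to_pdu() for the allocated request, and cmd->priv now only remembers a pointer to that CDB for the completion path. A minimal userspace sketch of the bounds-checked copy this introduces (the 32-byte buffer is an illustrative stand-in for sizeof(scmd->cmnd), not the kernel's definition):

/* Sketch of the new pscsi CDB handling: copy the task CDB into the
 * fixed-size buffer embedded in the request PDU and reject CDBs that
 * do not fit (the kernel then fails the command with
 * TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE). */
#include <stdio.h>
#include <string.h>

struct fake_scsi_cmnd {
	unsigned int  cmd_len;
	unsigned char cmnd[32];   /* stands in for scsi_cmnd::cmnd */
};

static int fill_cdb(struct fake_scsi_cmnd *scmd,
		    const unsigned char *cdb, size_t cdb_len)
{
	scmd->cmd_len = cdb_len;
	if (scmd->cmd_len > sizeof(scmd->cmnd))
		return -1;                     /* too long: fail the command */
	memcpy(scmd->cmnd, cdb, scmd->cmd_len);
	return 0;
}

int main(void)
{
	struct fake_scsi_cmnd scmd;
	const unsigned char inquiry[6] = { 0x12, 0, 0, 0, 36, 0 };

	if (fill_cdb(&scmd, inquiry, sizeof(inquiry)) == 0)
		printf("CDB 0x%02x queued, cmd_len %u\n",
		       scmd.cmnd[0], scmd.cmd_len);
	return 0;
}

Oversized CDBs now fail before the request is issued, replacing the old kzalloc() of a variable-length pscsi_plugin_task sized by scsi_command_size().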
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 7b2a89a67cdb..95d4ca50a605 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -61,10 +61,10 @@
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
/* For mailbox plus cmd ring, the size is fixed 8MB */
-#define MB_CMDR_SIZE (8 * 1024 * 1024)
+#define MB_CMDR_SIZE_DEF (8 * 1024 * 1024)
/* Offset of cmd ring is size of mailbox */
-#define CMDR_OFF sizeof(struct tcmu_mailbox)
-#define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF)
+#define CMDR_OFF ((__u32)sizeof(struct tcmu_mailbox))
+#define CMDR_SIZE_DEF (MB_CMDR_SIZE_DEF - CMDR_OFF)
/*
* For data area, the default block size is PAGE_SIZE and
@@ -1617,6 +1617,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
+ udev->cmdr_size = CMDR_SIZE_DEF;
udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
mutex_init(&udev->cmdr_lock);
@@ -2189,7 +2190,7 @@ static int tcmu_configure_device(struct se_device *dev)
goto err_bitmap_alloc;
}
- mb = vzalloc(MB_CMDR_SIZE);
+ mb = vzalloc(udev->cmdr_size + CMDR_OFF);
if (!mb) {
ret = -ENOMEM;
goto err_vzalloc;
@@ -2198,10 +2199,9 @@ static int tcmu_configure_device(struct se_device *dev)
/* mailbox fits in first part of CMDR space */
udev->mb_addr = mb;
udev->cmdr = (void *)mb + CMDR_OFF;
- udev->cmdr_size = CMDR_SIZE;
- udev->data_off = MB_CMDR_SIZE;
+ udev->data_off = udev->cmdr_size + CMDR_OFF;
data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
- udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
+ udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT;
udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
udev->dbi_thresh = 0; /* Default in Idle state */
@@ -2221,7 +2221,7 @@ static int tcmu_configure_device(struct se_device *dev)
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
- info->mem[0].size = data_size + MB_CMDR_SIZE;
+ info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF;
info->mem[0].memtype = UIO_MEM_NONE;
info->irqcontrol = tcmu_irqcontrol;
@@ -2401,7 +2401,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
- Opt_err,
+ Opt_cmd_ring_size_mb, Opt_err,
};
static match_table_t tokens = {
@@ -2412,6 +2412,7 @@ static match_table_t tokens = {
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_max_data_area_mb, "max_data_area_mb=%d"},
{Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
+ {Opt_cmd_ring_size_mb, "cmd_ring_size_mb=%d"},
{Opt_err, NULL}
};
@@ -2509,6 +2510,41 @@ unlock:
return ret;
}
+static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for cmd_ring_size_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid cmd_ring_size_mb %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set cmd_ring_size_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->cmdr_size = (val << 20) - CMDR_OFF;
+ if (val > (MB_CMDR_SIZE_DEF >> 20)) {
+ pr_err("%d is too large. Adjusting cmd_ring_size_mb to global limit of %u\n",
+ val, (MB_CMDR_SIZE_DEF >> 20));
+ udev->cmdr_size = CMDR_SIZE_DEF;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
@@ -2563,6 +2599,9 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
case Opt_data_pages_per_blk:
ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
break;
+ case Opt_cmd_ring_size_mb:
+ ret = tcmu_set_cmd_ring_size(udev, &args[0]);
+ break;
default:
break;
}
@@ -2584,7 +2623,9 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
udev->dev_config[0] ? udev->dev_config : "NULL");
bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
- bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk);
+ bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk);
+ bl += sprintf(b + bl, "CmdRingSizeMB: %u\n",
+ (udev->cmdr_size + CMDR_OFF) >> 20);
return bl;
}
@@ -2693,6 +2734,17 @@ static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
}
CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
+static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ (udev->cmdr_size + CMDR_OFF) >> 20);
+}
+CONFIGFS_ATTR_RO(tcmu_, cmd_ring_size_mb);
+
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
@@ -3064,6 +3116,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_qfull_time_out,
&tcmu_attr_max_data_area_mb,
&tcmu_attr_data_pages_per_blk,
+ &tcmu_attr_cmd_ring_size_mb,
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
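
For reference, how the configurable ring size feeds the mmap layout in tcmu_configure_device(): the value written as cmd_ring_size_mb loses CMDR_OFF bytes to the mailbox, the data area starts right after mailbox plus ring, and the UIO region covers both. A small userspace sketch of that arithmetic (the CMDR_OFF value and 4 KiB page size are illustrative assumptions, not the kernel's actual sizeof(struct tcmu_mailbox)):

/* Sketch of the tcmu layout math after cmd_ring_size_mb is applied. */
#include <stdio.h>

#define CMDR_OFF          112u              /* illustrative mailbox size */
#define PAGE_SHIFT        12                /* assumes 4 KiB pages */
#define MB_CMDR_SIZE_DEF  (8u * 1024 * 1024)

int main(void)
{
	unsigned int val = 4;                           /* cmd_ring_size_mb=4 */
	unsigned int cmdr_size = (val << 20) - CMDR_OFF;

	if (val > (MB_CMDR_SIZE_DEF >> 20))             /* clamp to the 8 MB default */
		cmdr_size = MB_CMDR_SIZE_DEF - CMDR_OFF;

	/* what tcmu_configure_device() derives from cmdr_size: */
	unsigned int data_size  = 8u << 20;             /* data_area_mb=8 (example) */
	unsigned int data_off   = cmdr_size + CMDR_OFF; /* data area follows the ring */
	unsigned int mmap_pages = (data_size + cmdr_size + CMDR_OFF) >> PAGE_SHIFT;

	printf("cmdr_size=%u data_off=%u mmap_pages=%u\n",
	       cmdr_size, data_off, mmap_pages);
	return 0;
}

Per the diff, cmd_ring_size_mb can only be set before the device is configured (the store fails once data_bitmap exists), and oversized values are clamped back to the 8 MB default rather than rejecting the write.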