author	Mintz, Yuval <Yuval.Mintz@cavium.com>	2017-03-11 17:39:18 +0100
committer	David S. Miller <davem@davemloft.net>	2017-03-13 23:33:09 +0100
commit	be086e7c53f1fac51eed14523b28f2214b548dd2 (patch)
tree	3eb35b0075b6dac25726da6ce4f7d766de15a990 /drivers/scsi
parent	Merge branch 'mpls-ttl-propagation' (diff)
qed*: Utilize Firmware 8.15.3.0
This patch advances the qed* drivers to using the newer firmware -
this solves several firmware bugs, mostly related to [but not limited
to] various init/deinit issues in various offloaded protocols.

It also introduces a major 4-Cached SGE change in firmware, which can
be seen in the storage drivers' changes.

In addition, this firmware is required for supporting the new QL41xxx
series of adapters; while this patch doesn't add the actual support,
the firmware contains the necessary initialization & firmware logic to
operate such adapters [actual support would be added later on].

Changes from previous versions:
-------------------------------
 - V2 - fix kbuild-test robot warnings

Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/qedf/Makefile             |    2
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.c  |  190
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.h  |   93
-rw-r--r--  drivers/scsi/qedf/drv_scsi_fw_funcs.c  |   44
-rw-r--r--  drivers/scsi/qedf/drv_scsi_fw_funcs.h  |   85
-rw-r--r--  drivers/scsi/qedf/qedf.h               |   23
-rw-r--r--  drivers/scsi/qedf/qedf_els.c           |   25
-rw-r--r--  drivers/scsi/qedf/qedf_io.c            |  670
-rw-r--r--  drivers/scsi/qedi/Makefile             |    2
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c            | 1068
-rw-r--r--  drivers/scsi/qedi/qedi_fw_api.c        |  781
-rw-r--r--  drivers/scsi/qedi/qedi_fw_iscsi.h      |  117
-rw-r--r--  drivers/scsi/qedi/qedi_fw_scsi.h       |   55
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c         |   12
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.h         |    2
-rw-r--r--  drivers/scsi/qedi/qedi_version.h       |    4
16 files changed, 2104 insertions, 1069 deletions
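
The "4-Cached SGE" change referred to above means the firmware now caches up to four SGEs directly inside the task context; only the remainder of a longer list is walked from the host-resident SGL. As a rough illustration only (not part of the patch; the constant comes from the new drv_scsi_fw_funcs.c below, the helper function is hypothetical):

#define SCSI_NUM_SGES_IN_CACHE	0x4

/* Number of SGEs copied into the task-context cache; any further
 * SGEs are read by the firmware from the host SGL at run time.
 */
static u8 example_num_cached_sges(u16 num_sges)
{
	return num_sges > SCSI_NUM_SGES_IN_CACHE ?
	       SCSI_NUM_SGES_IN_CACHE : num_sges;
}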
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile
index 64e9f507ce32..414f2a772a5f 100644
--- a/drivers/scsi/qedf/Makefile
+++ b/drivers/scsi/qedf/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_QEDF) := qedf.o
qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
- qedf_attr.o qedf_els.o
+ qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
new file mode 100644
index 000000000000..bb812db48da6
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -0,0 +1,190 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_fcoe_fw_funcs.h"
+#include "drv_scsi_fw_funcs.h"
+
+#define FCOE_RX_ID ((u32)0x0000FFFF)
+
+static inline void init_common_sqe(struct fcoe_task_params *task_params,
+ enum fcoe_sqe_request_type request_type)
+{
+ memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
+ SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
+ request_type);
+ task_params->sqe->task_id = task_params->itid;
+}
+
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct regpair sense_data_buffer_phys_addr,
+ u32 task_retry_id,
+ u8 fcp_cmd_payload[32])
+{
+ struct fcoe_task_context *ctx = task_params->context;
+ struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+ struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+ u32 io_size, val;
+ bool slow_sgl;
+
+ memset(ctx, 0, sizeof(*(ctx)));
+ slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+ io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
+ task_params->tx_io_size : task_params->rx_io_size);
+
+ /* Ystorm ctx */
+ y_st_ctx = &ctx->ystorm_st_context;
+ y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+ y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
+ y_st_ctx->task_type = task_params->task_type;
+ memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
+ fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
+
+ /* Tstorm ctx */
+ t_st_ctx = &ctx->tstorm_st_context;
+ t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
+ FCOE_TASK_DEV_TYPE_TAPE :
+ FCOE_TASK_DEV_TYPE_DISK);
+ t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+ val = cpu_to_le32(task_params->cq_rss_number);
+ t_st_ctx->read_only.glbl_q_num = val;
+ t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
+ t_st_ctx->read_only.task_type = task_params->task_type;
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+ t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
+
+ /* Ustorm ctx */
+ u_ag_ctx = &ctx->ustorm_ag_context;
+ u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+ /* Mstorm buffer for sense/rsp data placement */
+ m_st_ctx = &ctx->mstorm_st_context;
+ val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
+ m_st_ctx->rsp_buf_addr.hi = val;
+ val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
+ m_st_ctx->rsp_buf_addr.lo = val;
+
+ if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
+ /* Ystorm ctx */
+ y_st_ctx->expect_first_xfer = 1;
+
+ /* Set the amount of super SGEs. Can be up to 4. */
+ SET_FIELD(y_st_ctx->sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ init_scsi_sgl_context(&y_st_ctx->sgl_params,
+ &y_st_ctx->data_desc,
+ sgl_task_params);
+
+ /* Mstorm ctx */
+ SET_FIELD(m_st_ctx->flags,
+ MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ } else {
+ /* Tstorm ctx */
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+
+ /* Mstorm ctx */
+ m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+ init_scsi_sgl_context(&m_st_ctx->sgl_params,
+ &m_st_ctx->data_desc,
+ sgl_task_params);
+ }
+
+ init_common_sqe(task_params, SEND_FCOE_CMD);
+ return 0;
+}
+
+int init_initiator_midpath_unsolicited_fcoe_task(
+ struct fcoe_task_params *task_params,
+ struct fcoe_tx_mid_path_params *mid_path_fc_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params,
+ u8 fw_to_place_fc_header)
+{
+ struct fcoe_task_context *ctx = task_params->context;
+ struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+ struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+ u32 val;
+
+ memset(ctx, 0, sizeof(*(ctx)));
+
+ /* Init Ystorm */
+ y_st_ctx = &ctx->ystorm_st_context;
+ init_scsi_sgl_context(&y_st_ctx->sgl_params,
+ &y_st_ctx->data_desc,
+ tx_sgl_task_params);
+ SET_FIELD(y_st_ctx->sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
+ y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
+ y_st_ctx->task_type = task_params->task_type;
+ memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
+ mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
+
+ /* Init Mstorm */
+ m_st_ctx = &ctx->mstorm_st_context;
+ init_scsi_sgl_context(&m_st_ctx->sgl_params,
+ &m_st_ctx->data_desc,
+ rx_sgl_task_params);
+ SET_FIELD(m_st_ctx->flags,
+ MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
+ fw_to_place_fc_header);
+ m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);
+
+ /* Init Tstorm */
+ t_st_ctx = &ctx->tstorm_st_context;
+ t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+ val = cpu_to_le32(task_params->cq_rss_number);
+ t_st_ctx->read_only.glbl_q_num = val;
+ t_st_ctx->read_only.task_type = task_params->task_type;
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+ t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
+
+ /* Init Ustorm */
+ u_ag_ctx = &ctx->ustorm_ag_context;
+ u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+ /* Init SQE */
+ init_common_sqe(task_params, SEND_FCOE_MIDPATH);
+ task_params->sqe->additional_info_union.burst_length =
+ tx_sgl_task_params->total_buffer_size;
+ SET_FIELD(task_params->sqe->flags,
+ FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
+ SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
+ SCSI_FAST_SGL);
+
+ return 0;
+}
+
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
+{
+ init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
+ return 0;
+}
+
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
+{
+ init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
+ return 0;
+}
+
+int init_initiator_sequence_recovery_fcoe_task(
+ struct fcoe_task_params *task_params, u32 off)
+{
+ init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
+ task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+ return 0;
+}
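
The call sites in qedf_io.c (further down in this patch) drive the read/write initializer by filling two parameter blocks first. A condensed sketch of that usage, assuming both blocks were zeroed beforehand and a zeroed fcoe_wqe was already claimed from the send queue; the function and variable names here are illustrative, not from the patch (U64_LO/U64_HI come from qedf.h):

/* Sketch: prepare the parameter blocks and hand them to the HSI helper */
static void example_start_write_task(struct fcoe_task_params *tp,
				     struct scsi_sgl_task_params *sp,
				     struct fcoe_task_context *ctx,
				     struct fcoe_wqe *sqe,
				     struct scsi_sge *sgl, dma_addr_t sgl_dma,
				     u16 num_sges, u32 xfer_len,
				     struct regpair sense_buf,
				     u8 fcp_cmnd[32])
{
	tp->context = ctx;		/* task memory for this itid */
	tp->sqe = sqe;			/* zeroed SQE to be filled */
	tp->task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
	tp->tx_io_size = xfer_len;

	sp->sgl = sgl;
	sp->sgl_phys_addr.lo = U64_LO(sgl_dma);
	sp->sgl_phys_addr.hi = U64_HI(sgl_dma);
	sp->num_sges = num_sges;
	sp->total_buffer_size = xfer_len;

	init_initiator_rw_fcoe_task(tp, sp, sense_buf,
				    0 /* task_retry_id */, fcp_cmnd);
}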
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
new file mode 100644
index 000000000000..617529b058f4
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -0,0 +1,93 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _FCOE_FW_FUNCS_H
+#define _FCOE_FW_FUNCS_H
+#include "drv_scsi_fw_funcs.h"
+#include "qedf_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct fcoe_task_params {
+ /* Output parameter [set/filled by the HSI function] */
+ struct fcoe_task_context *context;
+
+ /* Output parameter [set/filled by the HSI function] */
+ struct fcoe_wqe *sqe;
+ enum fcoe_task_type task_type;
+ u32 tx_io_size; /* in bytes */
+ u32 rx_io_size; /* in bytes */
+ u32 conn_cid;
+ u16 itid;
+ u8 cq_rss_number;
+
+ /* Whether it's Tape device or not (0=Disk, 1=Tape) */
+ u8 is_tape_device;
+};
+
+/**
+ * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for
+ * read/write task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param sgl_task_params - Pointer to SGL task params
+ * @param sense_data_buffer_phys_addr - Pointer to sense data buffer
+ * @param task_retry_id - retry identification - used only for tape devices
+ * @param fcp_cmd_payload - FCP CMD payload
+ */
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct regpair sense_data_buffer_phys_addr,
+ u32 task_retry_id,
+ u8 fcp_cmd_payload[32]);
+
+/**
+ * @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes FCoE task
+ * context for midpath/unsolicited task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param mid_path_fc_header - FC header
+ * @param tx_sgl_task_params - Pointer to Tx SGL task params
+ * @param rx_sgl_task_params - Pointer to Rx SGL task params
+ * @param fw_to_place_fc_header - Indication whether the FW should place the
+ * FC header in addition to the data that arrives.
+ */
+int init_initiator_midpath_unsolicited_fcoe_task(
+ struct fcoe_task_params *task_params,
+ struct fcoe_tx_mid_path_params *mid_path_fc_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params,
+ u8 fw_to_place_fc_header);
+
+/**
+ * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for
+ * abort task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for
+ * cleanup task types and init fcoe_sqe
+ *
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task
+ * context for sequence recovery task types and init fcoe_sqe
+ *
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param desired_offset - The desired offset the task will be re-sent from
+ */
+int init_initiator_sequence_recovery_fcoe_task(
+ struct fcoe_task_params *task_params,
+ u32 desired_offset);
+#endif
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
new file mode 100644
index 000000000000..11e0cc082ec0
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -0,0 +1,44 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_scsi_fw_funcs.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params)
+{
+ /* no need to check for sgl_task_params->sgl validity */
+ u8 num_sges_to_init = sgl_task_params->num_sges >
+ SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
+ sgl_task_params->num_sges;
+ u8 sge_index;
+ u32 val;
+
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->total_buffer_size);
+ ctx_sgl_params->sgl_total_length = val;
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+ ctx_data_desc->sge[sge_index].sge_len = val;
+ }
+}
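
Note that init_scsi_sgl_context() performs the CPU-to-little-endian conversion itself; callers pass native-endian values already split into 32-bit halves. A sketch of that split using the U64_HI/U64_LO helpers that qedf.h provides (illustrative only, not part of the patch):

/* Sketch: split a 64-bit DMA address into the regpair halves that
 * init_scsi_sgl_context() expects (still native endian at this point).
 */
static void example_fill_regpair(struct regpair *rp, dma_addr_t addr)
{
	rp->lo = U64_LO(addr);
	rp->hi = U64_HI(addr);
}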
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
new file mode 100644
index 000000000000..9cb45410bc45
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -0,0 +1,85 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _SCSI_FW_FUNCS_H
+#define _SCSI_FW_FUNCS_H
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/fcoe_common.h>
+
+struct scsi_sgl_task_params {
+ struct scsi_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+
+	/* true if SGL contains a small (< 4KB) SGE in the middle (not 1st or last)
+ * -> relevant for tx only
+ */
+ bool small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+ u32 initial_ref_tag;
+ bool initial_ref_tag_is_valid;
+ u16 application_tag;
+ u16 application_tag_mask;
+ u16 dif_block_size_log;
+ bool dif_on_network;
+ bool dif_on_host;
+ u8 host_guard_type;
+ u8 protection_type;
+ u8 ref_tag_mask;
+ bool crc_seed;
+
+ /* Enable Connection error upon DIF error (segments with DIF errors are
+ * dropped)
+ */
+ bool tx_dif_conn_err_en;
+ bool ignore_app_tag;
+ bool keep_ref_tag_const;
+ bool validate_guard;
+ bool validate_app_tag;
+ bool validate_ref_tag;
+ bool forward_guard;
+ bool forward_app_tag;
+ bool forward_ref_tag;
+ bool forward_app_tag_with_mask;
+ bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+ /* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
+ * pointer to the CDB buffer SGE
+ */
+ struct scsi_sge extended_cdb_sge;
+
+ /* Physical address of sense data buffer for sense data - 256B buffer */
+ struct regpair sense_data_buffer_phys_addr;
+};
+
+/**
+ * @brief scsi_is_slow_sgl - checks for slow SGL
+ *
+ * @param num_sges - number of sges in SGL
+ * @param small_mid_sge - True if the SGL contains an SGE smaller than 4KB
+ * that is not the 1st or last SGE in the SGL
+ */
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);
+
+/**
+ * @brief init_scsi_sgl_context - initializes SGL task context
+ *
+ * @param sgl_params - SGL context parameters to initialize (output parameter)
+ * @param ctx_data_desc - context struct containing SGEs array to set (output
+ * parameter)
+ * @param sgl_task_params - SGL parameters (input)
+ */
+void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params);
+#endif
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 96346a1b1515..40aeb6bb96a2 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -26,6 +26,7 @@
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
+#include "drv_fcoe_fw_funcs.h"
/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
@@ -59,19 +60,17 @@
#define UPSTREAM_KEEP 1
struct qedf_mp_req {
- uint8_t tm_flags;
-
uint32_t req_len;
void *req_buf;
dma_addr_t req_buf_dma;
- struct fcoe_sge *mp_req_bd;
+ struct scsi_sge *mp_req_bd;
dma_addr_t mp_req_bd_dma;
struct fc_frame_header req_fc_hdr;
uint32_t resp_len;
void *resp_buf;
dma_addr_t resp_buf_dma;
- struct fcoe_sge *mp_resp_bd;
+ struct scsi_sge *mp_resp_bd;
dma_addr_t mp_resp_bd_dma;
struct fc_frame_header resp_fc_hdr;
};
@@ -119,6 +118,7 @@ struct qedf_ioreq {
#define QEDF_CMD_IN_CLEANUP 0x2
#define QEDF_CMD_SRR_SENT 0x3
u8 io_req_flags;
+ uint8_t tm_flags;
struct qedf_rport *fcport;
unsigned long flags;
enum qedf_ioreq_event event;
@@ -130,6 +130,8 @@ struct qedf_ioreq {
struct completion tm_done;
struct completion abts_done;
struct fcoe_task_context *task;
+ struct fcoe_task_params *task_params;
+ struct scsi_sgl_task_params *sgl_task_params;
int idx;
/*
* Need to allocate enough room for both sense data and FCP response data
@@ -199,8 +201,8 @@ struct qedf_rport {
dma_addr_t sq_pbl_dma;
u32 sq_pbl_size;
u32 sid;
-#define QEDF_RPORT_TYPE_DISK 1
-#define QEDF_RPORT_TYPE_TAPE 2
+#define QEDF_RPORT_TYPE_DISK 0
+#define QEDF_RPORT_TYPE_TAPE 1
uint dev_type; /* Disk or tape */
struct list_head peers;
};
@@ -391,7 +393,7 @@ struct qedf_ctx {
struct io_bdt {
struct qedf_ioreq *io_req;
- struct fcoe_sge *bd_tbl;
+ struct scsi_sge *bd_tbl;
dma_addr_t bd_tbl_dma;
u16 bd_valid;
};
@@ -400,7 +402,7 @@ struct qedf_cmd_mgr {
struct qedf_ctx *qedf;
u16 idx;
struct io_bdt **io_bdt_pool;
-#define FCOE_PARAMS_NUM_TASKS 4096
+#define FCOE_PARAMS_NUM_TASKS 2048
struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
spinlock_t lock;
atomic_t free_list_cnt;
@@ -465,9 +467,8 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx);
-extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
- u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req);
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59f3e5c73a13..c505d41f6dc8 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -25,6 +25,9 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
uint16_t xid;
uint32_t start_time = jiffies / HZ;
uint32_t current_time;
+ struct fcoe_wqe *sqe;
+ unsigned long flags;
+ u16 sqe_idx;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
@@ -113,20 +116,25 @@ retry_els:
/* Obtain exchange id */
xid = els_req->xid;
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
- qedf_init_mp_task(els_req, task);
+ qedf_init_mp_task(els_req, task, sqe);
/* Put timer on original I/O request */
if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
-
/* Ring doorbell */
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
"req\n");
qedf_ring_doorbell(fcport);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
return rc;
}
@@ -604,6 +612,8 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
struct qedf_rport *fcport;
unsigned long flags;
struct qedf_els_cb_arg *cb_arg;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
fcport = orig_io_req->fcport;
@@ -631,8 +641,13 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, orig_io_req->xid, 0,
- FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ orig_io_req->task_params->sqe = sqe;
+
+ init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
+ offset);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
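
The ELS changes above show the pattern that replaces qedf_add_to_sq() throughout the driver: claim and zero an SQE under rport_lock, point task_params->sqe at it, call the matching init_initiator_*_fcoe_task() helper, then ring the doorbell. A distilled sketch (the function name is illustrative; the locking and helpers are as used in the patch):

static void example_send_abts(struct qedf_rport *fcport,
			      struct qedf_ioreq *io_req)
{
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);	/* consumes one producer slot */
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}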
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ee0dcf9d3aba..af2294635ab2 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -96,7 +96,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
if (!cmgr->io_bdt_pool)
goto free_cmd_pool;
- bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
+ bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
for (i = 0; i < num_ios; i++) {
bdt_info = cmgr->io_bdt_pool[i];
if (bdt_info->bd_tbl) {
@@ -119,6 +119,8 @@ free_cmd_pool:
for (i = 0; i < num_ios; i++) {
io_req = &cmgr->cmds[i];
+ kfree(io_req->sgl_task_params);
+ kfree(io_req->task_params);
/* Make sure we free per command sense buffer */
if (io_req->sense_buffer)
dma_free_coherent(&qedf->pdev->dev,
@@ -178,7 +180,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
spin_lock_init(&cmgr->lock);
/*
- * Initialize list of qedf_ioreq.
+ * Initialize I/O request fields.
*/
xid = QEDF_MIN_XID;
@@ -196,6 +198,29 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
GFP_KERNEL);
if (!io_req->sense_buffer)
goto mem_err;
+
+		/* Allocate task parameters to pass to f/w init functions */
+ io_req->task_params = kzalloc(sizeof(*io_req->task_params),
+ GFP_KERNEL);
+ if (!io_req->task_params) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to allocate task_params for xid=0x%x\n",
+ i);
+ goto mem_err;
+ }
+
+ /*
+ * Allocate scatter/gather list info to pass to f/w init
+ * functions.
+ */
+ io_req->sgl_task_params = kzalloc(
+ sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
+ if (!io_req->sgl_task_params) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to allocate sgl_task_params for xid=0x%x\n",
+ i);
+ goto mem_err;
+ }
}
/* Allocate pool of io_bdts - one for each qedf_ioreq */
@@ -211,8 +236,8 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
GFP_KERNEL);
if (!cmgr->io_bdt_pool[i]) {
- QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
- "io_bdt_pool[%d].\n", i);
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "Failed to alloc io_bdt_pool[%d].\n", i);
goto mem_err;
}
}
@@ -220,11 +245,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
for (i = 0; i < num_ios; i++) {
bdt_info = cmgr->io_bdt_pool[i];
bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
- QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
+ QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
&bdt_info->bd_tbl_dma, GFP_KERNEL);
if (!bdt_info->bd_tbl) {
- QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
- "bdt_tbl[%d].\n", i);
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "Failed to alloc bdt_tbl[%d].\n", i);
goto mem_err;
}
}
@@ -318,6 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
}
bd_tbl->io_req = io_req;
io_req->cmd_type = cmd_type;
+ io_req->tm_flags = 0;
/* Reset sequence offset data */
io_req->rx_buf_off = 0;
@@ -336,10 +362,9 @@ static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_ctx *qedf = io_req->fcport->qedf;
- uint64_t sz = sizeof(struct fcoe_sge);
+ uint64_t sz = sizeof(struct scsi_sge);
/* clear tm flags */
- mp_req->tm_flags = 0;
if (mp_req->mp_req_bd) {
dma_free_coherent(&qedf->pdev->dev, sz,
mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
@@ -387,7 +412,7 @@ void qedf_release_cmd(struct kref *ref)
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
int bd_index)
{
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
int frag_size, sg_frags;
sg_frags = 0;
@@ -398,7 +423,7 @@ static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
frag_size = sg_len;
bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
- bd[bd_index + sg_frags].size = (uint16_t)frag_size;
+ bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
addr += (u64)frag_size;
sg_frags++;
@@ -413,7 +438,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
struct scatterlist *sg;
int byte_count = 0;
int sg_count = 0;
@@ -439,7 +464,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
bd[bd_count].sge_addr.hi = (addr >> 32);
- bd[bd_count].size = (u16)sg_len;
+ bd[bd_count].sge_len = (u16)sg_len;
return ++bd_count;
}
@@ -480,7 +505,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
sg_frags = 1;
bd[bd_count].sge_addr.lo = U64_LO(addr);
bd[bd_count].sge_addr.hi = U64_HI(addr);
- bd[bd_count].size = (uint16_t)sg_len;
+ bd[bd_count].sge_len = (uint16_t)sg_len;
}
bd_count += sg_frags;
@@ -498,7 +523,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
struct scsi_cmnd *sc = io_req->sc_cmd;
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
int bd_count;
if (scsi_sg_count(sc)) {
@@ -508,7 +533,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
} else {
bd_count = 0;
bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
- bd[0].size = 0;
+ bd[0].sge_len = 0;
}
io_req->bd_tbl->bd_valid = bd_count;
@@ -529,430 +554,223 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
/* 4 bytes: flag info */
fcp_cmnd->fc_pri_ta = 0;
- fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_tm_flags = io_req->tm_flags;
fcp_cmnd->fc_flags = io_req->io_req_flags;
fcp_cmnd->fc_cmdref = 0;
/* Populate data direction */
- if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
- fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
- else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+ } else {
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
+ else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+ }
fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
/* 16 bytes: CDB information */
- memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+ if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
/* 4 bytes: FCP data length */
fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
-
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, u32 *ptu_invalidate,
- struct fcoe_task_context *task_ctx)
+ struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+ struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct io_bdt *bd_tbl = io_req->bd_tbl;
- union fcoe_data_desc_ctx *data_desc;
- u32 *fcp_cmnd;
+ u8 fcp_cmnd[32];
u32 tmp_fcp_cmnd[8];
- int cnt, i;
- int bd_count;
+ int bd_count = 0;
struct qedf_ctx *qedf = fcport->qedf;
uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
- u8 tmp_sgl_mode = 0;
- u8 mst_sgl_mode = 0;
+ struct regpair sense_data_buffer_phys_addr;
+ u32 tx_io_size = 0;
+ u32 rx_io_size = 0;
+ int i, cnt;
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ /* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
+ memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
- task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
- else
+	/* Set task type based on the DMA direction of the command */
+ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
task_type = FCOE_TASK_TYPE_READ_INITIATOR;
-
- /* Y Storm context */
- task_ctx->ystorm_st_context.expect_first_xfer = 1;
- task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
- /* Check if this is required */
- task_ctx->ystorm_st_context.ox_id = io_req->xid;
- task_ctx->ystorm_st_context.task_rety_identifier =
- io_req->task_retry_identifier;
-
- /* T Storm ag context */
- SET_FIELD(task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
- task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
-
- /* T Storm st context */
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
- 1);
- task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
-
- task_ctx->tstorm_st_context.read_only.dev_type =
- FCOE_TASK_DEV_TYPE_DISK;
- task_ctx->tstorm_st_context.read_only.conf_supported = 0;
- task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
-
- /* Completion queue for response. */
- task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
- task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
- io_req->data_xfer_len;
- task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
- lport->e_d_tov;
-
- task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
- io_req->fp_idx = cq_idx;
-
- bd_count = bd_tbl->bd_valid;
- if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
- /* Setup WRITE task */
- struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
- task_ctx->ystorm_st_context.task_type =
- FCOE_TASK_TYPE_WRITE_INITIATOR;
- data_desc = &task_ctx->ystorm_st_context.data_desc;
-
- if (io_req->use_slowpath) {
- SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
- FCOE_SLOW_SGL);
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->slow.remainder_num_sges = bd_count;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
- } else {
- SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
- (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
- FCOE_MUL_FAST_SGES);
-
- if (bd_count == 1) {
- data_desc->single_sge.sge_addr.lo =
- fcoe_bd_tbl->sge_addr.lo;
- data_desc->single_sge.sge_addr.hi =
- fcoe_bd_tbl->sge_addr.hi;
- data_desc->single_sge.size =
- fcoe_bd_tbl->size;
- data_desc->single_sge.is_valid_sge = 0;
- qedf->single_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
- } else {
- data_desc->fast.sgl_start_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_start_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_byte_offset =
- data_desc->fast.sgl_start_addr.lo &
- (QEDF_PAGE_SIZE - 1);
- if (data_desc->fast.sgl_byte_offset > 0)
- QEDF_ERR(&(qedf->dbg_ctx),
- "byte_offset=%u for xid=0x%x.\n",
- io_req->xid,
- data_desc->fast.sgl_byte_offset);
- data_desc->fast.task_reuse_cnt =
- io_req->reuse_count;
- io_req->reuse_count++;
- if (io_req->reuse_count == QEDF_MAX_REUSE) {
- *ptu_invalidate = 1;
- io_req->reuse_count = 0;
- }
- qedf->fast_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_FAST_SGE;
- }
- }
-
- /* T Storm context */
- task_ctx->tstorm_st_context.read_only.task_type =
- FCOE_TASK_TYPE_WRITE_INITIATOR;
-
- /* M Storm context */
- tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
- SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
- tmp_sgl_mode);
-
} else {
- /* Setup READ task */
-
- /* M Storm context */
- struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
- data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
- task_ctx->mstorm_st_context.fp.data_2_trns_rem =
- io_req->data_xfer_len;
-
- if (io_req->use_slowpath) {
- SET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
- FCOE_SLOW_SGL);
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->slow.remainder_num_sges =
- bd_count;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
+ tx_io_size = io_req->data_xfer_len;
} else {
- SET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
- (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
- FCOE_MUL_FAST_SGES);
-
- if (bd_count == 1) {
- data_desc->single_sge.sge_addr.lo =
- fcoe_bd_tbl->sge_addr.lo;
- data_desc->single_sge.sge_addr.hi =
- fcoe_bd_tbl->sge_addr.hi;
- data_desc->single_sge.size =
- fcoe_bd_tbl->size;
- data_desc->single_sge.is_valid_sge = 0;
- qedf->single_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
- } else {
- data_desc->fast.sgl_start_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_start_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_byte_offset = 0;
- data_desc->fast.task_reuse_cnt =
- io_req->reuse_count;
- io_req->reuse_count++;
- if (io_req->reuse_count == QEDF_MAX_REUSE) {
- *ptu_invalidate = 1;
- io_req->reuse_count = 0;
- }
- qedf->fast_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_FAST_SGE;
- }
+ task_type = FCOE_TASK_TYPE_READ_INITIATOR;
+ rx_io_size = io_req->data_xfer_len;
}
-
- /* Y Storm context */
- task_ctx->ystorm_st_context.expect_first_xfer = 0;
- task_ctx->ystorm_st_context.task_type =
- FCOE_TASK_TYPE_READ_INITIATOR;
-
- /* T Storm context */
- task_ctx->tstorm_st_context.read_only.task_type =
- FCOE_TASK_TYPE_READ_INITIATOR;
- mst_sgl_mode = GET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
- mst_sgl_mode);
}
+ /* Setup the fields for fcoe_task_params */
+ io_req->task_params->context = task_ctx;
+ io_req->task_params->sqe = sqe;
+ io_req->task_params->task_type = task_type;
+ io_req->task_params->tx_io_size = tx_io_size;
+ io_req->task_params->rx_io_size = rx_io_size;
+ io_req->task_params->conn_cid = fcport->fw_cid;
+ io_req->task_params->itid = io_req->xid;
+ io_req->task_params->cq_rss_number = cq_idx;
+ io_req->task_params->is_tape_device = fcport->dev_type;
+
+ /* Fill in information for scatter/gather list */
+ if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
+ bd_count = bd_tbl->bd_valid;
+ io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
+ io_req->sgl_task_params->sgl_phys_addr.lo =
+ U64_LO(bd_tbl->bd_tbl_dma);
+ io_req->sgl_task_params->sgl_phys_addr.hi =
+ U64_HI(bd_tbl->bd_tbl_dma);
+ io_req->sgl_task_params->num_sges = bd_count;
+ io_req->sgl_task_params->total_buffer_size =
+ scsi_bufflen(io_req->sc_cmd);
+ io_req->sgl_task_params->small_mid_sge =
+ io_req->use_slowpath;
+ }
+
+ /* Fill in physical address of sense buffer */
+ sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
+ sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
+
/* fill FCP_CMND IU */
- fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
- qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+ qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
/* Swap fcp_cmnd since FC is big endian */
cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
-
for (i = 0; i < cnt; i++) {
- *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
- fcp_cmnd++;
+ tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
+ }
+ memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
+
+ init_initiator_rw_fcoe_task(io_req->task_params,
+ io_req->sgl_task_params,
+ sense_data_buffer_phys_addr,
+ io_req->task_retry_identifier, fcp_cmnd);
+
+ /* Increment SGL type counters */
+ if (bd_count == 1) {
+ qedf->single_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
+ } else if (io_req->use_slowpath) {
+ qedf->slow_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ } else {
+ qedf->fast_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
}
-
- /* M Storm context - Sense buffer */
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
- U64_LO(io_req->sense_buffer_dma);
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
- U64_HI(io_req->sense_buffer_dma);
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx)
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
struct qedf_ctx *qedf = io_req->fcport->qedf;
struct fc_frame_header *fc_hdr;
- enum fcoe_task_type task_type = 0;
- union fcoe_data_desc_ctx *data_desc;
+ struct fcoe_tx_mid_path_params task_fc_hdr;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
- "for cmd_type = %d\n", io_req->cmd_type);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Initializing MP task for cmd_type=%d\n",
+ io_req->cmd_type);
qedf->control_requests++;
- /* Obtain task_type */
- if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
- (io_req->cmd_type == QEDF_ELS)) {
- task_type = FCOE_TASK_TYPE_MIDPATH;
- } else if (io_req->cmd_type == QEDF_ABTS) {
- task_type = FCOE_TASK_TYPE_ABTS;
- }
-
+ memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
io_req->task = task_ctx;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
- task_type);
-
- /* YSTORM only */
- {
- /* Initialize YSTORM task context */
- struct fcoe_tx_mid_path_params *task_fc_hdr =
- &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
- memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
- task_ctx->ystorm_st_context.task_rety_identifier =
- io_req->task_retry_identifier;
-
- /* Init SGL parameters */
- if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
- (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
- data_desc = &task_ctx->ystorm_st_context.data_desc;
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(mp_req->mp_req_bd_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(mp_req->mp_req_bd_dma);
- data_desc->slow.remainder_num_sges = 1;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- }
-
- fc_hdr = &(mp_req->req_fc_hdr);
- if (task_type == FCOE_TASK_TYPE_MIDPATH) {
- fc_hdr->fh_ox_id = io_req->xid;
- fc_hdr->fh_rx_id = htons(0xffff);
- } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
- fc_hdr->fh_rx_id = io_req->xid;
- }
+ /* Setup the fields for fcoe_task_params */
+ io_req->task_params->context = task_ctx;
+ io_req->task_params->sqe = sqe;
+ io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
+ io_req->task_params->tx_io_size = io_req->data_xfer_len;
+ /* rx_io_size tells the f/w how large a response buffer we have */
+ io_req->task_params->rx_io_size = PAGE_SIZE;
+ io_req->task_params->conn_cid = fcport->fw_cid;
+ io_req->task_params->itid = io_req->xid;
+ /* Return middle path commands on CQ 0 */
+ io_req->task_params->cq_rss_number = 0;
+ io_req->task_params->is_tape_device = fcport->dev_type;
+
+ fc_hdr = &(mp_req->req_fc_hdr);
+ /* Set OX_ID and RX_ID based on driver task id */
+ fc_hdr->fh_ox_id = io_req->xid;
+ fc_hdr->fh_rx_id = htons(0xffff);
+
+ /* Set up FC header information */
+ task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
+ task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
+ task_fc_hdr.type = fc_hdr->fh_type;
+ task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
+ task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
+ task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
+ task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
+
+ /* Set up s/g list parameters for request buffer */
+ tx_sgl_task_params.sgl = mp_req->mp_req_bd;
+ tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
+ tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
+ tx_sgl_task_params.num_sges = 1;
+	/* Request buffer covers the full transfer length */
+ tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
+ tx_sgl_task_params.small_mid_sge = 0;
+
+	/* Set up s/g list parameters for response buffer */
+ rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
+ rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
+ rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
+ rx_sgl_task_params.num_sges = 1;
+	/* Response buffer is a single PAGE_SIZE s/g element for now */
+ rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
+ rx_sgl_task_params.small_mid_sge = 0;
- /* Fill FC Header into middle path buffer */
- task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
- task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
- task_fc_hdr->type = fc_hdr->fh_type;
- task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
- task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
- task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
- task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
-
- task_ctx->ystorm_st_context.data_2_trns_rem =
- io_req->data_xfer_len;
- task_ctx->ystorm_st_context.task_type = task_type;
- }
-
- /* TSTORM ONLY */
- {
- task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
- task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
- /* Always send middle-path repsonses on CQ #0 */
- task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
- io_req->fp_idx = 0;
- SET_FIELD(task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
- PROTOCOLID_FCOE);
- task_ctx->tstorm_st_context.read_only.task_type = task_type;
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
- 1);
- task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
- }
-
- /* MSTORM only */
- {
- if (task_type == FCOE_TASK_TYPE_MIDPATH) {
- /* Initialize task context */
- data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
-
- /* Set cache sges address and length */
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(mp_req->mp_resp_bd_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(mp_req->mp_resp_bd_dma);
- data_desc->slow.remainder_num_sges = 1;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- /*
- * Also need to fil in non-fastpath response address
- * for middle path commands.
- */
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
- U64_LO(mp_req->mp_resp_bd_dma);
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
- U64_HI(mp_req->mp_resp_bd_dma);
- }
- }
-
- /* USTORM ONLY */
- {
- task_ctx->ustorm_ag_context.global_cq_num = 0;
- }
+	/*
+	 * Last arg (fw_to_place_fc_header) is 0 since the previous code
+	 * never requested that the FW place the FC header with the data.
+	 */
+ init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
+ &task_fc_hdr,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params, 0);
- /* I/O stats. Middle path commands always use slow SGEs */
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ /* Midpath requests always consume 1 SGE */
+ qedf->single_sge_ios++;
}
-void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
- enum fcoe_task_type req_type, u32 offset)
+/* Presumed that fcport->rport_lock is held */
+u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
- struct fcoe_wqe *sqe;
uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
+ u16 rval;
- sqe = &fcport->sq[fcport->sq_prod_idx];
+ rval = fcport->sq_prod_idx;
+ /* Adjust ring index */
fcport->sq_prod_idx++;
fcport->fw_sq_prod_idx++;
if (fcport->sq_prod_idx == total_sqe)
fcport->sq_prod_idx = 0;
- switch (req_type) {
- case FCOE_TASK_TYPE_WRITE_INITIATOR:
- case FCOE_TASK_TYPE_READ_INITIATOR:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
- if (ptu_invalidate)
- SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
- break;
- case FCOE_TASK_TYPE_MIDPATH:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
- break;
- case FCOE_TASK_TYPE_ABTS:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- SEND_FCOE_ABTS_REQUEST);
- break;
- case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- FCOE_EXCHANGE_CLEANUP);
- break;
- case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- FCOE_SEQUENCE_RECOVERY);
- /* NOTE: offset param only used for sequence recovery */
- sqe->additional_info_union.seq_rec_updated_offset = offset;
- break;
- case FCOE_TASK_TYPE_UNSOLICITED:
- break;
- default:
- break;
- }
-
- sqe->task_id = xid;
-
- /* Make sure SQ data is coherent */
- wmb();
-
+ return rval;
}
void qedf_ring_doorbell(struct qedf_rport *fcport)
@@ -1029,7 +847,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct fcoe_task_context *task_ctx;
u16 xid;
enum fcoe_task_type req_type = 0;
- u32 ptu_invalidate = 0;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
/* Initialize rest of io_req fields */
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
@@ -1061,6 +880,16 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EAGAIN;
}
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ }
+
+ /* Obtain free SQE */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
/* Get the task context */
task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
if (!task_ctx) {
@@ -1070,15 +899,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EINVAL;
}
- qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
-
- if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
- kref_put(&io_req->refcount, qedf_release_cmd);
- }
-
- /* Obtain free SQ entry */
- qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
+ qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
/* Ring doorbell */
qedf_ring_doorbell(fcport);
@@ -1661,6 +1482,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
u32 r_a_tov = 0;
int rc = 0;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
r_a_tov = rdata->r_a_tov;
lport = qedf->lport;
@@ -1712,10 +1535,12 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
spin_lock_irqsave(&fcport->rport_lock, flags);
- /* Add ABTS to send queue */
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ io_req->task_params->sqe = sqe;
- /* Ring doorbell */
+ init_initiator_abort_fcoe_task(io_req->task_params);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
@@ -1784,8 +1609,8 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
struct qedf_mp_req *mp_req;
- struct fcoe_sge *mp_req_bd;
- struct fcoe_sge *mp_resp_bd;
+ struct scsi_sge *mp_req_bd;
+ struct scsi_sge *mp_resp_bd;
struct qedf_ctx *qedf = io_req->fcport->qedf;
dma_addr_t addr;
uint64_t sz;
@@ -1819,7 +1644,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
}
/* Allocate and map mp_req_bd and mp_resp_bd */
- sz = sizeof(struct fcoe_sge);
+ sz = sizeof(struct scsi_sge);
mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
&mp_req->mp_req_bd_dma, GFP_KERNEL);
if (!mp_req->mp_req_bd) {
@@ -1841,7 +1666,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->sge_addr.lo = U64_LO(addr);
mp_req_bd->sge_addr.hi = U64_HI(addr);
- mp_req_bd->size = QEDF_PAGE_SIZE;
+ mp_req_bd->sge_len = QEDF_PAGE_SIZE;
/*
* MP buffer is either a task mgmt command or an ELS.
@@ -1852,7 +1677,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
addr = mp_req->resp_buf_dma;
mp_resp_bd->sge_addr.lo = U64_LO(addr);
mp_resp_bd->sge_addr.hi = U64_HI(addr);
- mp_resp_bd->size = QEDF_PAGE_SIZE;
+ mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
return 0;
}
@@ -1895,6 +1720,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
fcport = io_req->fcport;
if (!fcport) {
@@ -1940,12 +1767,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
init_completion(&io_req->tm_done);
- /* Obtain free SQ entry */
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
- /* Ring doorbell */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ io_req->task_params->sqe = sqe;
+
+ init_initiator_cleanup_fcoe_task(io_req->task_params);
qedf_ring_doorbell(fcport);
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -1991,16 +1822,15 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct qedf_mp_req *tm_req;
struct fcoe_task_context *task;
- struct fc_frame_header *fc_hdr;
- struct fcp_cmnd *fcp_cmnd;
struct qedf_ctx *qedf = fcport->qedf;
+ struct fc_lport *lport = qedf->lport;
int rc = 0;
uint16_t xid;
- uint32_t sid, did;
int tmo = 0;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
if (!sc_cmd) {
QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
@@ -2031,36 +1861,14 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Set the return CPU to be the same as the request one */
io_req->cpu = smp_processor_id();
- tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
-
- rc = qedf_init_mp_req(io_req);
- if (rc == FAILED) {
- QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
- "failed\n");
- kref_put(&io_req->refcount, qedf_release_cmd);
- goto reset_tmf_err;
- }
-
/* Set TM flags */
- io_req->io_req_flags = 0;
- tm_req->tm_flags = tm_flags;
+ io_req->io_req_flags = QEDF_READ;
+ io_req->data_xfer_len = 0;
+ io_req->tm_flags = tm_flags;
/* Default is to return a SCSI command when an error occurs */
io_req->return_scsi_cmd_on_abts = true;
- /* Fill FCP_CMND */
- qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
- fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
- memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
- fcp_cmnd->fc_dl = 0;
-
- /* Fill FC header */
- fc_hdr = &(tm_req->req_fc_hdr);
- sid = fcport->sid;
- did = fcport->rdata->ids.port_id;
- __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
- FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
- FC_FC_SEQ_INIT, 0);
/* Obtain exchange id */
xid = io_req->xid;
@@ -2069,16 +1877,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
- qedf_init_mp_task(io_req, task);
init_completion(&io_req->tm_done);
- /* Obtain free SQ entry */
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
- /* Ring doorbell */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
+ qedf_init_task(fcport, lport, io_req, task, sqe);
qedf_ring_doorbell(fcport);
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -2162,14 +1972,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
struct fcoe_cqe_rsp_info *fcp_rsp;
- struct fcoe_cqe_midpath_info *mp_info;
-
-
- /* Get TMF response length from CQE */
- mp_info = &cqe->cqe_info.midpath_info;
- io_req->mp_req.resp_len = mp_info->data_placement_size;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
- "Response len is %d.\n", io_req->mp_req.resp_len);
fcp_rsp = &cqe->cqe_info.rsp_info;
qedf_parse_fcp_rsp(io_req, fcp_rsp);
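
qedf_get_sqe_idx() above is a plain producer-ring index: it returns the current slot and advances, wrapping at the queue size, so with sq_mem_size/sizeof(struct fcoe_wqe) == 128 entries, slot 127 is handed out and the producer returns to 0. A minimal standalone sketch of the same arithmetic (assumes the caller serializes access, as rport_lock does in the patch):

static u16 example_get_prod_idx(u16 *prod_idx, u16 total_sqe)
{
	u16 rval = *prod_idx;		/* slot handed to the caller */

	if (++(*prod_idx) == total_sqe)
		*prod_idx = 0;		/* wrap the producer */
	return rval;
}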
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
index 2b3e16b24299..90a6925577cc 100644
--- a/drivers/scsi/qedi/Makefile
+++ b/drivers/scsi/qedi/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_QEDI) := qedi.o
qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
- qedi_dbg.o
+ qedi_dbg.o qedi_fw_api.o
qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index c9f0ef4e11b3..eca40b0513a3 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -14,6 +14,8 @@
#include "qedi.h"
#include "qedi_iscsi.h"
#include "qedi_gbl.h"
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_task *mtask);
@@ -53,8 +55,8 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
- resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
- resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+ resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
+ resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
"Freeing tid=0x%x for cid=0x%x\n",
@@ -975,81 +977,6 @@ exit_fp_process:
return;
}
-static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
- u16 tid, uint16_t ptu_invalidate, int is_cleanup)
-{
- struct iscsi_wqe *wqe;
- struct iscsi_wqe_field *cont_field;
- struct qedi_endpoint *ep;
- struct scsi_cmnd *sc = task->sc;
- struct iscsi_login_req *login_hdr;
- struct qedi_cmd *cmd = task->dd_data;
-
- login_hdr = (struct iscsi_login_req *)task->hdr;
- ep = qedi_conn->ep;
- wqe = &ep->sq[ep->sq_prod_idx];
-
- memset(wqe, 0, sizeof(*wqe));
-
- ep->sq_prod_idx++;
- ep->fw_sq_prod_idx++;
- if (ep->sq_prod_idx == QEDI_SQ_SIZE)
- ep->sq_prod_idx = 0;
-
- if (is_cleanup) {
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_TASK_CLEANUP);
- wqe->task_id = tid;
- return;
- }
-
- if (ptu_invalidate) {
- SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
- ISCSI_WQE_SET_PTU_INVALIDATE);
- }
-
- cont_field = &wqe->cont_prevtid_union.cont_field;
-
- switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
- case ISCSI_OP_LOGIN:
- case ISCSI_OP_TEXT:
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_MIDDLE_PATH);
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
- 1);
- cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
- break;
- case ISCSI_OP_LOGOUT:
- case ISCSI_OP_NOOP_OUT:
- case ISCSI_OP_SCSI_TMFUNC:
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_NORMAL);
- break;
- default:
- if (!sc)
- break;
-
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_NORMAL);
- cont_field->contlen_cdbsize_field =
- (sc->sc_data_direction == DMA_TO_DEVICE) ?
- scsi_bufflen(sc) : 0;
- if (cmd->use_slowpath)
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
- else
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
- (sc->sc_data_direction ==
- DMA_TO_DEVICE) ?
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid) : 0);
- break;
- }
-
- wqe->task_id = tid;
- /* Make sure SQ data is coherent */
- wmb();
-}
-
static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
{
struct iscsi_db_data dbell = { 0 };
@@ -1076,96 +1003,116 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
qedi_conn->iscsi_conn_id);
}
+static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
+{
+ struct qedi_endpoint *ep;
+ u16 rval;
+
+ ep = qedi_conn->ep;
+ rval = ep->sq_prod_idx;
+
+ /* Increment SQ index */
+ ep->sq_prod_idx++;
+ ep->fw_sq_prod_idx++;
+ if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+ ep->sq_prod_idx = 0;
+
+ return rval;
+}
+
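This helper takes over the SQ bookkeeping that the deleted qedi_add_to_sq() used to do inline: each send path now reserves a slot, zeroes the WQE in place, and lets a protocol-level init_* helper from the new qedi_fw_api.c populate it. A minimal sketch of the shared pattern, using identifiers from this diff (illustrative only; declarations and error handling elided):

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &qedi_conn->ep->sq[sq_idx];
	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
	if (init_initiator_logout_request_task(&task_params,
					       &logout_pdu_header,
					       NULL, NULL))
		return -1;
	qedi_ring_doorbell(qedi_conn);	/* publish the updated producer index */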
int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_login_req_hdr login_req_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
- struct iscsi_login_req_hdr *fw_login_req = NULL;
- struct iscsi_cached_sge_ctx *cached_sge = NULL;
- struct iscsi_sge *single_sge = NULL;
- struct iscsi_sge *req_sge = NULL;
- struct iscsi_sge *resp_sge = NULL;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
- s16 ptu_invalidate = 0;
+ struct qedi_endpoint *ep;
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ ep = qedi_conn->ep;
login_hdr = (struct iscsi_login_req *)task->hdr;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
- fw_login_req->opcode = login_hdr->opcode;
- fw_login_req->version_min = login_hdr->min_version;
- fw_login_req->version_max = login_hdr->max_version;
- fw_login_req->flags_attr = login_hdr->flags;
- fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
- fw_login_req->isid_d = *((u32 *)login_hdr->isid);
- fw_login_req->tsih = login_hdr->tsih;
- qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_login_req->cid = qedi_conn->iscsi_conn_id;
- fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
- fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
- fw_login_req->exp_stat_sn = 0;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+ /* Update header info */
+ login_req_pdu_header.opcode = login_hdr->opcode;
+ login_req_pdu_header.version_min = login_hdr->min_version;
+ login_req_pdu_header.version_max = login_hdr->max_version;
+ login_req_pdu_header.flags_attr = login_hdr->flags;
+ login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
+ login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
+
+ login_req_pdu_header.tsih = login_hdr->tsih;
+ login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
- (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
- /* Mstorm context */
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = 0x2;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
- ntoh24(login_hdr->dlength);
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
- fw_task_ctx->ustorm_st_context.task_type = 0x2;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- ntoh24(login_hdr->dlength);
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
+ login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+ login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+ login_req_pdu_header.exp_stat_sn = 0;
+
+ /* Fill tx AHS and rx buffer */
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = ntoh24(login_hdr->dlength);
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_login_request_task(&task_params,
+ &login_req_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1173,7 +1120,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
return 0;
}
@@ -1181,65 +1127,64 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_logout_req_hdr *fw_logout_req = NULL;
- struct iscsi_task_context *fw_task_ctx = NULL;
+ struct iscsi_logout_req_hdr logout_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
- struct qedi_cmd *qedi_cmd = NULL;
- s16 tid = 0;
- s16 ptu_invalidate = 0;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
+ s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
logout_hdr = (struct iscsi_logout *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
-
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
- fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
- fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
- qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
- fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- fw_logout_req->cid = qedi_conn->iscsi_conn_id;
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ /* Update header info */
+ logout_pdu_header.opcode = logout_hdr->opcode;
+ logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+ logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+ logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = 0;
+ task_params.rx_io_size = 0;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+ rval = init_initiator_logout_request_task(&task_params,
+ &logout_pdu_header,
+ NULL, NULL);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1247,9 +1192,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
-
return 0;
}
@@ -1533,47 +1476,46 @@ ldel_exit:
static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_task *mtask)
{
- struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_tmf_request_hdr tmf_pdu_header;
+ struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_tmf_request_hdr *fw_tmf_request;
- struct iscsi_sge *single_sge;
- struct qedi_cmd *qedi_cmd;
- struct qedi_cmd *cmd;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- u32 lun[2];
- s16 tid = 0, ptu_invalidate = 0;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_cmd *cmd;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
+ s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
- qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ ep = qedi_conn->ep;
- tid = qedi_cmd->task_id;
- qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
- fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
- fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
- fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+ qedi_cmd->task_id = tid;
- memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
- fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
- fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
+ /* Update header info */
+ qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+ tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
+ tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+ memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+ tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_ABORT_TASK) {
@@ -1584,53 +1526,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return 0;
}
cmd = (struct qedi_cmd *)ctask->dd_data;
- fw_tmf_request->rtt =
+ tmf_pdu_header.rtt =
qedi_set_itt(cmd->task_id,
get_itt(tmf_hdr->rtt));
} else {
- fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+ tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
}
- fw_tmf_request->opcode = tmf_hdr->opcode;
- fw_tmf_request->function = tmf_hdr->flags;
- fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
- fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
-
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+ tmf_pdu_header.opcode = tmf_hdr->opcode;
+ tmf_pdu_header.function = tmf_hdr->flags;
+ tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
+ tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
- tid, mtask->itt, qedi_conn->iscsi_conn_id);
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = 0;
+ task_params.rx_io_size = 0;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_tmf_request_task(&task_params,
+ &tmf_pdu_header);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1638,7 +1561,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
return 0;
}
@@ -1689,101 +1611,98 @@ int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_text_request_hdr text_request_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_text_request_hdr *fw_text_request;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_sge *single_sge;
- struct qedi_cmd *qedi_cmd;
- /* For 6.5 hdr iscsi_hdr */
+ struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- s16 ptu_invalidate = 0;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
text_hdr = (struct iscsi_text *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_text_request =
- &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
- fw_text_request->opcode = text_hdr->opcode;
- fw_text_request->flags_attr = text_hdr->flags;
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+ /* Update header info */
+ text_request_pdu_header.opcode = text_hdr->opcode;
+ text_request_pdu_header.flags_attr = text_hdr->flags;
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_text_request->ttt = text_hdr->ttt;
- fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
- fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
- fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
-
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
+ text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ text_request_pdu_header.ttt = text_hdr->ttt;
+ text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+ text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+ text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
+
+ /* Fill tx AHS and rx buffer */
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = ntoh24(text_hdr->dlength);
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_text_request_task(&task_params,
+ &text_request_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
- /* Mstorm context */
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = 0x2;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- ntoh24(text_hdr->dlength);
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
- ntoh24(text_hdr->dlength);
- fw_task_ctx->ustorm_st_context.exp_data_sn =
- be32_to_cpu(text_hdr->exp_statsn);
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
- fw_task_ctx->ustorm_st_context.task_type = 0x2;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
- /* Add command in active command list */
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
qedi_cmd->io_cmd_in_list = true;
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
-
return 0;
}
@@ -1791,58 +1710,62 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct iscsi_task *task,
char *datap, int data_len, int unsol)
{
+ struct iscsi_nop_out_hdr nop_out_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_nop_out_hdr *fw_nop_out;
- struct qedi_cmd *qedi_cmd;
- /* For 6.5 hdr iscsi_hdr */
struct iscsi_nopout *nopout_hdr;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_sge *single_sge;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- u32 lun[2];
- s16 ptu_invalidate = 0;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+ if (tid == -1)
return -ENOMEM;
- }
-
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
- SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
- SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+ /* Update header info */
+ nop_out_pdu_header.opcode = nopout_hdr->opcode;
+ SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+ SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
- memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
- fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
- fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+ memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+ nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
+ nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
- fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
- fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
- fw_task_ctx->ystorm_st_context.state.local_comp = 1;
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+ nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
+ nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
} else {
- fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+ nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1851,53 +1774,46 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
spin_unlock(&qedi_conn->list_lock);
}
- fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
- fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
- fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
-
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
- (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ /* Fill tx AHS and rx buffer */
+ if (data_len) {
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = data_len;
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+ }
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = data_len;
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_nop_out_task(&task_params,
+ &nop_out_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
+
qedi_ring_doorbell(qedi_conn);
return 0;
}
@@ -1905,7 +1821,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
int bd_index)
{
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
int frag_size, sg_frags;
sg_frags = 0;
@@ -1938,7 +1854,7 @@ static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
{
struct scsi_cmnd *sc = cmd->scsi_cmd;
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
struct scatterlist *sg;
int byte_count = 0;
int bd_count = 0;
@@ -2040,7 +1956,7 @@ static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
if (bd_count == 0)
return;
} else {
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
bd[0].sge_addr.lo = 0;
bd[0].sge_addr.hi = 0;
@@ -2136,244 +2052,182 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ struct iscsi_cmd_hdr cmd_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct scsi_sgl_task_params *prx_sgl = NULL;
+ struct scsi_sgl_task_params *ptx_sgl = NULL;
+ struct iscsi_task_params task_params;
+ struct iscsi_conn_params conn_params;
+ struct scsi_initiator_cmd_params cmd_params;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_phys_sgl_ctx *phys_sgl;
- struct iscsi_virt_sgl_ctx *virt_sgl;
- struct ystorm_iscsi_task_st_ctx *yst_cxt;
- struct mstorm_iscsi_task_st_ctx *mst_cxt;
- struct iscsi_sgl *sgl_struct;
- struct iscsi_sge *single_sge;
+ struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
- enum iscsi_task_type task_type;
- struct iscsi_cmd_hdr *fw_cmd;
- u32 lun[2];
- u32 exp_data;
- u16 cq_idx = smp_processor_id() % qedi->num_queues;
- s16 ptu_invalidate = 0;
+ enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
s16 tid = 0;
- u8 num_fast_sgs;
+ u16 sq_idx = 0;
+ u16 cq_idx;
+ int rval = 0;
- tid = qedi_get_task_idx(qedi);
- if (tid == -1)
- return -ENOMEM;
+ ep = qedi_conn->ep;
+ cls_conn = qedi_conn->cls_conn;
+ conn = cls_conn->dd_data;
qedi_iscsi_map_sg_list(cmd);
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
- int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
- cmd->task_id = tid;
- /* Ystorm context */
- fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+ cmd->task_id = tid;
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+ memset(&conn_params, 0, sizeof(conn_params));
+ memset(&cmd_params, 0, sizeof(cmd_params));
+
+ cq_idx = smp_processor_id() % qedi->num_queues;
+ /* Update header info */
+ SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
+ ISCSI_ATTR_SIMPLE);
if (sc->sc_data_direction == DMA_TO_DEVICE) {
- if (conn->session->initial_r2t_en) {
- exp_data = min((conn->session->imm_data_en *
- conn->max_xmit_dlength),
- conn->session->first_burst);
- exp_data = min(exp_data, scsi_bufflen(sc));
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- cpu_to_le32(exp_data);
- } else {
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- min(conn->session->first_burst, scsi_bufflen(sc));
- }
-
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_WRITE, 1);
task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
} else {
- if (scsi_bufflen(sc))
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_READ, 1);
task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
}
- fw_cmd->lun.lo = be32_to_cpu(lun[0]);
- fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+ cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
qedi_update_itt_map(qedi, tid, task->itt, cmd);
- fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_cmd->expected_transfer_length = scsi_bufflen(sc);
- fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
- fw_cmd->opcode = hdr->opcode;
- qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
- fw_task_ctx->mstorm_st_context.sense_db.hi =
- (u32)((u64)cmd->sense_buffer_dma >> 32);
- fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
- fw_task_ctx->mstorm_st_context.task_type = task_type;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
- fw_task_ctx->ustorm_st_context.exp_data_sn =
- be32_to_cpu(hdr->exp_statsn);
- fw_task_ctx->ustorm_st_context.task_type = task_type;
- fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-
- num_fast_sgs = (cmd->io_tbl.sge_valid ?
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid) : 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
-
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
- cmd->io_tbl.sge_valid);
-
- yst_cxt = &fw_task_ctx->ystorm_st_context;
- mst_cxt = &fw_task_ctx->mstorm_st_context;
- /* Tx path */
+ cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
+ cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
+ cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd_pdu_header.opcode = hdr->opcode;
+ qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
+
+ /* Fill tx AHS and rx buffer */
if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
- /* not considering superIO or FastIO */
- if (cmd->io_tbl.sge_valid == 1) {
- cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
- cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
- cached_sge->sge.sge_len = bd[0].sge_len;
- qedi->cached_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
- phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
- phys_sgl->sgl_base.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
- qedi->slow_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES,
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid));
- virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
- virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
- virt_sgl->sgl_base.hi =
+ tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ tx_sgl_task_params.sgl_phys_addr.hi =
(u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- virt_sgl->sgl_initial_offset =
- (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
- qedi->fast_sgls++;
- }
- fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
- fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
- } else {
- /* Rx path */
- if (cmd->io_tbl.sge_valid == 1) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- single_sge = &mst_cxt->sgl_union.single_sge;
- single_sge->sge_addr.lo = bd[0].sge_addr.lo;
- single_sge->sge_addr.hi = bd[0].sge_addr.hi;
- single_sge->sge_len = bd[0].sge_len;
- qedi->cached_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
- sgl_struct = &mst_cxt->sgl_union.sgl_struct;
- sgl_struct->sgl_addr.lo =
- (u32)(cmd->io_tbl.sge_tbl_dma);
- sgl_struct->sgl_addr.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- sgl_struct->updated_sge_size = 0;
- sgl_struct->updated_sge_offset = 0;
- qedi->slow_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
- sgl_struct = &mst_cxt->sgl_union.sgl_struct;
- sgl_struct->sgl_addr.lo =
- (u32)(cmd->io_tbl.sge_tbl_dma);
- sgl_struct->sgl_addr.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- sgl_struct->byte_offset =
- (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- sgl_struct->updated_sge_size = 0;
- sgl_struct->updated_sge_offset = 0;
- qedi->fast_sgls++;
- }
- fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
- fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
- }
-
- if (cmd->io_tbl.sge_valid == 1)
- /* Singel-SGL */
- qedi->use_cached_sge = true;
- else {
+ tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+ tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
if (cmd->use_slowpath)
- qedi->use_slow_sge = true;
- else
- qedi->use_fast_sge = true;
- }
+ tx_sgl_task_params.small_mid_sge = true;
+ } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+ rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+ rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
+ }
+
+ /* Add conn param */
+ conn_params.first_burst_length = conn->session->first_burst;
+ conn_params.max_send_pdu_length = conn->max_xmit_dlength;
+ conn_params.max_burst_length = conn->session->max_burst;
+ if (conn->session->initial_r2t_en)
+ conn_params.initial_r2t = true;
+ if (conn->session->imm_data_en)
+ conn_params.immediate_data = true;
+
+ /* Add cmd params */
+ cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
+ cmd_params.sense_data_buffer_phys_addr.hi =
+ (u32)((u64)cmd->sense_buffer_dma >> 32);
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = cq_idx;
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
+ task_params.tx_io_size = scsi_bufflen(sc);
+ else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
+ task_params.rx_io_size = scsi_bufflen(sc);
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
- "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+ "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
(task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
"Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
"Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
- (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+ (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc),
+ (u32)(cmd->io_tbl.sge_tbl_dma),
(u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
- /* Add command in active command list */
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+ if (task_params.tx_io_size != 0)
+ ptx_sgl = &tx_sgl_task_params;
+ if (task_params.rx_io_size != 0)
+ prx_sgl = &rx_sgl_task_params;
+
+ rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
+ &cmd_params, &cmd_pdu_header,
+ ptx_sgl, prx_sgl,
+ NULL);
+ if (rval)
+ return -1;
+
spin_lock(&qedi_conn->list_lock);
list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
cmd->io_cmd_in_list = true;
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
- if (qedi_io_tracing)
- qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
-
return 0;
}
int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
{
+ struct iscsi_task_params task_params;
+ struct qedi_endpoint *ep;
struct iscsi_conn *conn = task->conn;
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_cmd *cmd = task->dd_data;
- s16 ptu_invalidate = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
cmd->task_id, get_itt(task->itt), task->state,
cmd->state, qedi_conn->iscsi_conn_id);
- qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
- qedi_ring_doorbell(qedi_conn);
+ memset(&task_params, 0, sizeof(task_params));
+ ep = qedi_conn->ep;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+
+ task_params.sqe = &ep->sq[sq_idx];
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ task_params.itid = cmd->task_id;
+ rval = init_cleanup_task(&task_params);
+ if (rval)
+ return rval;
+
+ qedi_ring_doorbell(qedi_conn);
return 0;
}
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
new file mode 100644
index 000000000000..fd354d4e03eb
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -0,0 +1,781 @@
+/* QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
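An SGL is classified as slow only when it is both longer than SCSI_NUM_SGES_SLOW_SGL_THR (defined in the shared storage HSI headers, not shown in this patch) and contains small middle SGEs; init_sqe() below then advertises ISCSI_WQE_NUM_SGES_SLOWIO in the WQE rather than a real SGE count, so the firmware presumably falls back to its slow SGL walk instead of the cached-SGE fast path.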
+
+static
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params)
+{
+ u8 sge_index;
+ u8 num_sges;
+ u32 val;
+
+ num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
+ SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
+
+ /* sgl params */
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->total_buffer_size);
+ ctx_sgl_params->sgl_total_length = val;
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges; sge_index++) {
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+ ctx_data_desc->sge[sge_index].sge_len = val;
+ }
+}
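This is where the "4-Cached SGE" firmware change from the commit message shows up: in addition to programming the SGL base address and total length, up to SCSI_NUM_SGES_IN_CACHE (four) leading SGEs are mirrored into the task context, presumably letting the firmware start DMA on short I/Os without first fetching the SGL from host memory. A minimal, illustrative sketch of a caller describing one contiguous buffer (field names from this file; sge_tbl, sge_tbl_dma and buf_len are hypothetical locals):

	struct scsi_sgl_task_params sgl_params;

	memset(&sgl_params, 0, sizeof(sgl_params));
	sgl_params.sgl = sge_tbl;		/* virtual address of the SGE table */
	sgl_params.sgl_phys_addr.lo = (u32)sge_tbl_dma;
	sgl_params.sgl_phys_addr.hi = (u32)((u64)sge_tbl_dma >> 32);
	sgl_params.total_buffer_size = buf_len;
	sgl_params.num_sges = 1;		/* <= 4, so fully cached */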
+
+static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
+ enum iscsi_task_type task_type,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ u32 io_size;
+
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+ task_type == ISCSI_TASK_TYPE_TARGET_READ)
+ io_size = task_params->tx_io_size;
+ else
+ io_size = task_params->rx_io_size;
+
+ if (!io_size)
+ return 0;
+
+ if (!dif_task_params)
+ return io_size;
+
+ return !dif_task_params->dif_on_network ?
+ io_size : sgl_task_params->total_buffer_size;
+}
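The task size therefore follows the host-side I/O size unless DIF runs on the network interface, in which case the wire length includes the protection intervals and total_buffer_size is used instead. As a hypothetical example with standard 8-byte T10 PI tuples: a 4096-byte write protected at 512-byte intervals moves 4096 data bytes but a 4096 + 8 * 8 = 4160-byte task when dif_on_network is set.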
+
+static void
+init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ if (!dif_task_params)
+ return;
+
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
+ dif_task_params->dif_block_size_log);
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
+ dif_task_params->dif_on_network ? 1 : 0);
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+}
+
+static void init_sqe(struct iscsi_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_dif_task_params *dif_task_params,
+ struct iscsi_common_hdr *pdu_header,
+ struct scsi_initiator_cmd_params *cmd_params,
+ enum iscsi_task_type task_type,
+ bool is_cleanup)
+{
+ if (!task_params->sqe)
+ return;
+
+ memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+ task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+ if (is_cleanup) {
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_TASK_CLEANUP);
+ return;
+ }
+
+ switch (task_type) {
+ case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+ {
+ u32 buf_size = 0;
+ u32 num_sges = 0;
+
+ init_dif_context_flags(&task_params->sqe->prot_flags,
+ dif_task_params);
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+
+ if (task_params->tx_io_size) {
+ buf_size = calc_rw_task_size(task_params, task_type,
+ sgl_task_params,
+ dif_task_params);
+
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+ buf_size);
+
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
+ }
+ break;
+ case ISCSI_TASK_TYPE_INITIATOR_READ:
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
+ break;
+ case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
+ case ISCSI_TASK_TYPE_MIDPATH:
+ {
+ bool advance_statsn = true;
+
+ if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_LOGIN);
+ else
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_MIDDLE_PATH);
+
+ if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
+ u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
+ ISCSI_COMMON_HDR_OPCODE);
+
+ if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
+ (opcode != ISCSI_OPCODE_NOP_IN ||
+ pdu_header->itt == ISCSI_TTT_ALL_ONES))
+ advance_statsn = false;
+ }
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
+ advance_statsn ? 1 : 0);
+
+ if (task_params->tx_io_size) {
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CONT_LEN, task_params->tx_io_size);
+
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ ISCSI_WQE_NUM_SGES_SLOWIO);
+ else
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
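init_sqe() is the common replacement for the per-opcode WQE setup in the deleted qedi_add_to_sq(), now keyed on iscsi_task_type: reads and writes become NORMAL WQEs (writes additionally carrying DIF flags and the continuation length), login responses and other midpath PDUs become LOGIN or MIDDLE_PATH WQEs with the RESPONSE bit deciding whether StatSN advances, and cleanups short-circuit to a TASK_CLEANUP WQE that needs only the task id.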
+
+static void init_default_iscsi_task(struct iscsi_task_params *task_params,
+ struct data_hdr *pdu_header,
+ enum iscsi_task_type task_type)
+{
+ struct iscsi_task_context *context;
+ u16 index;
+ u32 val;
+
+ context = task_params->context;
+ memset(context, 0, sizeof(*context));
+
+ for (index = 0; index <
+ ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
+ index++) {
+ val = cpu_to_le32(pdu_header->data[index]);
+ context->ystorm_st_context.pdu_hdr.data.data[index] = val;
+ }
+
+ context->mstorm_st_context.task_type = task_type;
+ context->mstorm_ag_context.task_cid =
+ cpu_to_le16(task_params->conn_icid);
+
+ SET_FIELD(context->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ context->ustorm_st_context.task_type = task_type;
+ context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+ context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
+static
+void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
+ struct scsi_initiator_cmd_params *cmd)
+{
+ union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
+ u32 val;
+
+ if (!cmd->extended_cdb_sge.sge_len)
+ return;
+
+ SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
+ ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
+ cmd->extended_cdb_sge.sge_len);
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
+}
+
+static
+void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
+ struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ u32 remaining_recv_len,
+ u32 expected_data_transfer_len,
+ u8 num_sges, bool tx_dif_conn_err_en)
+{
+ u32 val;
+
+ ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+ ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+ val = cpu_to_le32(expected_data_transfer_len);
+ ustorm_st_cxt->exp_data_transfer_len = val;
+ SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
+ SET_FIELD(ustorm_ag_cxt->flags2,
+ USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ tx_dif_conn_err_en ? 1 : 0);
+}
+
+static
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+ struct iscsi_conn_params *conn_params,
+ enum iscsi_task_type task_type,
+ u32 task_size,
+ u32 exp_data_transfer_len,
+ u8 total_ahs_length)
+{
+ u32 max_unsolicited_data = 0, val;
+
+ if (total_ahs_length &&
+ (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
+ SET_FIELD(context->ustorm_st_context.flags2,
+ USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);
+
+ switch (task_type) {
+ case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+ if (!conn_params->initial_r2t)
+ max_unsolicited_data = conn_params->first_burst_length;
+ else if (conn_params->immediate_data)
+ max_unsolicited_data =
+ min(conn_params->first_burst_length,
+ conn_params->max_send_pdu_length);
+
+ context->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32(total_ahs_length == 0 ?
+ min(exp_data_transfer_len,
+ max_unsolicited_data) :
+ ((u32)(total_ahs_length +
+ ISCSI_AHS_CNTL_SIZE)));
+ break;
+ case ISCSI_TASK_TYPE_TARGET_READ:
+ val = cpu_to_le32(exp_data_transfer_len);
+ context->ustorm_ag_context.exp_data_acked = val;
+ break;
+ case ISCSI_TASK_TYPE_INITIATOR_READ:
+ context->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32((total_ahs_length == 0 ? 0 :
+ total_ahs_length +
+ ISCSI_AHS_CNTL_SIZE));
+ break;
+ case ISCSI_TASK_TYPE_TARGET_WRITE:
+ val = cpu_to_le32(task_size);
+ context->ustorm_ag_context.exp_cont_len = val;
+ break;
+ default:
+ break;
+ }
+}
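The initiator-write case encodes the iSCSI unsolicited-data rules: with InitialR2T disabled, up to FirstBurstLength may be sent unsolicited; with InitialR2T enabled, only immediate data counts, capped at min(FirstBurstLength, MaxSendPDULength). As a hypothetical example, a session with InitialR2T=Yes, ImmediateData=Yes, FirstBurstLength=65536 and MaxSendPDULength=8192 gets exp_data_acked = min(transfer length, 8192) for a write without AHS.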
+
+static
+void init_rtdif_task_context(struct rdif_task_context *rdif_context,
+ struct tdif_task_context *tdif_context,
+ struct scsi_dif_task_params *dif_task_params,
+ enum iscsi_task_type task_type)
+{
+ u32 val;
+
+ if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
+ return;
+
+ if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+ rdif_context->app_tag_value =
+ cpu_to_le16(dif_task_params->application_tag);
+ rdif_context->partial_crc_value = cpu_to_le16(0xffff);
+ val = cpu_to_le32(dif_task_params->initial_ref_tag);
+ rdif_context->initial_ref_tag = val;
+ rdif_context->app_tag_mask =
+ cpu_to_le16(dif_task_params->application_tag_mask);
+ SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
+ dif_task_params->crc_seed ? 1 : 0);
+ SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ dif_task_params->host_guard_type);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ dif_task_params->protection_type);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ dif_task_params->keep_ref_tag_const ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ (dif_task_params->validate_app_tag &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEGUARD,
+ (dif_task_params->validate_guard &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ (dif_task_params->validate_ref_tag &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_HOSTINTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ dif_task_params->dif_on_network ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDGUARD,
+ dif_task_params->forward_guard ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ dif_task_params->forward_app_tag ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDREFTAG,
+ dif_task_params->forward_ref_tag ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_INTERVALSIZE,
+ dif_task_params->dif_block_size_log - 9);
+ SET_FIELD(rdif_context->state,
+ RDIF_TASK_CONTEXT_REFTAGMASK,
+ dif_task_params->ref_tag_mask);
+ SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ dif_task_params->ignore_app_tag);
+ }
+
+ if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+ tdif_context->app_tag_value =
+ cpu_to_le16(dif_task_params->application_tag);
+ tdif_context->partial_crc_valueB =
+ cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+ tdif_context->partial_crc_value_a =
+ cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+ SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
+ dif_task_params->crc_seed ? 1 : 0);
+
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+ dif_task_params->tx_dif_conn_err_en ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+ dif_task_params->forward_guard ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ dif_task_params->forward_app_tag ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+ dif_task_params->forward_ref_tag ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+ dif_task_params->dif_block_size_log - 9);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ dif_task_params->dif_on_network ? 1 : 0);
+ val = cpu_to_le32(dif_task_params->initial_ref_tag);
+ tdif_context->initial_ref_tag = val;
+ tdif_context->app_tag_mask =
+ cpu_to_le16(dif_task_params->application_tag_mask);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ dif_task_params->host_guard_type);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ dif_task_params->protection_type);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+ dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ dif_task_params->keep_ref_tag_const ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+ (dif_task_params->validate_guard &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ (dif_task_params->validate_app_tag &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ (dif_task_params->validate_ref_tag &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_REFTAGMASK,
+ dif_task_params->ref_tag_mask);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ dif_task_params->ignore_app_tag ? 1 : 0);
+ }
+}
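+
+/* Note on INTERVALSIZE in init_rtdif_task_context() above: the field encodes
+ * dif_block_size_log - 9, so (assuming dif_block_size_log is log2 of the
+ * protection interval in bytes) a 512-byte interval encodes as 0 and a
+ * 4 KiB interval as 3.
+ */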
+
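+/* Flags both the Ystorm and Ustorm contexts for local completion. Used below
+ * for a NOP-Out carrying the reserved ITT (i.e. a reply to a target NOP-In),
+ * where no response PDU will arrive to complete the task.
+ */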
+static void set_local_completion_context(struct iscsi_task_context *context)
+{
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
+ SET_FIELD(context->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
+static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
+ enum iscsi_task_type task_type,
+ struct iscsi_conn_params *conn_params,
+ struct iscsi_common_hdr *pdu_header,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ u32 exp_data_transfer_len = conn_params->max_burst_length;
+ struct iscsi_task_context *cxt;
+ bool slow_io = false;
+ u32 task_size, val;
+ u8 num_sges = 0;
+
+ task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
+ dif_task_params);
+
+ init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
+ task_type);
+
+ cxt = task_params->context;
+
+ val = cpu_to_le32(task_size);
+ cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
+ init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+ cmd_params);
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+ cxt->mstorm_st_context.sense_db.lo = val;
+
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+ cxt->mstorm_st_context.sense_db.hi = val;
+
+ if (task_params->tx_io_size) {
+ init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
+ dif_task_params);
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ sgl_task_params);
+
+ slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+
+ num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ ISCSI_WQE_NUM_SGES_SLOWIO;
+
+ if (slow_io) {
+ SET_FIELD(cxt->ystorm_st_context.state.flags,
+ YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
+ }
+ } else if (task_params->rx_io_size) {
+ init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
+ dif_task_params);
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ sgl_task_params);
+ num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge) ?
+ min_t(u16, sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ ISCSI_WQE_NUM_SGES_SLOWIO;
+ cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+ }
+
+ if (exp_data_transfer_len > task_size ||
+ task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
+ exp_data_transfer_len = task_size;
+
+ init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
+ &task_params->context->ustorm_ag_context,
+ task_size, exp_data_transfer_len, num_sges,
+ dif_task_params ?
+ dif_task_params->tx_dif_conn_err_en : false);
+
+ set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
+ task_type, task_size,
+ exp_data_transfer_len,
+ GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN));
+
+ if (dif_task_params)
+ init_rtdif_task_context(&task_params->context->rdif_context,
+ &task_params->context->tdif_context,
+ dif_task_params, task_type);
+
+ init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
+ cmd_params, task_type, false);
+
+ return 0;
+}
+
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+ struct iscsi_conn_params *conn_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct iscsi_cmd_hdr *cmd_header,
+ struct scsi_sgl_task_params *tx_sgl_params,
+ struct scsi_sgl_task_params *rx_sgl_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
+ return init_rw_iscsi_task(task_params,
+ ISCSI_TASK_TYPE_INITIATOR_WRITE,
+ conn_params,
+ (struct iscsi_common_hdr *)cmd_header,
+ tx_sgl_params, cmd_params,
+ dif_task_params);
+ else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+ return init_rw_iscsi_task(task_params,
+ ISCSI_TASK_TYPE_INITIATOR_READ,
+ conn_params,
+ (struct iscsi_common_hdr *)cmd_header,
+ rx_sgl_params, cmd_params,
+ dif_task_params);
+ else
+ return -1;
+}
+
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_login_req_hdr *login_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)login_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0, 0,
+ 0);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)login_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+ struct iscsi_nop_out_hdr *nop_out_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)nop_out_pdu_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
+ set_local_completion_context(task_params->context);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_sgl_task_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_sgl_task_params);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_sgl_task_params->total_buffer_size : 0,
+ 0, 0);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size :
+ 0);
+
+ init_sqe(task_params, tx_sgl_task_params, NULL,
+ (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_logout_req_hdr *logout_hdr,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)logout_hdr,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0,
+ 0, 0);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)logout_hdr, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_tmf_request_hdr *tmf_header)
+{
+ init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ init_sqe(task_params, NULL, NULL,
+ (struct iscsi_common_hdr *)tmf_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_text_request_hdr *text_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)text_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0, 0, 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)text_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_cleanup_task(struct iscsi_task_params *task_params)
+{
+ init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
+ true);
+ return 0;
+}
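
For reference, a minimal sketch of how a driver might drive the NOP-Out helper
above; the wrapper and its argument names are hypothetical and not part of
this patch:

static int example_send_nop_out(struct iscsi_task_context *ctx,
				struct iscsi_wqe *sqe,
				struct iscsi_nop_out_hdr *hdr,
				struct scsi_sgl_task_params *tx_sgl,
				u16 icid, u16 itid)
{
	struct iscsi_task_params task = {
		.context	= ctx,
		.sqe		= sqe,
		.tx_io_size	= tx_sgl ? tx_sgl->total_buffer_size : 0,
		.rx_io_size	= 0,
		.conn_icid	= icid,
		.itid		= itid,
		.cq_rss_number	= 0,
	};

	/* Fills the task context and SQE; the caller still submits the SQE
	 * on the connection's send queue afterwards.
	 */
	return init_initiator_nop_out_task(&task, hdr, tx_sgl, NULL);
}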
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
new file mode 100644
index 000000000000..b6f24f91849d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -0,0 +1,117 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_ISCSI_H_
+#define _QEDI_FW_ISCSI_H_
+
+#include "qedi_fw_scsi.h"
+
+struct iscsi_task_params {
+ struct iscsi_task_context *context;
+ struct iscsi_wqe *sqe;
+ u32 tx_io_size;
+ u32 rx_io_size;
+ u16 conn_icid;
+ u16 itid;
+ u8 cq_rss_number;
+};
+
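+/* Connection-level parameters as negotiated at iSCSI login: first_burst_length
+ * and max_burst_length mirror FirstBurstLength/MaxBurstLength, initial_r2t and
+ * immediate_data mirror InitialR2T/ImmediateData, and max_send_pdu_length is
+ * presumably bounded by the peer's MaxRecvDataSegmentLength. These feed the
+ * unsolicited-data computation in set_rw_exp_data_acked_and_cont_len().
+ */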
+struct iscsi_conn_params {
+ u32 first_burst_length;
+ u32 max_send_pdu_length;
+ u32 max_burst_length;
+ bool initial_r2t;
+ bool immediate_data;
+};
+
+/* @brief init_initiator_rw_iscsi_task - initializes iSCSI Initiator Read or
+ * Write task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param conn_params - Connection Parameters
+ * @param cmd_params - command specific parameters
+ * @param cmd_pdu_header - PDU Header Parameters
+ * @param tx_sgl_params - Pointer to Tx SGL task params
+ * @param rx_sgl_params - Pointer to Rx SGL task params
+ * @param dif_task_params - Pointer to DIF parameters struct
+ */
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+ struct iscsi_conn_params *conn_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct iscsi_cmd_hdr *cmd_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_params,
+ struct scsi_sgl_task_params *rx_sgl_params,
+ struct scsi_dif_task_params *dif_task_params);
+
+/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param login_header - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_login_req_hdr *login_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param nop_out_pdu_header - PDU Header Parameters
+ * @param tx_sgl_params - Pointer to Tx SGL task params
+ * @param rx_sgl_params - Pointer to Rx SGL task params
+ */
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+ struct iscsi_nop_out_hdr *nop_out_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_params,
+ struct scsi_sgl_task_params *rx_sgl_params);
+
+/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
+ * Logout Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param logout_hdr - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_logout_req_hdr *logout_hdr,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param tmf_header - PDU Header Parameters
+ */
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_tmf_request_hdr *tmf_header);
+
+/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param text_header - PDU Header Parameters
+ * @param tx_params - Pointer to Tx SGL task params
+ * @param rx_params - Pointer to Rx SGL task params
+ */
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_text_request_hdr *text_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_cleanup_task - initializes a cleanup task (SQE only)
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_cleanup_task(struct iscsi_task_params *task_params);
+#endif
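
A short sketch, assuming libiscsi's struct iscsi_session and struct iscsi_conn
fields (first_burst, max_burst, initial_r2t_en, imm_data_en, max_xmit_dlength),
of how a caller could derive the connection parameters above from a negotiated
session; the helper name is hypothetical and not part of this patch:

#include <scsi/libiscsi.h>

static void example_fill_conn_params(struct iscsi_conn_params *p,
				     struct iscsi_session *session,
				     struct iscsi_conn *conn)
{
	/* Map the negotiated login keys onto the firmware parameter block. */
	p->first_burst_length = session->first_burst;
	p->max_send_pdu_length = conn->max_xmit_dlength;
	p->max_burst_length = session->max_burst;
	p->initial_r2t = session->initial_r2t_en;
	p->immediate_data = session->imm_data_en;
}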
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
new file mode 100644
index 000000000000..cdaf918f1019
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_scsi.h
@@ -0,0 +1,55 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_SCSI_H_
+#define _QEDI_FW_SCSI_H_
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct scsi_sgl_task_params {
+ struct scsi_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
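+
+/* small_mid_sge appears to flag an SGL with an unusually short non-final
+ * element; together with num_sges above SCSI_NUM_SGES_SLOW_SGL_THR it routes
+ * the task to the slow SGL path (see the scsi_is_slow_sgl() usage in
+ * qedi_fw_api.c, where such tasks are given ISCSI_WQE_NUM_SGES_SLOWIO).
+ */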
+
+struct scsi_dif_task_params {
+ u32 initial_ref_tag;
+ bool initial_ref_tag_is_valid;
+ u16 application_tag;
+ u16 application_tag_mask;
+ u16 dif_block_size_log;
+ bool dif_on_network;
+ bool dif_on_host;
+ u8 host_guard_type;
+ u8 protection_type;
+ u8 ref_tag_mask;
+ bool crc_seed;
+ bool tx_dif_conn_err_en;
+ bool ignore_app_tag;
+ bool keep_ref_tag_const;
+ bool validate_guard;
+ bool validate_app_tag;
+ bool validate_ref_tag;
+ bool forward_guard;
+ bool forward_app_tag;
+ bool forward_ref_tag;
+ bool forward_app_tag_with_mask;
+ bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+ struct scsi_sge extended_cdb_sge;
+ struct regpair sense_data_buffer_phys_addr;
+};
+#endif
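
For reference, a minimal sketch (hypothetical helper, not part of this patch)
of populating struct scsi_sgl_task_params from a DMA-mapped scatterlist; the
hi/lo address split matches what qedi_iscsi_prep_generic_pdu_bd() does below:

#include <linux/scatterlist.h>

static void example_fill_sgl_params(struct scsi_sgl_task_params *p,
				    struct scsi_sge *sgl_virt,
				    dma_addr_t sgl_phys,
				    struct scatterlist *sgl, u16 sg_count,
				    u32 total_len)
{
	struct scatterlist *sg;
	u16 i;

	/* One firmware SGE per DMA segment, address split into hi/lo. */
	for_each_sg(sgl, sg, sg_count, i) {
		sgl_virt[i].sge_addr.lo = (u32)sg_dma_address(sg);
		sgl_virt[i].sge_addr.hi = (u32)((u64)sg_dma_address(sg) >> 32);
		sgl_virt[i].sge_len = sg_dma_len(sg);
	}

	p->sgl = sgl_virt;
	p->sgl_phys_addr.lo = (u32)sgl_phys;
	p->sgl_phys_addr.hi = (u32)((u64)sgl_phys >> 32);
	p->total_buffer_size = total_len;
	p->num_sges = sg_count;
	p->small_mid_sge = false;	/* assume no short middle elements */
}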
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index b9f79d36142d..d5eff68507e5 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -175,7 +175,7 @@ static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
if (cmd->io_tbl.sge_tbl)
dma_free_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
- sizeof(struct iscsi_sge),
+ sizeof(struct scsi_sge),
cmd->io_tbl.sge_tbl,
cmd->io_tbl.sge_tbl_dma);
@@ -191,7 +191,7 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
struct qedi_cmd *cmd)
{
struct qedi_io_bdt *io = &cmd->io_tbl;
- struct iscsi_sge *sge;
+ struct scsi_sge *sge;
io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
@@ -708,22 +708,20 @@ static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
{
- struct iscsi_sge *bd_tbl;
+ struct scsi_sge *bd_tbl;
- bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
qedi_conn->gen_pdu.req_buf;
- bd_tbl->reserved0 = 0;
- bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
- bd_tbl->reserved0 = 0;
}
static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index d3c06bbddb4e..3247287cb0e7 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -102,7 +102,7 @@ struct qedi_endpoint {
#define QEDI_SQ_WQES_MIN 16
struct qedi_io_bdt {
- struct iscsi_sge *sge_tbl;
+ struct scsi_sge *sge_tbl;
dma_addr_t sge_tbl_dma;
u16 sge_valid;
};
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index 9543a1b139d4..d61e3ac22e67 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
* this source tree.
*/
-#define QEDI_MODULE_VERSION "8.10.3.0"
+#define QEDI_MODULE_VERSION "8.10.4.0"
#define QEDI_DRIVER_MAJOR_VER 8
#define QEDI_DRIVER_MINOR_VER 10
-#define QEDI_DRIVER_REV_VER 3
+#define QEDI_DRIVER_REV_VER 4
#define QEDI_DRIVER_ENG_VER 0