path: root/drivers/scsi/lpfc/lpfc_scsi.c
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_scsi.c	1235
1 file changed, 1176 insertions, 59 deletions
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bd1867411821..b103b6ed4970 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -18,13 +18,14 @@
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
-
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
@@ -43,6 +44,73 @@
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
+int _dump_buf_done;
+
+static char *dif_op_str[] = {
+ "SCSI_PROT_NORMAL",
+ "SCSI_PROT_READ_INSERT",
+ "SCSI_PROT_WRITE_STRIP",
+ "SCSI_PROT_READ_STRIP",
+ "SCSI_PROT_WRITE_INSERT",
+ "SCSI_PROT_READ_PASS",
+ "SCSI_PROT_WRITE_PASS",
+ "SCSI_PROT_READ_CONVERT",
+ "SCSI_PROT_WRITE_CONVERT"
+};
+
+static void
+lpfc_debug_save_data(struct scsi_cmnd *cmnd)
+{
+ void *src, *dst;
+ struct scatterlist *sgde = scsi_sglist(cmnd);
+
+ if (!_dump_buf_data) {
+ printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
+ __func__);
+ return;
+ }
+
+ if (!sgde) {
+ printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
+ return;
+ }
+
+ dst = (void *) _dump_buf_data;
+ while (sgde) {
+ src = sg_virt(sgde);
+ memcpy(dst, src, sgde->length);
+ dst += sgde->length;
+ sgde = sg_next(sgde);
+ }
+}
+
+static void
+lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
+{
+ void *src, *dst;
+ struct scatterlist *sgde = scsi_prot_sglist(cmnd);
+
+ if (!_dump_buf_dif) {
+ printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
+ __func__);
+ return;
+ }
+
+ if (!sgde) {
+ printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
+ return;
+ }
+
+ dst = _dump_buf_dif;
+ while (sgde) {
+ src = sg_virt(sgde);
+ memcpy(dst, src, sgde->length);
+ dst += sgde->length;
+ sgde = sg_next(sgde);
+ }
+}
+
/**
* lpfc_update_stats: Update statistical data for the command completion.
* @phba: Pointer to HBA object.
@@ -66,6 +134,8 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
if (cmd->result)
return;
+ latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+
spin_lock_irqsave(shost->host_lock, flags);
if (!vport->stat_data_enabled ||
vport->stat_data_blocked ||
@@ -74,13 +144,15 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
- latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
phba->bucket_step;
- if (i >= LPFC_MAX_BUCKET_COUNT)
- i = LPFC_MAX_BUCKET_COUNT;
+ /* check array subscript bounds */
+ if (i < 0)
+ i = 0;
+ else if (i >= LPFC_MAX_BUCKET_COUNT)
+ i = LPFC_MAX_BUCKET_COUNT - 1;
} else {
for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
if (latency <= (phba->bucket_base +
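The clamped index computation in the hunk above is a ceiling division of the latency into fixed-width buckets. A minimal standalone sketch of the same arithmetic (hypothetical parameter names standing in for phba->bucket_base, phba->bucket_step and LPFC_MAX_BUCKET_COUNT; not driver code):

/* Ceiling-divide (latency - base) by step, clamped to [0, nbuckets). */
static int linear_bucket(long latency, long base, long step, int nbuckets)
{
        int i = (latency + step - 1 - base) / step;

        if (i < 0)                      /* latency below the base */
                i = 0;
        else if (i >= nbuckets)         /* clamp to the last bucket */
                i = nbuckets - 1;
        return i;
}

/* e.g. linear_bucket(120, 0, 50, 16) == 3 */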
@@ -92,7 +164,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-
/**
* lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
* event.
@@ -148,12 +219,19 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
return;
}
-/*
- * This function is called with no lock held when there is a resource
- * error in driver or in firmware.
- */
+/**
+ * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called when there is a resource error in the driver or
+ * firmware. This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. It
+ * posts at most one event each second. This routine wakes up the worker
+ * thread of @phba to process the WORKER_RAMP_DOWN_QUEUE event.
+ *
+ * This routine should be called with no lock held.
+ **/
void
-lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
unsigned long flags;
uint32_t evt_posted;
@@ -182,10 +260,17 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
return;
}
-/*
- * This function is called with no lock held when there is a successful
- * SCSI command completion.
- */
+/**
+ * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine posts a WORKER_RAMP_UP_QUEUE event for @phba vport. It posts
+ * at most one event every five minutes after last_ramp_up_time or
+ * last_rsrc_error_time. This routine wakes up the worker thread of @phba
+ * to process the WORKER_RAMP_UP_QUEUE event.
+ *
+ * This routine should be called with no lock held.
+ **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
struct scsi_device *sdev)
@@ -217,6 +302,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
return;
}
+/**
+ * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
+ * worker thread. This routine reduces the queue depth for every scsi device
+ * on each vport associated with @phba.
+ **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
@@ -267,6 +360,15 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
atomic_set(&phba->num_cmd_success, 0);
}
+/**
+ * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
+ * worker thread. This routine increases the queue depth of every scsi device
+ * on each vport associated with @phba by 1. It also resets @phba
+ * num_rsrc_err and num_cmd_success to zero.
+ **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
@@ -336,14 +438,21 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
}
-/*
+/**
+ * lpfc_new_scsi_buf: Scsi buffer allocator.
+ * @vport: The virtual port for which this call is being executed.
+ *
* This routine allocates a scsi buffer, which contains all the necessary
* information needed to initiate a SCSI I/O. The non-DMAable buffer region
* contains information to build the IOCB. The DMAable region contains
- * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to
- * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL
+ * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
+ * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
* and the BPL BDE is setup in the IOCB.
- */
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_scsi_buf data structure - Success
+ **/
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
@@ -407,14 +516,14 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
+ bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
/* Setup the physical region for the FCP RSP */
bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
+ bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
/*
* Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
@@ -422,7 +531,8 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
*/
iocb = &psb->cur_iocbq.iocb;
iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- if (phba->sli_rev == 3) {
+ if ((phba->sli_rev == 3) &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
/* fill in immediate fcp command BDE */
iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
@@ -452,6 +562,17 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
return psb;
}
+/**
+ * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list list and returns it to the caller.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_scsi_buf - Success
+ **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
@@ -464,11 +585,20 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
if (lpfc_cmd) {
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->nonsg_phys = 0;
+ lpfc_cmd->prot_seg_cnt = 0;
}
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
return lpfc_cmd;
}
+/**
+ * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list list.
+ **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
@@ -480,6 +610,20 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
+/**
+ * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for the scatter-gather list of the
+ * scsi cmnd field of @lpfc_cmd. This routine scans through the sg elements
+ * and formats the BDEs. This routine also initializes all IOCB fields which
+ * are dependent on the scsi command request buffer.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
@@ -516,7 +660,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
printk(KERN_ERR "%s: Too many sg segments from "
- "dma_map_sg. Config %d, seg_cnt %d",
+ "dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
scsi_dma_unmap(scsi_cmnd);
@@ -535,6 +679,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
physaddr = sg_dma_address(sgel);
if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
nseg <= LPFC_EXT_DATA_BDE_COUNT) {
data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -560,7 +705,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* explicitly reinitialized and for SLI-3 the extended bde count is
* explicitly reinitialized since all iocb memory resources are reused.
*/
- if (phba->sli_rev == 3) {
+ if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
/*
* The extended IOCB format can only fit 3 BDE or a BPL.
@@ -587,7 +733,683 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
((num_bde + 2) * sizeof(struct ulp_bde64));
}
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd));
+ return 0;
+}
+
+/*
+ * Given a scsi cmnd, determine the BlockGuard profile to be used
+ * with the cmd
+ */
+static int
+lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
+{
+ uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+ uint8_t ret_prof = LPFC_PROF_INVALID;
+
+ if (guard_type == SHOST_DIX_GUARD_IP) {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ ret_prof = LPFC_PROF_AST2;
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ ret_prof = LPFC_PROF_A1;
+ break;
+
+ case SCSI_PROT_READ_CONVERT:
+ case SCSI_PROT_WRITE_CONVERT:
+ ret_prof = LPFC_PROF_AST1;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ case SCSI_PROT_NORMAL:
+ default:
+ printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+ scsi_get_prot_op(sc), guard_type);
+ break;
+
+ }
+ } else if (guard_type == SHOST_DIX_GUARD_CRC) {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ ret_prof = LPFC_PROF_A1;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ ret_prof = LPFC_PROF_C1;
+ break;
+
+ case SCSI_PROT_READ_CONVERT:
+ case SCSI_PROT_WRITE_CONVERT:
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_NORMAL:
+ default:
+ printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+ scsi_get_prot_op(sc), guard_type);
+ break;
+ }
+ } else {
+ /* unsupported format */
+ BUG();
+ }
+
+ return ret_prof;
+}
+
+struct scsi_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+ return sc->device->sector_size;
+}
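For reference, a standalone sketch of how a Type 1 DIF tuple relates to the LBA; the helper and plain uint types are hypothetical, and the big-endian conversion applied on the wire is omitted:

#include <stdint.h>

/* Hypothetical illustration of the scsi_dif_tuple layout above. */
struct dif_tuple {
        uint16_t guard_tag;     /* checksum of the data block */
        uint16_t app_tag;       /* opaque storage */
        uint32_t ref_tag;       /* low 32 bits of the target LBA (SBC) */
};

static struct dif_tuple make_type1_tuple(uint64_t lba, uint16_t guard)
{
        struct dif_tuple t = {
                .guard_tag = guard,
                .app_tag   = 0,
                .ref_tag   = (uint32_t)(lba & 0xffffffff),
        };
        return t;
}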
+
+/**
+ * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
+ * @sc: in: SCSI command
+ * @apptagmask: out: app tag mask
+ * @apptagval: out: app tag value
+ * @reftag: out: ref tag (reference tag)
+ *
+ * Description:
+ * Extract DIF parameters from the command if possible. Otherwise,
+ * use default parameters.
+ *
+ **/
+static inline void
+lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
+ uint16_t *apptagval, uint32_t *reftag)
+{
+ struct scsi_dif_tuple *spt;
+ unsigned char op = scsi_get_prot_op(sc);
+ unsigned int protcnt = scsi_prot_sg_count(sc);
+ static int cnt;
+
+ if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
+ op == SCSI_PROT_WRITE_PASS ||
+ op == SCSI_PROT_WRITE_CONVERT)) {
+
+ cnt++;
+ spt = page_address(sg_page(scsi_prot_sglist(sc))) +
+ scsi_prot_sglist(sc)[0].offset;
+ *apptagmask = 0;
+ *apptagval = 0;
+ *reftag = cpu_to_be32(spt->ref_tag);
+
+ } else {
+ /* SBC defines ref tag to be lower 32bits of LBA */
+ *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
+ *apptagmask = 0;
+ *apptagval = 0;
+ }
+}
+
+/*
+ * This function sets up buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into data stream (or strip DIF from
+ * incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ * +-------------------------+
+ * start of prot group --> | PDE_1 |
+ * +-------------------------+
+ * | Data BDE |
+ * +-------------------------+
+ * |more Data BDE's ... (opt)|
+ * +-------------------------+
+ *
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * Note: Data s/g buffers have been dma mapped
+ */
+static int
+lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct ulp_bde64 *bpl, int datasegcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct lpfc_pde *pde1 = NULL;
+ dma_addr_t physaddr;
+ int i = 0, num_bde = 0;
+ int datadir = sc->sc_data_direction;
+ int prof = LPFC_PROF_INVALID;
+ unsigned blksize;
+ uint32_t reftag;
+ uint16_t apptagmask, apptagval;
+
+ pde1 = (struct lpfc_pde *) bpl;
+ prof = lpfc_sc_to_sli_prof(sc);
+
+ if (prof == LPFC_PROF_INVALID)
+ goto out;
+
+ /* extract some info from the scsi command for PDE1*/
+ blksize = lpfc_cmd_blksize(sc);
+ lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+
+ /* setup PDE1 with what we have */
+ lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
+ BG_EC_STOP_ERR);
+ lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+
+ num_bde++;
+ bpl++;
+
+ /* assumption: caller has already run dma_map_sg on command data */
+ scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+ physaddr = sg_dma_address(sgde);
+ bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ bpl->tus.f.bdeSize = sg_dma_len(sgde);
+ if (datadir == DMA_TO_DEVICE)
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+ num_bde++;
+ }
+
+out:
+ return num_bde;
+}
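A quick sanity check on the buffer list just built, as a hypothetical sketch: the no-DIF case emits one PDE plus one BDE per mapped data segment, which is why callers treat fewer than two entries as an error:

/* Hypothetical: size of the buffer list built by lpfc_bg_setup_bpl(). */
static int bg_nodif_bde_count(int datasegcnt)
{
        return 1 + datasegcnt;  /* PDE1 + data BDEs; >= 2 on success */
}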
+
+/*
+ * This function sets up buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF_BUF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream. For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ * +-------------------------+
+ * start of first prot group --> | PDE_1 |
+ * +-------------------------+
+ * | PDE_3 (Prot BDE) |
+ * +-------------------------+
+ * | Data BDE |
+ * +-------------------------+
+ * |more Data BDE's ... (opt)|
+ * +-------------------------+
+ * start of new prot group --> | PDE_1 |
+ * +-------------------------+
+ * | ... |
+ * +-------------------------+
+ *
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ * mapped for DMA
+ */
+static int
+lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct ulp_bde64 *bpl, int datacnt, int protcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct scatterlist *sgpe = NULL; /* s/g prot entry */
+ struct lpfc_pde *pde1 = NULL;
+ struct ulp_bde64 *prot_bde = NULL;
+ dma_addr_t dataphysaddr, protphysaddr;
+ unsigned short curr_data = 0, curr_prot = 0;
+ unsigned int split_offset, protgroup_len;
+ unsigned int protgrp_blks, protgrp_bytes;
+ unsigned int remainder, subtotal;
+ int prof = LPFC_PROF_INVALID;
+ int datadir = sc->sc_data_direction;
+ unsigned char pgdone = 0, alldone = 0;
+ unsigned blksize;
+ uint32_t reftag;
+ uint16_t apptagmask, apptagval;
+ int num_bde = 0;
+
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
+
+ if (!sgpe || !sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ sgpe, sgde);
+ return 0;
+ }
+
+ prof = lpfc_sc_to_sli_prof(sc);
+ if (prof == LPFC_PROF_INVALID)
+ goto out;
+
+ /* extract some info from the scsi command for PDE1*/
+ blksize = lpfc_cmd_blksize(sc);
+ lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+
+ split_offset = 0;
+ do {
+ /* setup the first PDE_1 */
+ pde1 = (struct lpfc_pde *) bpl;
+
+ lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
+ BG_EC_STOP_ERR);
+ lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+
+ num_bde++;
+ bpl++;
+
+ /* setup the first BDE that points to protection buffer */
+ prot_bde = (struct ulp_bde64 *) bpl;
+ protphysaddr = sg_dma_address(sgpe);
+ prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
+ prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+ protgroup_len = sg_dma_len(sgpe);
+
+
+ /* must be an integer multiple of the 8-byte DIF tuple size */
+ BUG_ON(protgroup_len % 8);
+
+ protgrp_blks = protgroup_len / 8;
+ protgrp_bytes = protgrp_blks * blksize;
+
+ prot_bde->tus.f.bdeSize = protgroup_len;
+ if (datadir == DMA_TO_DEVICE)
+ prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ prot_bde->tus.w = le32_to_cpu(prot_bde->tus.w);
+
+ curr_prot++;
+ num_bde++;
+
+ /* setup BDE's for data blocks associated with DIF data */
+ pgdone = 0;
+ subtotal = 0; /* total bytes processed for current prot grp */
+ while (!pgdone) {
+ if (!sgde) {
+ printk(KERN_ERR "%s Invalid data segment\n",
+ __func__);
+ return 0;
+ }
+ bpl++;
+ dataphysaddr = sg_dma_address(sgde) + split_offset;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
+
+ remainder = sg_dma_len(sgde) - split_offset;
+
+ if ((subtotal + remainder) <= protgrp_bytes) {
+ /* we can use this whole buffer */
+ bpl->tus.f.bdeSize = remainder;
+ split_offset = 0;
+
+ if ((subtotal + remainder) == protgrp_bytes)
+ pgdone = 1;
+ } else {
+ /* must split this buffer with next prot grp */
+ bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
+ split_offset += bpl->tus.f.bdeSize;
+ }
+
+ subtotal += bpl->tus.f.bdeSize;
+
+ if (datadir == DMA_TO_DEVICE)
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+ num_bde++;
+ curr_data++;
+
+ if (split_offset)
+ break;
+
+ /* Move to the next s/g segment if possible */
+ sgde = sg_next(sgde);
+ }
+
+ /* are we done ? */
+ if (curr_prot == protcnt) {
+ alldone = 1;
+ } else if (curr_prot < protcnt) {
+ /* advance to next prot buffer */
+ sgpe = sg_next(sgpe);
+ bpl++;
+
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ } else {
+ /* if we're here, we have a bug */
+ printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
+ }
+
+ } while (!alldone);
+
+out:
+ return num_bde;
+}
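The split arithmetic above reduces to: each 8-byte DIF tuple guards one logical block, so a protection s/g entry of protgroup_len bytes covers (protgroup_len / 8) * blksize bytes of data, and any data buffer crossing that boundary is split. A standalone sketch with a hypothetical helper:

#include <assert.h>

/* Hypothetical helper mirroring the protgrp_bytes computation above. */
static unsigned int protgrp_data_bytes(unsigned int protgroup_len,
                                       unsigned int blksize)
{
        assert(protgroup_len % 8 == 0); /* whole 8-byte DIF tuples only */
        return (protgroup_len / 8) * blksize;
}

/* e.g. protgrp_data_bytes(256, 512) == 16384: a 256-byte protection
 * buffer holds 32 tuples, guarding 32 data blocks of 512 bytes each.
 */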
+/*
+ * Given a SCSI command that supports DIF, determine the composition of
+ * protection groups involved in setting up buffer lists.
+ *
+ * Returns:
+ * the LPFC_PG_TYPE_* protection group type, for both read and write
+ */
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+{
+ int ret = LPFC_PG_TYPE_INVALID;
+ unsigned char op = scsi_get_prot_op(sc);
+
+ switch (op) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ ret = LPFC_PG_TYPE_NO_DIF;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ case SCSI_PROT_WRITE_CONVERT:
+ case SCSI_PROT_READ_CONVERT:
+ ret = LPFC_PG_TYPE_DIF_BUF;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9021 Unsupported protection op:%d\n", op);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ */
+static int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ uint32_t num_bde = 0;
+ int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+ int prot_group_type = 0;
+ int diflen, fcpdl;
+ unsigned blksize;
+
+ /*
+ * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
+ * fcp_rsp regions to the first data bde entry
+ */
+ bpl += 2;
+ if (scsi_sg_count(scsi_cmnd)) {
+ /*
+ * The driver stores the segment count returned from pci_map_sg
+ * because this a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+ datasegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!datasegcnt))
+ return 1;
+
+ lpfc_cmd->seg_cnt = datasegcnt;
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ printk(KERN_ERR "%s: Too many sg segments from "
+ "dma_map_sg. Config %d, seg_cnt %d\n",
+ __func__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+ switch (prot_group_type) {
+ case LPFC_PG_TYPE_NO_DIF:
+ num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
+ datasegcnt);
+ /* we should have 2 or more entries in buffer list */
+ if (num_bde < 2)
+ goto err;
+ break;
+ case LPFC_PG_TYPE_DIF_BUF:{
+ /*
+ * This type indicates that protection buffers are
+ * passed to the driver, so that needs to be prepared
+ * for DMA
+ */
+ protsegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!protsegcnt)) {
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ lpfc_cmd->prot_seg_cnt = protsegcnt;
+ if (lpfc_cmd->prot_seg_cnt
+ > phba->cfg_prot_sg_seg_cnt) {
+ printk(KERN_ERR "%s: Too many prot sg segments "
+ "from dma_map_sg. Config %d,"
+ "prot_seg_cnt %d\n", __func__,
+ phba->cfg_prot_sg_seg_cnt,
+ lpfc_cmd->prot_seg_cnt);
+ dma_unmap_sg(&phba->pcidev->dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd),
+ datadir);
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
+ datasegcnt, protsegcnt);
+ /* we should have 3 or more entries in buffer list */
+ if (num_bde < 3)
+ goto err;
+ break;
+ }
+ case LPFC_PG_TYPE_INVALID:
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9022 Unexpected protection group %i\n",
+ prot_group_type);
+ return 1;
+ }
+ }
+
+ /*
+ * Finish initializing those IOCB fields that are dependent on the
+ * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
+ * reinitialized since all iocb memory resources are used many times
+ * for transmit, receive, and continuation bpl's.
+ */
+ iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
+ iocb_cmd->ulpBdeCount = 1;
+ iocb_cmd->ulpLe = 1;
+
+ fcpdl = scsi_bufflen(scsi_cmnd);
+
+ if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
+ /*
+ * We are in DIF Type 1 mode
+ * Every data block has an 8-byte DIF (trailer)
+ * attached to it. Must adjust the FCP data length.
+ */
+ blksize = lpfc_cmd_blksize(scsi_cmnd);
+ diflen = (fcpdl / blksize) * 8;
+ fcpdl += diflen;
+ }
+ fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
return 0;
+err:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9023 Could not setup all needed BDE's"
+ "prot_group_type=%d, num_bde=%d\n",
+ prot_group_type, num_bde);
+ return 1;
+}
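The fcpDl adjustment above accounts for the DIF bytes that travel inline with the data on the wire; a worked sketch with a hypothetical helper:

/* Hypothetical: DIF Type 1 adds an 8-byte trailer per data block. */
static int fcp_dl_with_dif(int bufflen, unsigned int blksize)
{
        return bufflen + (bufflen / blksize) * 8;
}

/* e.g. fcp_dl_with_dif(4096, 512) == 4160: 8 blocks, 8 DIF bytes each */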
+
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA. In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ * 0 - No error found
+ * 1 - BlockGuard error found
+ * -1 - Internal error (bad profile, etc.)
+ */
+static int
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_iocbq *pIocbOut)
+{
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+ int ret = 0;
+ uint32_t bghm = bgf->bghm;
+ uint32_t bgstat = bgf->bgstat;
+ uint64_t failing_sector = 0;
+
+ printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+ "bgstat=0x%x bghm=0x%x\n",
+ cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
+ cmd->request->nr_sectors, bgstat, bghm);
+
+ spin_lock(&_dump_buf_lock);
+ if (!_dump_buf_done) {
+ printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
+ (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+ lpfc_debug_save_data(cmd);
+
+ /* If we have a prot sgl, save the DIF buffer */
+ if (lpfc_prot_group_type(phba, cmd) ==
+ LPFC_PG_TYPE_DIF_BUF) {
+ printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
+ (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+ lpfc_debug_save_dif(cmd);
+ }
+
+ _dump_buf_done = 1;
+ }
+ spin_unlock(&_dump_buf_lock);
+
+ if (lpfc_bgs_get_invalid_prof(bgstat)) {
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
+ bgstat);
+ ret = (-1);
+ goto out;
+ }
+
+ if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
+ bgstat);
+ ret = (-1);
+ goto out;
+ }
+
+ if (lpfc_bgs_get_guard_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ phba->bg_guard_err_cnt++;
+ printk(KERN_ERR "BLKGRD: guard_tag error\n");
+ }
+
+ if (lpfc_bgs_get_reftag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+ phba->bg_reftag_err_cnt++;
+ printk(KERN_ERR "BLKGRD: ref_tag error\n");
+ }
+
+ if (lpfc_bgs_get_apptag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+ phba->bg_apptag_err_cnt++;
+ printk(KERN_ERR "BLKGRD: app_tag error\n");
+ }
+
+ if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+ /*
+ * setup sense data descriptor 0 per SPC-4 as an information
+ * field, and put the failing LBA in it
+ */
+ cmd->sense_buffer[8] = 0; /* Information */
+ cmd->sense_buffer[9] = 0xa; /* Add. length */
+ do_div(bghm, cmd->device->sector_size);
+
+ failing_sector = scsi_get_lba(cmd);
+ failing_sector += bghm;
+
+ put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
+ }
+
+ if (!ret) {
+ /* No error was reported - problem in FW? */
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ printk(KERN_ERR "BLKGRD: no errors reported!\n");
+ }
+
+out:
+ return ret;
}
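In the high-water-mark case above, the hardware reports bghm in bytes from the start of the transfer, so dividing by the sector size yields a block offset from the starting LBA. A standalone sketch with a hypothetical helper:

#include <stdint.h>

/* Hypothetical helper mirroring the failing-LBA computation above. */
static uint64_t failing_lba(uint64_t start_lba, uint64_t bghm_bytes,
                            uint32_t sector_size)
{
        return start_lba + bghm_bytes / sector_size;
}

/* e.g. failing_lba(1000, 2048, 512) == 1004: the tag mismatch was
 * detected four blocks into the transfer.
 */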
/**
@@ -681,6 +1503,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_worker_wake_up(phba);
return;
}
+
+/**
+ * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of the scatter-gather list of the scsi
+ * command held in @psb.
+ **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
@@ -692,8 +1523,22 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
*/
if (psb->seg_cnt > 0)
scsi_dma_unmap(psb->pCmd);
+ if (psb->prot_seg_cnt > 0)
+ dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
+ scsi_prot_sg_count(psb->pCmd),
+ psb->pCmd->sc_data_direction);
}
+/**
+ * lpfc_handle_fcp_err: FCP response handler.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @rsp_iocb: The response IOCB which contains FCP error.
+ *
+ * This routine is called to process a response IOCB with status field
+ * IOSTAT_FCP_RSP_ERROR. This routine sets the result field of the scsi
+ * command based upon the SCSI and FCP error.
+ **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_iocbq *rsp_iocb)
@@ -735,7 +1580,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
logit = LOG_FCP;
lpfc_printf_vlog(vport, KERN_WARNING, logit,
- "0730 FCP command x%x failed: x%x SNS x%x x%x "
+ "9024 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
@@ -758,7 +1603,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0716 FCP Read Underrun, expected %d, "
+ "9025 FCP Read Underrun, expected %d, "
"residual %d Data: x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
@@ -774,7 +1619,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
(scsi_get_resid(cmnd) != fcpi_parm)) {
lpfc_printf_vlog(vport, KERN_WARNING,
LOG_FCP | LOG_FCP_ERROR,
- "0735 FCP Read Check Error "
+ "9026 FCP Read Check Error "
"and Underrun Data: x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
scsi_get_resid(cmnd), fcpi_parm,
@@ -793,7 +1638,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
(scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
< cmnd->underflow)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0717 FCP command x%x residual "
+ "9027 FCP command x%x residual "
"underrun converted to error "
"Data: x%x x%x x%x\n",
cmnd->cmnd[0], scsi_bufflen(cmnd),
@@ -802,7 +1647,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
} else if (resp_info & RESID_OVER) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
- "0720 FCP command x%x residual overrun error. "
+ "9028 FCP command x%x residual overrun error. "
"Data: x%x x%x \n", cmnd->cmnd[0],
scsi_bufflen(cmnd), scsi_get_resid(cmnd));
host_status = DID_ERROR;
@@ -814,7 +1659,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "0734 FCP Read Check Error Data: "
+ "9029 FCP Read Check Error Data: "
"x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
@@ -828,6 +1673,16 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
+/**
+ * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
+ * @phba: The Hba for which this call is being executed.
+ * @pIocbIn: The command IOCBQ for the scsi cmnd.
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
+ *
+ * This routine assigns the scsi command result by examining the response
+ * IOCB status field. This routine also handles the QUEUE FULL condition by
+ * ramping down the device queue depth.
+ **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut)
@@ -846,7 +1701,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
- atomic_dec(&pnode->cmd_pending);
+ if (pnode && NLP_CHK_NODE_ACT(pnode))
+ atomic_dec(&pnode->cmd_pending);
if (lpfc_cmd->status) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -856,7 +1712,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = IOSTAT_DEFAULT;
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
- "0729 FCP cmd x%x failed <%d/%d> "
+ "9030 FCP cmd x%x failed <%d/%d> "
"status: x%x result: x%x Data: x%x x%x\n",
cmd->cmnd[0],
cmd->device ? cmd->device->id : 0xffff,
@@ -904,7 +1760,28 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
- } /* else: fall through */
+ }
+
+ if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+ lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+ pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
+ if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ /*
+ * This is a response for a BG enabled
+ * cmd. Parse BG error
+ */
+ lpfc_parse_bg_err(phba, lpfc_cmd,
+ pIocbOut);
+ break;
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_BG,
+ "9031 non-zero BGSTAT "
+ "on unprotected cmd");
+ }
+ }
+
+ /* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
break;
@@ -936,23 +1813,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(sdev->host->host_lock, flags);
- if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
- (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
- ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
- pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
-
- pnode->last_change_time = jiffies;
+ if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode->cmd_qdepth >
+ atomic_read(&pnode->cmd_pending) &&
+ (atomic_read(&pnode->cmd_pending) >
+ LPFC_MIN_TGT_QDEPTH) &&
+ ((cmd->cmnd[0] == READ_10) ||
+ (cmd->cmnd[0] == WRITE_10)))
+ pnode->cmd_qdepth =
+ atomic_read(&pnode->cmd_pending);
+
+ pnode->last_change_time = jiffies;
+ }
spin_unlock_irqrestore(sdev->host->host_lock, flags);
- } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+ } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
time_after(jiffies, pnode->last_change_time +
- msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
- spin_lock_irqsave(sdev->host->host_lock, flags);
- pnode->cmd_qdepth += pnode->cmd_qdepth *
- LPFC_TGTQ_RAMPUP_PCENT / 100;
- if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
- pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
- pnode->last_change_time = jiffies;
- spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ pnode->cmd_qdepth += pnode->cmd_qdepth *
+ LPFC_TGTQ_RAMPUP_PCENT / 100;
+ if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
+ pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+ pnode->last_change_time = jiffies;
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ }
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
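The ramp-up branch above grows the target queue depth by a fixed percentage and caps it at the maximum; a minimal sketch (hypothetical stand-ins for LPFC_TGTQ_RAMPUP_PCENT and LPFC_MAX_TGT_QDEPTH):

/* Hypothetical sketch of the queue-depth ramp-up step above. */
static int ramp_up_qdepth(int qdepth, int pcent, int max)
{
        qdepth += qdepth * pcent / 100;
        if (qdepth > max)
                qdepth = max;
        return qdepth;
}

/* e.g. ramp_up_qdepth(32, 5, 255) == 33 */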
@@ -1067,6 +1952,15 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
}
}
+/**
+ * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to be sent.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes the fcp_cmnd and iocb data structures from the
+ * scsi command to be transferred.
+ **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
@@ -1122,7 +2016,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
- iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
fcp_cmnd->fcpCntl3 = READ_DATA;
phba->fc4InputRequests++;
}
@@ -1133,7 +2026,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
}
- if (phba->sli_rev == 3)
+ if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
/*
* Finish initializing those IOCB fields that are independent
@@ -1152,6 +2046,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
piocbq->vport = vport;
}
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
@@ -1178,7 +2085,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
- if (vport->phba->sli_rev == 3)
+ if (vport->phba->sli_rev == 3 &&
+ !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
@@ -1201,6 +2109,15 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
return 1;
}
+/**
+ * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
+ * @phba: The Hba for which this call is being executed.
+ * @cmdiocbq: Pointer to lpfc_iocbq data structure.
+ * @rspiocbq: Pointer to lpfc_iocbq data structure.
+ *
+ * This routine is the IOCB completion routine for the device reset and
+ * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
+ **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
@@ -1213,6 +2130,20 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
return;
}
+/**
+ * lpfc_scsi_tgt_reset: Target reset handler.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
+ * @vport: The virtual port for which this call is being executed.
+ * @tgt_id: Target ID.
+ * @lun: Lun number.
+ * @rdata: Pointer to lpfc_rport_data.
+ *
+ * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
+ *
+ * Return Code:
+ * 0x2003 - Error
+ * 0x2002 - Success.
+ **/
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
unsigned tgt_id, unsigned int lun,
@@ -1266,6 +2197,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
return ret;
}
+/**
+ * lpfc_info: Info entry point of scsi_host_template data structure.
+ * @host: The scsi host for which this call is being executed.
+ *
+ * This routine provides module information about hba.
+ *
+ * Return code:
+ * Pointer to char - Success.
+ **/
const char *
lpfc_info(struct Scsi_Host *host)
{
@@ -1295,6 +2235,13 @@ lpfc_info(struct Scsi_Host *host)
return lpfcinfobuf;
}
+/**
+ * lpfc_poll_rearm_timer: Routine to modify the fcp_poll timer of the hba.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
+ * The default value of cfg_poll_tmo is 10 milliseconds.
+ **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
unsigned long poll_tmo_expires =
@@ -1305,11 +2252,25 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
poll_tmo_expires);
}
+/**
+ * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine starts the fcp_poll_timer of @phba.
+ **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
lpfc_poll_rearm_timer(phba);
}
+/**
+ * lpfc_poll_timeout: Restart polling timer.
+ * @ptr: Map to lpfc_hba data structure pointer.
+ *
+ * This routine restarts the fcp_poll timer when FCP ring polling is enabled
+ * and the FCP ring interrupt is disabled.
+ **/
void lpfc_poll_timeout(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
@@ -1321,6 +2282,20 @@ void lpfc_poll_timeout(unsigned long ptr)
}
}
+/**
+ * lpfc_queuecommand: Queuecommand entry point of the Scsi Host Template data
+ * structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ * @done: Pointer to done routine.
+ *
+ * The driver registers this routine with the scsi midlayer to submit a @cmnd
+ * for processing. This routine prepares an IOCB from the scsi command and
+ * submits it to the firmware. The @done callback is invoked after the driver
+ * has finished processing the command.
+ *
+ * Return value :
+ * 0 - Success
+ * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
+ **/
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
@@ -1340,6 +2315,17 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_fail_command;
}
+ if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+
+ printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
+ "str=%s without registering for BlockGuard - "
+ "Rejecting command\n",
+ cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+ dif_op_str[scsi_get_prot_op(cmnd)]);
+ goto out_fail_command;
+ }
+
/*
* Catch race where our node has transitioned, but the
* transport is still transitioning.
@@ -1348,12 +2334,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
goto out_fail_command;
}
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+ if (vport->cfg_max_scsicmpl_time &&
+ (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
goto out_host_busy;
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL) {
- lpfc_adjust_queue_depth(phba);
+ lpfc_rampdown_queue_depth(phba);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0707 driver's buffer pool is empty, "
@@ -1361,7 +2348,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_host_busy;
}
- lpfc_cmd->start_time = jiffies;
/*
* Store the midlayer's command structure for the completion phase
* and complete the command initialization.
@@ -1373,7 +2359,65 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
cmnd->scsi_done = done;
- err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+ if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
+ "str=%s\n",
+ cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+ dif_op_str[scsi_get_prot_op(cmnd)]);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x \n",
+ cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
+ cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
+ cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
+ cmnd->cmnd[9]);
+ if (cmnd->cmnd[0] == READ_10)
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9035 BLKGRD: READ @ sector %llu, "
+ "count %lu\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ cmnd->request->nr_sectors);
+ else if (cmnd->cmnd[0] == WRITE_10)
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9036 BLKGRD: WRITE @ sector %llu, "
+ "count %lu cmd=%p\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ cmnd->request->nr_sectors,
+ cmnd);
+
+ err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
+ " str=%s\n",
+ cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+ dif_op_str[scsi_get_prot_op(cmnd)]);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x \n",
+ cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
+ cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
+ cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
+ cmnd->cmnd[9]);
+ if (cmnd->cmnd[0] == READ_10)
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9040 dbg: READ @ sector %llu, "
+ "count %lu\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ cmnd->request->nr_sectors);
+ else if (cmnd->cmnd[0] == WRITE_10)
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9041 dbg: WRITE @ sector %llu, "
+ "count %lu cmd=%p\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ cmnd->request->nr_sectors, cmnd);
+ else
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9042 dbg: parser not implemented\n");
+ err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+ }
+
if (err)
goto out_host_busy_free_buf;
@@ -1382,9 +2426,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
atomic_inc(&ndlp->cmd_pending);
err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
- if (err)
+ if (err) {
+ atomic_dec(&ndlp->cmd_pending);
goto out_host_busy_free_buf;
-
+ }
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_poll_fcp_ring(phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
@@ -1394,7 +2439,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
out_host_busy_free_buf:
- atomic_dec(&ndlp->cmd_pending);
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
@@ -1405,6 +2449,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
}
+/**
+ * lpfc_block_error_handler: Routine to block error handler.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine blocks execution until the fc_rport state is no longer
+ * FC_PORTSTATE_BLOCKED.
+ **/
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
@@ -1421,6 +2471,17 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
return;
}
+/**
+ * lpfc_abort_handler: eh_abort_handler entry point of the Scsi Host Template
+ * data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine aborts @cmnd pending in base driver.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
@@ -1516,6 +2577,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
return ret;
}
+/**
+ * lpfc_device_reset_handler: eh_device_reset entry point of the Scsi Host
+ * Template data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a device reset by sending a TARGET_RESET task management
+ * command.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
@@ -1560,7 +2633,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
fc_get_event_number(),
sizeof(scsi_event),
(char *)&scsi_event,
- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+ LPFC_NL_VENDOR_ID);
if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -1633,6 +2706,17 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
return ret;
}
+/**
+ * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
+ * Template data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset for every target on @cmnd->device->host.
+ *
+ * Return Code:
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
@@ -1657,7 +2741,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
fc_get_event_number(),
sizeof(scsi_event),
(char *)&scsi_event,
- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+ LPFC_NL_VENDOR_ID);
lpfc_block_error_handler(cmnd);
/*
@@ -1723,6 +2807,20 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
return ret;
}
+/**
+ * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
+ * structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine populates the cmds_per_lun count + 2 scsi_bufs into this
+ * host's globally available list of scsi buffers. This routine also makes
+ * sure scsi buffers are not allocated beyond the HBA limit conveyed to the
+ * midlayer. This list of scsi buffers exists for the lifetime of the driver.
+ *
+ * Return codes:
+ * non-0 - Error
+ * 0 - Success
+ **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
@@ -1784,6 +2882,19 @@ lpfc_slave_alloc(struct scsi_device *sdev)
return 0;
}
+/**
+ * lpfc_slave_configure: slave_configure entry point of the Scsi Host Template
+ * data structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine configures the following items:
+ * - Tag command queuing support for @sdev if supported.
+ * - Dev loss time out value of fc_rport.
+ * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
+ *
+ * Return codes:
+ * 0 - Success
+ **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
@@ -1813,6 +2924,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
return 0;
}
+/**
+ * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine sets the @sdev hostdata field to NULL.
+ **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{