author		Jiri Kosina <jkosina@suse.cz>	2013-12-19 15:08:03 +0100
committer	Jiri Kosina <jkosina@suse.cz>	2013-12-19 15:08:32 +0100
commit		e23c34bb41da65f354fb7eee04300c56ee48f60c (patch)
tree		549fbe449d55273b81ef104a9755109bf4ae7817 /drivers/scsi
parent		staging: usbip: Remove double initialization of msg_namelen variable (diff)
parent		Linux 3.13-rc4 (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply fixes on top of newer things
in tree (efi-stub).
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'drivers/scsi')
135 files changed, 8131 insertions, 2394 deletions
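A recurring change in the hunks below is the addition of .no_write_same = 1 to SCSI host templates (3w-9xxx, 3w-sas, 3w-xxxx, aacraid, arcmsr). The flag tells the SCSI midlayer not to issue WRITE SAME commands through these RAID/virtual adapters, whose firmware-emulated targets advertise a SCSI level that would otherwise make the midlayer attempt WRITE SAME (for example when translating discards) and then fail. A minimal sketch of the pattern, assuming only scsi_host.h of this kernel generation; the "exdrv" driver name and the other field values are illustrative, not from any of the drivers touched here:

#include <scsi/scsi_host.h>

/* Sketch only: a pared-down host template for a hypothetical
 * "exdrv" driver showing the no_write_same usage added below.
 */
static struct scsi_host_template exdrv_template = {
	.module		= THIS_MODULE,
	.name		= "exdrv",
	.this_id	= -1,
	.emulated	= 1,	/* firmware emulates a SCSI target */
	.no_write_same	= 1,	/* midlayer must not send WRITE SAME */
};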
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 5e1e12c0cf42..0a7325361d29 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2025,7 +2025,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= twa_host_attrs,
-	.emulated		= 1
+	.emulated		= 1,
+	.no_write_same		= 1,
 };
 
 /* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index c845bdbeb6c0..4de346017e9f 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1600,7 +1600,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= twl_host_attrs,
-	.emulated		= 1
+	.emulated		= 1,
+	.no_write_same		= 1,
 };
 
 /* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index b9276d10b25c..752624e6bc00 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2279,7 +2279,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= tw_host_attrs,
-	.emulated		= 1
+	.emulated		= 1,
+	.no_write_same		= 1,
 };
 
 /* This function will probe and initialize a card */
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index feab3a5e50b5..972f8176665f 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -26,8 +26,8 @@
 
 */
 
-#define blogic_drvr_version		"2.1.16"
-#define blogic_drvr_date		"18 July 2002"
+#define blogic_drvr_version		"2.1.17"
+#define blogic_drvr_date		"12 September 2013"
 
 #include <linux/module.h>
 #include <linux/init.h>
@@ -311,12 +311,14 @@ static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter)
    caller.
 */
 
-static void blogic_dealloc_ccb(struct blogic_ccb *ccb)
+static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
 {
 	struct blogic_adapter *adapter = ccb->adapter;
 
-	scsi_dma_unmap(ccb->command);
-	pci_unmap_single(adapter->pci_device, ccb->sensedata,
+	if (ccb->command != NULL)
+		scsi_dma_unmap(ccb->command);
+	if (dma_unmap)
+		pci_unmap_single(adapter->pci_device, ccb->sensedata,
 			ccb->sense_datalen, PCI_DMA_FROMDEVICE);
 
 	ccb->command = NULL;
@@ -696,7 +698,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 	while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
 					PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
 					pci_device)) != NULL) {
-		struct blogic_adapter *adapter = adapter;
+		struct blogic_adapter *host_adapter = adapter;
 		struct blogic_adapter_info adapter_info;
 		enum blogic_isa_ioport mod_ioaddr_req;
 		unsigned char bus;
@@ -744,9 +746,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 		   known and enabled, note that the particular
 		   Standard ISA I/O Address should not be probed.
 		 */
-		adapter->io_addr = io_addr;
-		blogic_intreset(adapter);
-		if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
+		host_adapter->io_addr = io_addr;
+		blogic_intreset(host_adapter);
+		if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
 				&adapter_info, sizeof(adapter_info)) ==
 				sizeof(adapter_info)) {
 			if (adapter_info.isa_port < 6)
@@ -762,7 +764,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 			   I/O Address assigned at system initialization.
 			 */
 			mod_ioaddr_req = BLOGIC_IO_DISABLE;
-			blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+			blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
 				sizeof(mod_ioaddr_req), NULL, 0);
 			/*
 			   For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +781,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 
 				fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
 				fetch_localram.count = sizeof(autoscsi_byte45);
-				blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM,
+				blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
 					&fetch_localram, sizeof(fetch_localram),
 					&autoscsi_byte45,
 					sizeof(autoscsi_byte45));
-				blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
-					sizeof(id));
+				blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
+					&id, sizeof(id));
 				if (id.fw_ver_digit1 == '5')
 					force_scan_order =
 						autoscsi_byte45.force_scan_order;
@@ -2762,8 +2764,8 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
 			/*
 			   Place CCB back on the Host Adapter's free list.
 			 */
-			blogic_dealloc_ccb(ccb);
-#if 0 /* this needs to be redone different for new EH */
+			blogic_dealloc_ccb(ccb, 1);
+#if 0 /* this needs to be redone different for new EH */
 			/*
 			   Bus Device Reset CCBs have the command field
 			   non-NULL only when a Bus Device Reset was requested
@@ -2791,7 +2793,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
 				if (ccb->status == BLOGIC_CCB_RESET &&
 						ccb->tgt_id == tgt_id) {
 					command = ccb->command;
-					blogic_dealloc_ccb(ccb);
+					blogic_dealloc_ccb(ccb, 1);
 					adapter->active_cmds[tgt_id]--;
 					command->result = DID_RESET << 16;
 					command->scsi_done(command);
@@ -2862,7 +2864,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
 			/*
 			   Place CCB back on the Host Adapter's free list.
 			 */
-			blogic_dealloc_ccb(ccb);
+			blogic_dealloc_ccb(ccb, 1);
 			/*
 			   Call the SCSI Command Completion Routine.
 			 */
@@ -3034,6 +3036,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
 	int buflen = scsi_bufflen(command);
 	int count;
 	struct blogic_ccb *ccb;
+	dma_addr_t sense_buf;
 
 	/*
 	   SCSI REQUEST_SENSE commands will be executed automatically by the
@@ -3179,10 +3182,17 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
 	}
 	memcpy(ccb->cdb, cdb, cdblen);
 	ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
-	ccb->sensedata = pci_map_single(adapter->pci_device,
+	ccb->command = command;
+	sense_buf = pci_map_single(adapter->pci_device,
 				command->sense_buffer, ccb->sense_datalen,
 				PCI_DMA_FROMDEVICE);
-	ccb->command = command;
+	if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
+		blogic_err("DMA mapping for sense data buffer failed\n",
+				adapter);
+		blogic_dealloc_ccb(ccb, 0);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+	ccb->sensedata = sense_buf;
 	command->scsi_done = comp_cb;
 	if (blogic_multimaster_type(adapter)) {
 		/*
@@ -3203,7 +3213,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
 			if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) {
 				blogic_warn("Still unable to write Outgoing Mailbox - "
 						"Host Adapter Dead?\n", adapter);
-				blogic_dealloc_ccb(ccb);
+				blogic_dealloc_ccb(ccb, 1);
 				command->result = DID_ERROR << 16;
 				command->scsi_done(command);
 			}
@@ -3337,7 +3347,7 @@ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)
 
 	for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)
 		if (ccb->status == BLOGIC_CCB_ACTIVE)
-			blogic_dealloc_ccb(ccb);
+			blogic_dealloc_ccb(ccb, 1);
 	/*
 	 * Wait a few seconds between the Host Adapter Hard Reset which
 	 * initiates a SCSI Bus Reset and issuing any SCSI Commands.  Some
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index d85ac1a9d2c0..fbcd48d0bfc3 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 		goto cleanup;
 	}
 
-	if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
+	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
+	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
 		rcode = -EINVAL;
 		goto cleanup;
 	}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 408a42ef787a..4921ed19a027 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
 	return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
 }
 
@@ -1079,6 +1081,7 @@ static struct scsi_host_template aac_driver_template = {
 #endif
 	.use_clustering			= ENABLE_CLUSTERING,
 	.emulated			= 1,
+	.no_write_same			= 1,
 };
 
 static void __aac_shutdown(struct aac_dev * aac)
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index c67e401954c5..d8145888e66a 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2511,8 +2511,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
 	struct asc_board *boardp = shost_priv(s);
 
 	printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
-	printk(" host_busy %u, host_no %d, last_reset %d,\n",
-	       s->host_busy, s->host_no, (unsigned)s->last_reset);
+	printk(" host_busy %u, host_no %d,\n",
+	       s->host_busy, s->host_no);
 
 	printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
 	       (ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3345,8 +3345,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
 		   shost->host_no);
 
 	seq_printf(m,
-		   " host_busy %u, last_reset %lu, max_id %u, max_lun %u, max_channel %u\n",
-		   shost->host_busy, shost->last_reset, shost->max_id,
+		   " host_busy %u, max_id %u, max_lun %u, max_channel %u\n",
+		   shost->host_busy, shost->max_id,
 		   shost->max_lun, shost->max_channel);
 
 	seq_printf(m,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 6917b4f5ac9e..22d5a949ec83 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -692,7 +692,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
 	 * ID as valid.
 	 */
 	if (ahc_get_pci_function(pci) > 0
-	 && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice)
+	 && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor)
 	 && SUBID_9005_MFUNCENB(subdevice) == 0)
 		return (NULL);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 97fd450aff09..4f6a30b8e5f9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -137,6 +137,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
 	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= arcmsr_host_attrs,
+	.no_write_same		= 1,
 };
 static struct pci_device_id arcmsr_device_id_table[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 777e7c0bbb4b..2e28f6c419fe 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -128,7 +128,7 @@ struct be_ctrl_info {
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
 
-#define mcc_timeout		120000 /* 5s timeout */
+#define mcc_timeout		120000 /* 12s timeout */
 
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index e66aa7c11a8a..3338391b64de 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -17,9 +17,9 @@
 
 #include <scsi/iscsi_proto.h>
 
+#include "be_main.h"
 #include "be.h"
 #include "be_mgmt.h"
-#include "be_main.h"
 
 int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
 {
@@ -158,8 +158,10 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 	struct be_cmd_resp_hdr *ioctl_resp_hdr;
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
-	if (beiscsi_error(phba))
+	if (beiscsi_error(phba)) {
+		free_mcc_tag(&phba->ctrl, tag);
 		return -EIO;
+	}
 
 	/* wait for the mccq completion */
 	rc = wait_event_interruptible_timeout(
@@ -173,7 +175,11 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 			    BEISCSI_LOG_CONFIG,
 			    "BC_%d : MBX Cmd Completion timed out\n");
-		rc = -EAGAIN;
+		rc = -EBUSY;
+
+		/* decrement the mccq used count */
+		atomic_dec(&phba->ctrl.mcc_obj.q.used);
+
 		goto release_mcc_tag;
 	} else
 		rc = 0;
@@ -208,10 +214,18 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
 			ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
 
-			if (ioctl_resp_hdr->response_length)
-				goto release_mcc_tag;
+			beiscsi_log(phba, KERN_WARNING,
+				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+				    BEISCSI_LOG_CONFIG,
+				    "BC_%d : Insufficent Buffer Error "
+				    "Resp_Len : %d Actual_Resp_Len : %d\n",
+				    ioctl_resp_hdr->response_length,
+				    ioctl_resp_hdr->actual_resp_len);
+
+			rc = -EAGAIN;
+			goto release_mcc_tag;
 		}
-		rc = -EAGAIN;
+		rc = -EIO;
 	}
 
 release_mcc_tag:
@@ -363,7 +377,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
 	} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
 		    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
 		     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
-		phba->state = BE_ADAPTER_UP;
+		phba->state = BE_ADAPTER_LINK_UP;
 
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -486,33 +500,47 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
 **/
 static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 {
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */
 	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-	uint32_t wait = 0;
+	unsigned long timeout;
+	bool read_flag = false;
+	int ret = 0, i;
 	u32 ready;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
 
-	do {
+	if (beiscsi_error(phba))
+		return -EIO;
 
-		if (beiscsi_error(phba))
-			return -EIO;
+	timeout = jiffies + (HZ * 110);
 
-		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
-		if (ready)
-			break;
+	do {
+		for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
+			ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+			if (ready) {
+				read_flag = true;
+				break;
+			}
+			mdelay(1);
+		}
 
-		if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
-			beiscsi_log(phba, KERN_ERR,
-				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-				    "BC_%d : FW Timed Out\n");
+		if (!read_flag) {
+			wait_event_timeout(rdybit_check_q,
+					   (read_flag != true),
+					   HZ * 5);
+		}
+	} while ((time_before(jiffies, timeout)) && !read_flag);
+
+	if (!read_flag) {
+		beiscsi_log(phba, KERN_ERR,
+			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+			    "BC_%d : FW Timed Out\n");
 		phba->fw_timeout = true;
 		beiscsi_ue_detect(phba);
-			return -EBUSY;
-		}
+		ret = -EBUSY;
+	}
 
-		mdelay(1);
-		wait++;
-	} while (true);
-	return 0;
+	return ret;
 }
 
 /*
@@ -699,7 +727,7 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 	struct be_mcc_wrb *wrb;
 
-	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+	WARN_ON(atomic_read(&mccq->used) >= mccq->len);
 	wrb = queue_head_node(mccq);
 	memset(wrb, 0, sizeof(*wrb));
 	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
@@ -1009,10 +1037,29 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 	return status;
 }
 
+/**
+ * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
+ * @ctrl: ptr to ctrl_info
+ * @cq: Completion Queue
+ * @dq: Default Queue
+ * @lenght: ring size
+ * @entry_size: size of each entry in DEFQ
+ * @is_header: Header or Data DEFQ
+ * @ulp_num: Bind to which ULP
+ *
+ * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
+ * on this queue by the FW
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ *
+ **/
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 				    struct be_queue_info *cq,
 				    struct be_queue_info *dq, int length,
-				    int entry_size)
+				    int entry_size, uint8_t is_header,
+				    uint8_t ulp_num)
 {
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct be_defq_create_req *req = embedded_payload(wrb);
@@ -1030,6 +1077,11 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 			OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+	if (phba->fw_config.dual_ulp_aware) {
+		req->ulp_num = ulp_num;
+		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+	}
 
 	if (is_chip_be2_be3r(phba)) {
 		AMAP_SET_BITS(struct amap_be_default_pdu_context,
@@ -1067,22 +1119,53 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 
 	status = be_mbox_notify(ctrl);
 	if (!status) {
+		struct be_ring *defq_ring;
 		struct be_defq_create_resp *resp = embedded_payload(wrb);
 
 		dq->id = le16_to_cpu(resp->id);
 		dq->created = true;
+		if (is_header)
+			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
+		else
+			defq_ring = &phba->phwi_ctrlr->
+				    default_pdu_data[ulp_num];
+
+		defq_ring->id = dq->id;
+
+		if (!phba->fw_config.dual_ulp_aware) {
+			defq_ring->ulp_num = BEISCSI_ULP0;
+			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
+		} else {
+			defq_ring->ulp_num = resp->ulp_num;
+			defq_ring->doorbell_offset = resp->doorbell_offset;
+		}
 	}
 	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
 
-int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
-		       struct be_queue_info *wrbq)
+/**
+ * be_cmd_wrbq_create()- Create WRBQ
+ * @ctrl: ptr to ctrl_info
+ * @q_mem: memory details for the queue
+ * @wrbq: queue info
+ * @pwrb_context: ptr to wrb_context
+ * @ulp_num: ULP on which the WRBQ is to be created
+ *
+ * Create WRBQ on the passed ULP_NUM.
+ *
+ **/
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
+			struct be_dma_mem *q_mem,
+			struct be_queue_info *wrbq,
+			struct hwi_wrb_context *pwrb_context,
+			uint8_t ulp_num)
 {
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct be_wrbq_create_req *req = embedded_payload(wrb);
 	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	int status;
 
 	spin_lock(&ctrl->mbox_lock);
@@ -1093,17 +1176,78 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
 		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+	if (phba->fw_config.dual_ulp_aware) {
+		req->ulp_num = ulp_num;
+		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+	}
+
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
 	status = be_mbox_notify(ctrl);
 	if (!status) {
 		wrbq->id = le16_to_cpu(resp->cid);
 		wrbq->created = true;
+
+		pwrb_context->cid = wrbq->id;
+		if (!phba->fw_config.dual_ulp_aware) {
+			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
+			pwrb_context->ulp_num = BEISCSI_ULP0;
+		} else {
+			pwrb_context->ulp_num = resp->ulp_num;
+			pwrb_context->doorbell_offset = resp->doorbell_offset;
+		}
 	}
 	spin_unlock(&ctrl->mbox_lock);
 
 	return status;
 }
 
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+				    struct be_dma_mem *q_mem)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_post_template_pages_req *req = embedded_payload(wrb);
+	int status;
+
+	spin_lock(&ctrl->mbox_lock);
+
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
+			   sizeof(*req));
+
+	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+	status = be_mbox_notify(ctrl);
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_remove_template_pages_req *req = embedded_payload(wrb);
+	int status;
+
+	spin_lock(&ctrl->mbox_lock);
+
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
+			   sizeof(*req));
+
+	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+
+	status = be_mbox_notify(ctrl);
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 				struct be_dma_mem *q_mem,
 				u32 page_offset, u32 num_pages)
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 99073086dfe0..627ebbe0172c 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -40,6 +40,7 @@ struct be_mcc_wrb {
 	u32 tag1;		/* dword 3 */
 	u32 rsvd;		/* dword 4 */
 	union {
+#define EMBED_MBX_MAX_PAYLOAD_SIZE	220
 		u8 embedded_payload[236];	/* used by embedded cmds */
 		struct be_sge sgl[19];	/* used by non-embedded cmds */
 	} payload;
@@ -162,6 +163,8 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_CQ_CREATE				12
 #define OPCODE_COMMON_EQ_CREATE				13
 #define OPCODE_COMMON_MCC_CREATE			21
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS	24
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS	25
 #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES		32
 #define OPCODE_COMMON_GET_FW_VERSION			35
 #define OPCODE_COMMON_MODIFY_EQ_DELAY			41
@@ -217,6 +220,10 @@ struct phys_addr {
 	u32 hi;
 };
 
+struct virt_addr {
+	u32 lo;
+	u32 hi;
+};
 /**************************
  * BE Command definitions *
  **************************/
@@ -722,7 +729,13 @@ int be_mbox_notify(struct be_ctrl_info *ctrl);
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 				    struct be_queue_info *cq,
 				    struct be_queue_info *dq, int length,
-				    int entry_size);
+				    int entry_size, uint8_t is_header,
+				    uint8_t ulp_num);
+
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+				   struct be_dma_mem *q_mem);
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);
 
 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 				struct be_dma_mem *q_mem, u32 page_offset,
@@ -731,7 +744,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
 
 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
-		       struct be_queue_info *wrbq);
+		       struct be_queue_info *wrbq,
+		       struct hwi_wrb_context *pwrb_context,
+		       uint8_t ulp_num);
 
 bool is_link_state_evt(u32 trailer);
 
@@ -776,7 +791,9 @@ struct be_defq_create_req {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
 	u8 ulp_num;
-	u8 rsvd0;
+#define BEISCSI_DUAL_ULP_AWARE_BIT	0	/* Byte 3 - Bit 0 */
+#define BEISCSI_BIND_Q_TO_ULP_BIT	1	/* Byte 3 - Bit 1 */
+	u8 dua_feature;
 	struct be_default_pdu_context context;
 	struct phys_addr pages[8];
 } __packed;
@@ -784,6 +801,27 @@ struct be_defq_create_req {
 struct be_defq_create_resp {
 	struct be_cmd_req_hdr hdr;
 	u16 id;
+	u8 rsvd0;
+	u8 ulp_num;
+	u32 doorbell_offset;
+	u16 register_set;
+	u16 doorbell_format;
+} __packed;
+
+struct be_post_template_pages_req {
+	struct be_cmd_req_hdr hdr;
+	u16 num_pages;
+#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI	0x1
+	u16 type;
+	struct phys_addr scratch_pa;
+	struct virt_addr scratch_va;
+	struct virt_addr pages_va;
+	struct phys_addr pages[16];
+} __packed;
+
+struct be_remove_template_pages_req {
+	struct be_cmd_req_hdr hdr;
+	u16 type;
 	u16 rsvd0;
 } __packed;
 
@@ -800,14 +838,18 @@ struct be_wrbq_create_req {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
 	u8 ulp_num;
-	u8 rsvd0;
+	u8 dua_feature;
 	struct phys_addr pages[8];
 } __packed;
 
 struct be_wrbq_create_resp {
 	struct be_cmd_resp_hdr resp_hdr;
 	u16 cid;
-	u16 rsvd0;
+	u8 rsvd0;
+	u8 ulp_num;
+	u32 doorbell_offset;
+	u16 register_set;
+	u16 doorbell_format;
 } __packed;
 
 #define SOL_CID_MASK		0x0000FFC0
@@ -1002,6 +1044,7 @@ union tcp_upload_params {
 } __packed;
 
 struct be_ulp_fw_cfg {
+#define BEISCSI_ULP_ISCSI_INI_MODE	0x10
 	u32 ulp_mode;
 	u32 etx_base;
 	u32 etx_count;
@@ -1017,14 +1060,26 @@ struct be_ulp_fw_cfg {
 	u32 icd_count;
 };
 
+struct be_ulp_chain_icd {
+	u32 chain_base;
+	u32 chain_count;
+};
+
 struct be_fw_cfg {
 	struct be_cmd_req_hdr hdr;
 	u32 be_config_number;
 	u32 asic_revision;
 	u32 phys_port;
+#define BEISCSI_FUNC_ISCSI_INI_MODE	0x10
+#define BEISCSI_FUNC_DUA_MODE	0x800
 	u32 function_mode;
 	struct be_ulp_fw_cfg ulp[2];
 	u32 function_caps;
+	u32 cqid_base;
+	u32 cqid_count;
+	u32 eqid_base;
+	u32 eqid_count;
+	struct be_ulp_chain_icd chain_icd[2];
 } __packed;
 
 struct be_cmd_get_all_if_id_req {
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index ef36be003f67..ffadbee0b4d9 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -58,10 +58,15 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 	}
 	beiscsi_ep = ep->dd_data;
 	phba = beiscsi_ep->phba;
-	shost = phba->shost;
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-		    "BS_%d : In beiscsi_session_create\n");
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : PCI_ERROR Recovery\n");
+		return NULL;
+	} else {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In beiscsi_session_create\n");
+	}
 
 	if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -74,6 +79,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 		cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
 	}
 
+	shost = phba->shost;
 	cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
 					  shost, cmds_max,
 					  sizeof(*beiscsi_sess),
@@ -194,6 +200,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;
+	struct hwi_wrb_context *pwrb_context;
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct iscsi_endpoint *ep;
 
@@ -214,9 +222,13 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 		return -EEXIST;
 	}
 
+	pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
+			beiscsi_ep->ep_cid)];
+
 	beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
 	beiscsi_conn->ep = beiscsi_ep;
 	beiscsi_ep->conn = beiscsi_conn;
+	beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
 
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
 		    "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
@@ -265,13 +277,17 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
 
 void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
 {
-	struct be_cmd_get_if_info_resp if_info;
+	struct be_cmd_get_if_info_resp *if_info;
 
-	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
 		beiscsi_create_ipv4_iface(phba);
+		kfree(if_info);
+	}
 
-	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
 		beiscsi_create_ipv6_iface(phba);
+		kfree(if_info);
+	}
 }
 
 void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
@@ -467,6 +483,12 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
 	uint32_t rm_len = dt_len;
 	int ret = 0 ;
 
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return -EBUSY;
+	}
+
 	nla_for_each_attr(attrib, data, dt_len, rm_len) {
 		iface_param = nla_data(attrib);
 
@@ -512,59 +534,60 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
 		struct iscsi_iface *iface, int param,
 		char *buf)
 {
-	struct be_cmd_get_if_info_resp if_info;
+	struct be_cmd_get_if_info_resp *if_info;
 	int len, ip_type = BE2_IPV4;
 
-	memset(&if_info, 0, sizeof(if_info));
-
 	if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
 		ip_type = BE2_IPV6;
 
 	len = mgmt_get_if_info(phba, ip_type, &if_info);
-	if (len)
+	if (len) {
+		kfree(if_info);
 		return len;
+	}
 
 	switch (param) {
 	case ISCSI_NET_PARAM_IPV4_ADDR:
-		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr);
+		len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr);
 		break;
 	case ISCSI_NET_PARAM_IPV6_ADDR:
-		len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr);
+		len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr);
 		break;
 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
-		if (!if_info.dhcp_state)
+		if (!if_info->dhcp_state)
 			len = sprintf(buf, "static\n");
 		else
 			len = sprintf(buf, "dhcp\n");
 		break;
 	case ISCSI_NET_PARAM_IPV4_SUBNET:
-		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
+		len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask);
 		break;
 	case ISCSI_NET_PARAM_VLAN_ENABLED:
 		len = sprintf(buf, "%s\n",
-			      (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+			      (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
 			      ? "Disabled\n" : "Enabled\n");
 		break;
 	case ISCSI_NET_PARAM_VLAN_ID:
-		if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+		if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
 			return -EINVAL;
 		else
 			len = sprintf(buf, "%d\n",
-				      (if_info.vlan_priority &
+				      (if_info->vlan_priority &
 				      ISCSI_MAX_VLAN_ID));
 		break;
 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
-		if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+		if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
 			return -EINVAL;
 		else
 			len = sprintf(buf, "%d\n",
-				      ((if_info.vlan_priority >> 13) &
+				      ((if_info->vlan_priority >> 13) &
 				      ISCSI_MAX_VLAN_PRIORITY));
 		break;
 	default:
 		WARN_ON(1);
 	}
 
+	kfree(if_info);
 	return len;
 }
 
@@ -577,6 +600,12 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
 	struct be_cmd_get_def_gateway_resp gateway;
 	int len = -ENOSYS;
 
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return -EBUSY;
+	}
+
 	switch (param) {
 	case ISCSI_NET_PARAM_IPV4_ADDR:
 	case ISCSI_NET_PARAM_IPV4_SUBNET:
@@ -672,8 +701,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		session->max_burst = 262144;
 		break;
 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
-		if ((conn->max_xmit_dlength > 65536) ||
-		    (conn->max_xmit_dlength == 0))
+		if (conn->max_xmit_dlength > 65536)
 			conn->max_xmit_dlength = 65536;
 	default:
 		return 0;
@@ -727,7 +755,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 	struct iscsi_cls_host *ihost = shost->shost_data;
 
-	ihost->port_state = (phba->state == BE_ADAPTER_UP) ?
+	ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
 		ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
 }
 
@@ -795,9 +823,16 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 	int status = 0;
 
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-		    "BS_%d : In beiscsi_get_host_param,"
-		    " param= %d\n", param);
+
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return -EBUSY;
+	} else {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In beiscsi_get_host_param,"
+			    " param = %d\n", param);
+	}
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_HWADDRESS:
@@ -840,7 +875,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
 	struct be_cmd_get_nic_conf_resp resp;
 	int rc;
 
-	if (strlen(phba->mac_address))
+	if (phba->mac_addr_set)
 		return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 
 	memset(&resp, 0, sizeof(resp));
@@ -848,6 +883,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
 	if (rc)
 		return rc;
 
+	phba->mac_addr_set = true;
 	memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
 	return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 }
@@ -923,6 +959,10 @@ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
 		      session->max_r2t);
 	AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
 		      (conn->exp_statsn - 1));
+	AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+		      max_recv_data_segment_length, params,
+		      conn->max_recv_dlength);
+
 }
 
 /**
@@ -935,10 +975,19 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct beiscsi_offload_params params;
+	struct beiscsi_hba *phba;
 
-	beiscsi_log(beiscsi_conn->phba, KERN_INFO,
-		    BEISCSI_LOG_CONFIG,
-		    "BS_%d : In beiscsi_conn_start\n");
+	phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return -EBUSY;
+	} else {
+		beiscsi_log(beiscsi_conn->phba, KERN_INFO,
+			    BEISCSI_LOG_CONFIG,
+			    "BS_%d : In beiscsi_conn_start\n");
+	}
 
 	memset(&params, 0, sizeof(struct beiscsi_offload_params));
 	beiscsi_ep = beiscsi_conn->ep;
@@ -960,15 +1009,31 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
  */
 static int beiscsi_get_cid(struct beiscsi_hba *phba)
 {
-	unsigned short cid = 0xFFFF;
-
-	if (!phba->avlbl_cids)
-		return cid;
-
-	cid = phba->cid_array[phba->cid_alloc++];
-	if (phba->cid_alloc == phba->params.cxns_per_ctrl)
-		phba->cid_alloc = 0;
-	phba->avlbl_cids--;
+	unsigned short cid = 0xFFFF, cid_from_ulp;
+	struct ulp_cid_info *cid_info = NULL;
+	uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
+
+	/* Find the ULP which has more CID available */
+	cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
+			  BEISCSI_ULP0_AVLBL_CID(phba) : 0;
+	cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ?
+			  BEISCSI_ULP1_AVLBL_CID(phba) : 0;
+	cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
+			 BEISCSI_ULP0 : BEISCSI_ULP1;
+
+	if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
+		cid_info = phba->cid_array_info[cid_from_ulp];
+		if (!cid_info->avlbl_cids)
+			return cid;
+
+		cid = cid_info->cid_array[cid_info->cid_alloc++];
+
+		if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
+					   phba, cid_from_ulp))
+			cid_info->cid_alloc = 0;
+
+		cid_info->avlbl_cids--;
+	}
 	return cid;
 }
 
@@ -979,10 +1044,22 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
 */
 static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
 {
-	phba->avlbl_cids++;
-	phba->cid_array[phba->cid_free++] = cid;
-	if (phba->cid_free == phba->params.cxns_per_ctrl)
-		phba->cid_free = 0;
+	uint16_t cid_post_ulp;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_wrb_context *pwrb_context;
+	struct ulp_cid_info *cid_info = NULL;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+	cid_post_ulp = pwrb_context->ulp_num;
+
+	cid_info = phba->cid_array_info[cid_post_ulp];
+	cid_info->avlbl_cids++;
+
+	cid_info->cid_array[cid_info->cid_free++] = cid;
+	if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
+		cid_info->cid_free = 0;
 }
 
 /**
@@ -1135,7 +1212,12 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 		return ERR_PTR(ret);
 	}
 
-	if (phba->state != BE_ADAPTER_UP) {
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		ret = -EBUSY;
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In PCI_ERROR Recovery\n");
+		return ERR_PTR(ret);
+	} else if (phba->state & BE_ADAPTER_LINK_DOWN) {
 		ret = -EBUSY;
 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
 			    "BS_%d : The Adapter Port state is Down!!!\n");
@@ -1260,6 +1342,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 		tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
 	}
 
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : PCI_ERROR Recovery\n");
+		goto free_ep;
+	}
+
 	tag = mgmt_invalidate_connection(phba, beiscsi_ep,
 					  beiscsi_ep->ep_cid,
 					  mgmt_invalidate_flag,
@@ -1272,6 +1360,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 
 	beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
+free_ep:
 	beiscsi_free_ep(beiscsi_ep);
 	beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
 	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index a1f5ac7a9806..1f375051483a 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -149,18 +149,25 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
 	 "\t\t\t\tMiscellaneous Events : 0x04\n"
 	 "\t\t\t\tError Handling : 0x08\n"
	 "\t\t\t\tIO Path Events : 0x10\n"
-	 "\t\t\t\tConfiguration Path : 0x20\n");
+	 "\t\t\t\tConfiguration Path : 0x20\n"
+	 "\t\t\t\tiSCSI Protocol : 0x40\n");
 
 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
-DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
+DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
+DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
+	     beiscsi_active_session_disp, NULL);
+DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
+	     beiscsi_free_session_disp, NULL);
 struct device_attribute *beiscsi_attrs[] = {
 	&dev_attr_beiscsi_log_enable,
 	&dev_attr_beiscsi_drvr_ver,
 	&dev_attr_beiscsi_adapter_family,
 	&dev_attr_beiscsi_fw_ver,
-	&dev_attr_beiscsi_active_cid_count,
+	&dev_attr_beiscsi_active_session_count,
+	&dev_attr_beiscsi_free_session_count,
+	&dev_attr_beiscsi_phys_port,
 	NULL,
 };
@@ -239,6 +246,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
 		return SUCCESS;
 	}
 	spin_unlock_bh(&session->lock);
+	/* Invalidate WRB Posted for this Task */
+	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+		      aborted_io_task->pwrb_handle->pwrb,
+		      1);
+
 	conn = aborted_task->conn;
 	beiscsi_conn = conn->dd_data;
 	phba = beiscsi_conn->phba;
@@ -316,6 +328,11 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
 			continue;
 
+		/* Invalidate WRB Posted for this Task */
+		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+			      abrt_io_task->pwrb_handle->pwrb,
+			      1);
+
 		inv_tbl->cid = cid;
 		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
 		num_invalidate++;
@@ -699,30 +716,85 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
 	return status;
 }
 
+/**
+ * beiscsi_get_params()- Set the config paramters
+ * @phba: ptr  device priv structure
+ **/
 static void beiscsi_get_params(struct beiscsi_hba *phba)
 {
-	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
-				    - (phba->fw_config.iscsi_cid_count
-				    + BE2_TMFS
-				    + BE2_NOPOUT_REQ));
-	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
-	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
+	uint32_t total_cid_count = 0;
+	uint32_t total_icd_count = 0;
+	uint8_t ulp_num = 0;
+
+	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		uint32_t align_mask = 0;
+		uint32_t icd_post_per_page = 0;
+		uint32_t icd_count_unavailable = 0;
+		uint32_t icd_start = 0, icd_count = 0;
+		uint32_t icd_start_align = 0, icd_count_align = 0;
+
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+
+			/* Get ICD count that can be posted on each page */
+			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
+					     sizeof(struct iscsi_sge)));
+			align_mask = (icd_post_per_page - 1);
+
+			/* Check if icd_start is aligned ICD per page posting */
+			if (icd_start % icd_post_per_page) {
+				icd_start_align = ((icd_start +
+						    icd_post_per_page) &
+						    ~(align_mask));
+				phba->fw_config.
+					iscsi_icd_start[ulp_num] =
+					icd_start_align;
+			}
+
+			icd_count_align = (icd_count & ~align_mask);
+
+			/* ICD discarded in the process of alignment */
+			if (icd_start_align)
+				icd_count_unavailable = ((icd_start_align -
+							  icd_start) +
+							 (icd_count -
+							  icd_count_align));
+
+			/* Updated ICD count available */
+			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
+					icd_count_unavailable);
+
+			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					"BM_%d : Aligned ICD values\n"
+					"\t ICD Start : %d\n"
+					"\t ICD Count : %d\n"
+					"\t ICD Discarded : %d\n",
+					phba->fw_config.
+					iscsi_icd_start[ulp_num],
+					phba->fw_config.
+					iscsi_icd_count[ulp_num],
+					icd_count_unavailable);
+			break;
+		}
+	}
+
+	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+	phba->params.ios_per_ctrl = (total_icd_count -
+				    (total_cid_count +
+				     BE2_TMFS + BE2_NOPOUT_REQ));
+	phba->params.cxns_per_ctrl = total_cid_count;
+	phba->params.asyncpdus_per_ctrl = total_cid_count;
+	phba->params.icds_per_ctrl = total_icd_count;
 	phba->params.num_sge_per_io = BE2_SGE;
 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
 	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
 	phba->params.eq_timer = 64;
-	phba->params.num_eq_entries =
-	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
-	       + BE2_TMFS) / 512) + 1) * 512;
-	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
-				? 1024 : phba->params.num_eq_entries;
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-		    "BM_%d : phba->params.num_eq_entries=%d\n",
-		    phba->params.num_eq_entries);
-	phba->params.num_cq_entries =
-	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
-	       + BE2_TMFS) / 512) + 1) * 512;
+	phba->params.num_eq_entries = 1024;
+	phba->params.num_cq_entries = 1024;
 	phba->params.wrbs_per_cxn = 256;
 }
@@ -1613,8 +1685,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 
 	WARN_ON(!pasync_handle);
 
-	pasync_handle->cri =
-			BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
+	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
 	pasync_handle->is_header = is_header;
 	pasync_handle->buffer_len = dpl;
 	*pcq_index = index;
@@ -1674,18 +1746,13 @@ hwi_update_async_writables(struct beiscsi_hba *phba,
 }
 
 static void hwi_free_async_msg(struct beiscsi_hba *phba,
-				       unsigned int cri)
+			       struct hwi_async_pdu_context *pasync_ctx,
+			       unsigned int cri)
 {
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_handle, *tmp_handle;
 	struct list_head *plist;
 
-	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
-
 	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
-
 	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
 		list_del(&pasync_handle->link);
@@ -1720,7 +1787,7 @@ hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
 }
 
 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
-				   unsigned int is_header)
+				   unsigned int is_header, uint8_t ulp_num)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_async_pdu_context *pasync_ctx;
@@ -1728,13 +1795,13 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
 	struct list_head *pfree_link, *pbusy_list;
 	struct phys_addr *pasync_sge;
 	unsigned int ring_id, num_entries;
-	unsigned int host_write_num;
+	unsigned int host_write_num, doorbell_offset;
 	unsigned int writables;
 	unsigned int i = 0;
 	u32 doorbell = 0;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
 	num_entries = pasync_ctx->num_entries;
 
 	if (is_header) {
@@ -1742,13 +1809,17 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
 			       pasync_ctx->async_header.free_entries);
 		pfree_link = pasync_ctx->async_header.free_list.next;
 		host_write_num = pasync_ctx->async_header.host_write_ptr;
-		ring_id = phwi_ctrlr->default_pdu_hdr.id;
+		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
+		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
+				  doorbell_offset;
 	} else {
 		writables = min(pasync_ctx->async_data.writables,
 				pasync_ctx->async_data.free_entries);
 		pfree_link = pasync_ctx->async_data.free_list.next;
 		host_write_num = pasync_ctx->async_data.host_write_ptr;
-		ring_id = phwi_ctrlr->default_pdu_data.id;
+		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
+		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
+				  doorbell_offset;
 	}
 
 	writables = (writables / 8) * 8;
@@ -1796,7 +1867,7 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
 			doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
 					<< DB_DEF_PDU_CQPROC_SHIFT;
 
-		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
+		iowrite32(doorbell, phba->db_va + doorbell_offset);
 	}
 }
@@ -1808,9 +1879,13 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
 	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_handle = NULL;
 	unsigned int cq_index = -1;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+		     cri_index));
 
 	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
 					     pdpdu_cqe, &cq_index);
@@ -1819,8 +1894,10 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
 		hwi_update_async_writables(phba, pasync_ctx,
 					   pasync_handle->is_header, cq_index);
 
-	hwi_free_async_msg(phba, pasync_handle->cri);
-	hwi_post_async_buffers(phba, pasync_handle->is_header);
+	hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
+	hwi_post_async_buffers(phba, pasync_handle->is_header,
+			       BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+			       cri_index));
 }
 
 static unsigned int
@@ -1859,7 +1936,7 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
 					    phdr, hdr_len, pfirst_buffer,
 					    offset);
 
-	hwi_free_async_msg(phba, cri);
+	hwi_free_async_msg(phba, pasync_ctx, cri);
 	return 0;
 }
@@ -1875,13 +1952,16 @@ hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
 	struct pdu_base *ppdu;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+		     BE_GET_CRI_FROM_CID(beiscsi_conn->
+				 beiscsi_conn_cid)));
 
 	list_del(&pasync_handle->link);
 	if (pasync_handle->is_header) {
 		pasync_ctx->async_header.busy_entries--;
 		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
-			hwi_free_async_msg(phba, cri);
+			hwi_free_async_msg(phba, pasync_ctx, cri);
 			BUG();
 		}
@@ -1936,9 +2016,14 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
 	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_handle = NULL;
 	unsigned int cq_index = -1;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+		     cri_index));
+
 	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
 					     pdpdu_cqe, &cq_index);
 
@@ -1947,7 +2032,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
 					   pasync_handle->is_header, cq_index);
 
 	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
-	hwi_post_async_buffers(phba, pasync_handle->is_header);
+	hwi_post_async_buffers(phba, pasync_handle->is_header,
+			       BEISCSI_GET_ULP_FROM_CRI(
+			       phwi_ctrlr, cri_index));
 }
 
 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
@@ -2072,8 +2159,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
 				    "BM_%d : Received %s[%d] on CID : %d\n",
 				    cqe_desc[code], code, cid);
 
+			spin_lock_bh(&phba->async_pdu_lock);
 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
 					     (struct i_t_dpdu_cqe *)sol);
+			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case UNSOL_DATA_NOTIFY:
 			beiscsi_log(phba, KERN_INFO,
@@ -2081,8 +2170,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
 				    "BM_%d : Received %s[%d] on CID : %d\n",
 				    cqe_desc[code], code, cid);
 
+			spin_lock_bh(&phba->async_pdu_lock);
 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
 					     (struct i_t_dpdu_cqe *)sol);
+			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case CXN_INVALIDATE_INDEX_NOTIFY:
 		case CMD_INVALIDATED_NOTIFY:
@@ -2110,8 +2201,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
 				    "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
 				    cqe_desc[code], code, cid);
+			spin_lock_bh(&phba->async_pdu_lock);
 			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
 						     (struct i_t_dpdu_cqe *) sol);
+			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
 		case CXN_KILLED_BURST_LEN_MISMATCH:
@@ -2476,26 +2569,19 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
 }
 
+/**
+ * beiscsi_find_mem_req()- Find mem needed
+ * @phba: ptr to HBA struct
+ **/
 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 {
+	uint8_t mem_descr_index, ulp_num;
 	unsigned int num_cq_pages, num_async_pdu_buf_pages;
 	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
 	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
 
 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
 				      sizeof(struct sol_cqe));
-	num_async_pdu_buf_pages =
-		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-			       phba->params.defpdu_hdr_sz);
-	num_async_pdu_buf_sgl_pages =
-		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-			       sizeof(struct phys_addr));
-	num_async_pdu_data_pages =
-		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-			       phba->params.defpdu_data_sz);
-	num_async_pdu_data_sgl_pages =
-		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-			       sizeof(struct phys_addr));
 
 	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
@@ -2517,24 +2603,79 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 		phba->params.icds_per_ctrl;
 	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
 		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
-
-	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
-		num_async_pdu_buf_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
-		num_async_pdu_data_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
-		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
-		num_async_pdu_data_sgl_pages * PAGE_SIZE;
-	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
-		phba->params.asyncpdus_per_ctrl *
-		sizeof(struct async_pdu_handle);
-	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
-		phba->params.asyncpdus_per_ctrl *
-		sizeof(struct async_pdu_handle);
-	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
-		sizeof(struct hwi_async_pdu_context) +
-		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+			num_async_pdu_buf_sgl_pages =
+				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+					       phba, ulp_num) *
+					       sizeof(struct phys_addr));
+
+			num_async_pdu_buf_pages =
+				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+					       phba, ulp_num) *
+					       phba->params.defpdu_hdr_sz);
+
+			num_async_pdu_data_pages =
+				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+					       phba, ulp_num) *
+					       phba->params.defpdu_data_sz);
+
+			num_async_pdu_data_sgl_pages =
+				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+					       phba, ulp_num) *
+					       sizeof(struct phys_addr));
+
+			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+					BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  num_async_pdu_buf_pages *
+					  PAGE_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  num_async_pdu_data_pages *
+					  PAGE_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  num_async_pdu_buf_sgl_pages *
+					  PAGE_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  num_async_pdu_data_sgl_pages *
+					  PAGE_SIZE;
+
+			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+					  sizeof(struct async_pdu_handle);
+
+			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+					  sizeof(struct async_pdu_handle);
+
+			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+					  (ulp_num * MEM_DESCR_OFFSET));
+			phba->mem_req[mem_descr_index] =
+					  sizeof(struct hwi_async_pdu_context) +
+					 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+					  sizeof(struct hwi_async_entry));
+		}
+	}
 }
 
 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
@@ -2576,6 +2717,12 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 
 	mem_descr = phba->init_mem;
 	for (i = 0; i < SE_MEM_MAX; i++) {
+		if (!phba->mem_req[i]) {
+			mem_descr->mem_array = NULL;
+			mem_descr++;
+			continue;
+		}
+
 		j = 0;
 		mem_arr = mem_arr_orig;
 		alloc_size = phba->mem_req[i];
@@ -2697,7 +2844,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 	/* Allocate memory for WRBQ */
 	phwi_ctxt = phwi_ctrlr->phwi_ctxt;
 	phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
-				     phba->fw_config.iscsi_cid_count,
+				     phba->params.cxns_per_ctrl,
 				     GFP_KERNEL);
 	if (!phwi_ctxt->be_wrbq) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -2779,6 +2926,7 @@ init_wrb_hndl_failed:
 
 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 {
+	uint8_t ulp_num;
 	struct hwi_controller *phwi_ctrlr;
 	struct hba_parameters *p = &phba->params;
 	struct hwi_async_pdu_context *pasync_ctx;
@@ -2786,155 +2934,150 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 	unsigned int index, idx, num_per_mem, num_async_data;
 	struct be_mem_descriptor *mem_descr;
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
-
-	phwi_ctrlr = phba->phwi_ctrlr;
-	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET));
+
+			phwi_ctrlr = phba->phwi_ctrlr;
+			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
+				(struct hwi_async_pdu_context *)
+				 mem_descr->mem_array[0].virtual_address;
+
+			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+			memset(pasync_ctx, 0, sizeof(*pasync_ctx));
+
+			pasync_ctx->async_entry =
+					(struct hwi_async_entry *)
+					((long unsigned int)pasync_ctx +
+					sizeof(struct hwi_async_pdu_context));
+
+			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
+						  ulp_num);
+			pasync_ctx->buffer_size = p->defpdu_hdr_sz;
+
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+				(ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
+
+			pasync_ctx->async_header.va_base =
 				mem_descr->mem_array[0].virtual_address;
 
-	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
-	memset(pasync_ctx, 0, sizeof(*pasync_ctx));
-	pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
-					  phba->fw_config.iscsi_cid_count,
-					  GFP_KERNEL);
-	if (!pasync_ctx->async_entry) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
-		return -ENOMEM;
-	}
-
-	pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
-	pasync_ctx->buffer_size = p->defpdu_hdr_sz;
-
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
-
-	pasync_ctx->async_header.va_base =
-		mem_descr->mem_array[0].virtual_address;
-
-	pasync_ctx->async_header.pa_base.u.a64.address =
-		mem_descr->mem_array[0].bus_address.u.a64.address;
-
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
-
-	pasync_ctx->async_header.ring_base =
-		mem_descr->mem_array[0].virtual_address;
-
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
-
-	pasync_ctx->async_header.handle_base =
-		mem_descr->mem_array[0].virtual_address;
-	pasync_ctx->async_header.writables = 0;
-	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
-
-
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_RING;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_DATA_RING va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
-
-	pasync_ctx->async_data.ring_base =
-		mem_descr->mem_array[0].virtual_address;
-
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
-	if (!mem_descr->mem_array[0].virtual_address)
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
+			pasync_ctx->async_header.pa_base.u.a64.address =
+				mem_descr->mem_array[0].
+				bus_address.u.a64.address;
 
-	pasync_ctx->async_data.handle_base =
-		mem_descr->mem_array[0].virtual_address;
-	pasync_ctx->async_data.writables = 0;
-	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
+
+			pasync_ctx->async_header.ring_base =
+				mem_descr->mem_array[0].virtual_address;
 
-	pasync_header_h =
-		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
-	pasync_data_h =
-		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
+
+			pasync_ctx->async_header.handle_base =
+				mem_descr->mem_array[0].virtual_address;
+			pasync_ctx->async_header.writables = 0;
+			INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
+
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+					    virtual_address);
+			} else
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
+
+			pasync_ctx->async_data.ring_base =
+				mem_descr->mem_array[0].virtual_address;
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
-	if (mem_descr->mem_array[0].virtual_address) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : hwi_init_async_pdu_ctx"
-			    " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
-			    mem_descr->mem_array[0].virtual_address);
-	} else
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BM_%d : No Virtual address\n");
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (!mem_descr->mem_array[0].virtual_address)
+				beiscsi_log(phba, KERN_WARNING,
+					    BEISCSI_LOG_INIT,
+					    "BM_%d : No Virtual address for ULP : %d\n",
+					    ulp_num);
 
-	idx = 0;
-	pasync_ctx->async_data.va_base =
-		mem_descr->mem_array[idx].virtual_address;
-	pasync_ctx->async_data.pa_base.u.a64.address =
-		mem_descr->mem_array[idx].bus_address.u.a64.address;
-
-	num_async_data = ((mem_descr->mem_array[idx].size) /
-			phba->params.defpdu_data_sz);
-	num_per_mem = 0;
-
-	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
-		pasync_header_h->cri = -1;
-		pasync_header_h->index = (char)index;
-		INIT_LIST_HEAD(&pasync_header_h->link);
-		pasync_header_h->pbuffer =
-			(void *)((unsigned long)
-			(pasync_ctx->async_header.va_base) +
-			(p->defpdu_hdr_sz * index));
-
-		pasync_header_h->pa.u.a64.address =
-			pasync_ctx->async_header.pa_base.u.a64.address +
-			(p->defpdu_hdr_sz * index);
-
-		list_add_tail(&pasync_header_h->link,
-				&pasync_ctx->async_header.free_list);
-		pasync_header_h++;
-		pasync_ctx->async_header.free_entries++;
-		pasync_ctx->async_header.writables++;
-
-		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
-		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-				header_busy_list);
-		pasync_data_h->cri = -1;
-		pasync_data_h->index = (char)index;
-		INIT_LIST_HEAD(&pasync_data_h->link);
-
-		if (!num_async_data) {
-			num_per_mem = 0;
-			idx++;
+			pasync_ctx->async_data.handle_base =
+				mem_descr->mem_array[0].virtual_address;
+			pasync_ctx->async_data.writables = 0;
+			INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+
+			pasync_header_h =
+				(struct async_pdu_handle *)
+				pasync_ctx->async_header.handle_base;
+			pasync_data_h =
+				(struct async_pdu_handle *)
+				pasync_ctx->async_data.handle_base;
+
+			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+				     (ulp_num * MEM_DESCR_OFFSET);
+			if (mem_descr->mem_array[0].virtual_address) {
+				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+					    "BM_%d : hwi_init_async_pdu_ctx"
+					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
+					    ulp_num,
+					    mem_descr->mem_array[0].
+ virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + idx = 0; pasync_ctx->async_data.va_base = mem_descr->mem_array[idx].virtual_address; pasync_ctx->async_data.pa_base.u.a64.address = @@ -2943,32 +3086,83 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) num_async_data = ((mem_descr->mem_array[idx].size) / phba->params.defpdu_data_sz); - } - pasync_data_h->pbuffer = - (void *)((unsigned long) - (pasync_ctx->async_data.va_base) + - (p->defpdu_data_sz * num_per_mem)); - - pasync_data_h->pa.u.a64.address = - pasync_ctx->async_data.pa_base.u.a64.address + - (p->defpdu_data_sz * num_per_mem); - num_per_mem++; - num_async_data--; + num_per_mem = 0; - list_add_tail(&pasync_data_h->link, - &pasync_ctx->async_data.free_list); - pasync_data_h++; - pasync_ctx->async_data.free_entries++; - pasync_ctx->async_data.writables++; + for (index = 0; index < BEISCSI_GET_CID_COUNT + (phba, ulp_num); index++) { + pasync_header_h->cri = -1; + pasync_header_h->index = (char)index; + INIT_LIST_HEAD(&pasync_header_h->link); + pasync_header_h->pbuffer = + (void *)((unsigned long) + (pasync_ctx-> + async_header.va_base) + + (p->defpdu_hdr_sz * index)); + + pasync_header_h->pa.u.a64.address = + pasync_ctx->async_header.pa_base.u.a64. + address + (p->defpdu_hdr_sz * index); + + list_add_tail(&pasync_header_h->link, + &pasync_ctx->async_header. + free_list); + pasync_header_h++; + pasync_ctx->async_header.free_entries++; + pasync_ctx->async_header.writables++; + + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. + wait_queue.list); + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. + header_busy_list); + pasync_data_h->cri = -1; + pasync_data_h->index = (char)index; + INIT_LIST_HEAD(&pasync_data_h->link); + + if (!num_async_data) { + num_per_mem = 0; + idx++; + pasync_ctx->async_data.va_base = + mem_descr->mem_array[idx]. + virtual_address; + pasync_ctx->async_data.pa_base.u. + a64.address = + mem_descr->mem_array[idx]. + bus_address.u.a64.address; + num_async_data = + ((mem_descr->mem_array[idx]. + size) / + phba->params.defpdu_data_sz); + } + pasync_data_h->pbuffer = + (void *)((unsigned long) + (pasync_ctx->async_data.va_base) + + (p->defpdu_data_sz * num_per_mem)); + + pasync_data_h->pa.u.a64.address = + pasync_ctx->async_data.pa_base.u.a64. + address + (p->defpdu_data_sz * + num_per_mem); + num_per_mem++; + num_async_data--; + + list_add_tail(&pasync_data_h->link, + &pasync_ctx->async_data. + free_list); + pasync_data_h++; + pasync_ctx->async_data.free_entries++; + pasync_ctx->async_data.writables++; + + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 
+ data_busy_list); + } - INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list); + pasync_ctx->async_header.host_write_ptr = 0; + pasync_ctx->async_header.ep_read_ptr = -1; + pasync_ctx->async_data.host_write_ptr = 0; + pasync_ctx->async_data.ep_read_ptr = -1; + } } - pasync_ctx->async_header.host_write_ptr = 0; - pasync_ctx->async_header.ep_read_ptr = -1; - pasync_ctx->async_data.host_write_ptr = 0; - pasync_ctx->async_data.ep_read_ptr = -1; - return 0; } @@ -3164,7 +3358,7 @@ static int beiscsi_create_def_hdr(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr, - unsigned int def_pdu_ring_sz) + unsigned int def_pdu_ring_sz, uint8_t ulp_num) { unsigned int idx; int ret; @@ -3174,36 +3368,42 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba, void *dq_vaddress; idx = 0; - dq = &phwi_context->be_def_hdrq; + dq = &phwi_context->be_def_hdrq[ulp_num]; cq = &phwi_context->be_cq[0]; mem = &dq->dma_mem; mem_descr = phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_RING; + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); dq_vaddress = mem_descr->mem_array[idx].virtual_address; ret = be_fill_queue(dq, mem_descr->mem_array[0].size / sizeof(struct phys_addr), sizeof(struct phys_addr), dq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : be_fill_queue Failed for DEF PDU HDR\n"); + "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", + ulp_num); + return ret; } mem->dma = (unsigned long)mem_descr->mem_array[idx]. bus_address.u.a64.address; ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, def_pdu_ring_sz, - phba->params.defpdu_hdr_sz); + phba->params.defpdu_hdr_sz, + BEISCSI_DEFQ_HDR, ulp_num); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n"); + "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", + ulp_num); + return ret; } - phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id; - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : iscsi def pdu id is %d\n", - phwi_context->be_def_hdrq.id); - hwi_post_async_buffers(phba, 1); + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", + ulp_num, + phwi_context->be_def_hdrq[ulp_num].id); + hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num); return 0; } @@ -3211,7 +3411,7 @@ static int beiscsi_create_def_data(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr, - unsigned int def_pdu_ring_sz) + unsigned int def_pdu_ring_sz, uint8_t ulp_num) { unsigned int idx; int ret; @@ -3221,43 +3421,86 @@ beiscsi_create_def_data(struct beiscsi_hba *phba, void *dq_vaddress; idx = 0; - dataq = &phwi_context->be_def_dataq; + dataq = &phwi_context->be_def_dataq[ulp_num]; cq = &phwi_context->be_cq[0]; mem = &dataq->dma_mem; mem_descr = phba->init_mem; - mem_descr += HWI_MEM_ASYNC_DATA_RING; + mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); dq_vaddress = mem_descr->mem_array[idx].virtual_address; ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / sizeof(struct phys_addr), sizeof(struct phys_addr), dq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : be_fill_queue Failed for DEF PDU DATA\n"); + "BM_%d : be_fill_queue Failed for DEF PDU " + "DATA on ULP : %d\n", + ulp_num); + return ret; } mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
bus_address.u.a64.address; ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, def_pdu_ring_sz, - phba->params.defpdu_data_sz); + phba->params.defpdu_data_sz, + BEISCSI_DEFQ_DATA, ulp_num); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d be_cmd_create_default_pdu_queue" - " Failed for DEF PDU DATA\n"); + " Failed for DEF PDU DATA on ULP : %d\n", + ulp_num); return ret; } - phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : iscsi def data id is %d\n", - phwi_context->be_def_dataq.id); + "BM_%d : iscsi def data id on ULP : %d is %d\n", + ulp_num, + phwi_context->be_def_dataq[ulp_num].id); - hwi_post_async_buffers(phba, 0); + hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : DEFAULT PDU DATA RING CREATED\n"); + "BM_%d : DEFAULT PDU DATA RING CREATED" + "on ULP : %d\n", ulp_num); return 0; } + +static int +beiscsi_post_template_hdr(struct beiscsi_hba *phba) +{ + struct be_mem_descriptor *mem_descr; + struct mem_array *pm_arr; + struct be_dma_mem sgl; + int status, ulp_num; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + pm_arr = mem_descr->mem_array; + + hwi_build_be_sgl_arr(phba, pm_arr, &sgl); + status = be_cmd_iscsi_post_template_hdr( + &phba->ctrl, &sgl); + + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Post Template HDR Failed for" + "ULP_%d\n", ulp_num); + return status; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : Template HDR Pages Posted for" + "ULP_%d\n", ulp_num); + } + } + return 0; +} + static int beiscsi_post_pages(struct beiscsi_hba *phba) { @@ -3265,14 +3508,18 @@ beiscsi_post_pages(struct beiscsi_hba *phba) struct mem_array *pm_arr; unsigned int page_offset, i; struct be_dma_mem sgl; - int status; + int status, ulp_num = 0; mem_descr = phba->init_mem; mem_descr += HWI_MEM_SGE; pm_arr = mem_descr->mem_array; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * - phba->fw_config.iscsi_icd_start) / PAGE_SIZE; + phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; for (i = 0; i < mem_descr->num_elements; i++) { hwi_build_be_sgl_arr(phba, pm_arr, &sgl); status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, @@ -3324,13 +3571,15 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, { unsigned int wrb_mem_index, offset, size, num_wrb_rings; u64 pa_addr_lo; - unsigned int idx, num, i; + unsigned int idx, num, i, ulp_num; struct mem_array *pwrb_arr; void *wrb_vaddr; struct be_dma_mem sgl; struct be_mem_descriptor *mem_descr; struct hwi_wrb_context *pwrb_context; int status; + uint8_t ulp_count = 0, ulp_base_num = 0; + uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; idx = 0; mem_descr = phba->init_mem; @@ -3374,14 +3623,37 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, num_wrb_rings--; } } + + /* Get the ULP Count */ + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + ulp_count++; + ulp_base_num = ulp_num; + cid_count_ulp[ulp_num] = + BEISCSI_GET_CID_COUNT(phba, ulp_num); + } + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 
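/*
 * Round-robin CID distribution (describing the hunk below): when more
 * than one ULP is loaded, ulp_base_num advances to the next ULP for
 * each connection, skips a ULP whose per-ULP CID budget in
 * cid_count_ulp[] is exhausted, and charges the chosen ULP one CID,
 * so the WRB queues end up spread evenly across the loaded ULPs. With
 * a single ULP loaded (ulp_count == 1) the block is skipped and every
 * WRBQ stays on ulp_base_num.
 */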
wrb_mem_index = 0; offset = 0; size = 0; + if (ulp_count > 1) { + ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; + + if (!cid_count_ulp[ulp_base_num]) + ulp_base_num = (ulp_base_num + 1) % + BEISCSI_ULP_COUNT; + + cid_count_ulp[ulp_base_num]--; + } + + hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); status = be_cmd_wrbq_create(&phba->ctrl, &sgl, - &phwi_context->be_wrbq[i]); + &phwi_context->be_wrbq[i], + &phwi_ctrlr->wrb_context[i], + ulp_base_num); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : wrbq create failed."); @@ -3389,7 +3661,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, return status; } pwrb_context = &phwi_ctrlr->wrb_context[i]; - pwrb_context->cid = phwi_context->be_wrbq[i].id; BE_SET_CID_TO_CRI(i, pwrb_context->cid); } kfree(pwrb_arr); @@ -3433,10 +3704,13 @@ static void hwi_cleanup(struct beiscsi_hba *phba) struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct hwi_async_pdu_context *pasync_ctx; - int i, eq_num; + int i, eq_num, ulp_num; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; + + be_cmd_iscsi_remove_template_hdr(ctrl); + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { q = &phwi_context->be_wrbq[i]; if (q->created) @@ -3445,13 +3719,20 @@ static void hwi_cleanup(struct beiscsi_hba *phba) kfree(phwi_context->be_wrbq); free_wrb_handles(phba); - q = &phwi_context->be_def_hdrq; - if (q->created) - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { - q = &phwi_context->be_def_dataq; - if (q->created) - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + q = &phwi_context->be_def_hdrq[ulp_num]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + + q = &phwi_context->be_def_dataq[ulp_num]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; + } + } beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); @@ -3470,9 +3751,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } be_mcc_queues_destroy(phba); - - pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; - kfree(pasync_ctx->async_entry); be_cmd_fw_uninit(ctrl); } @@ -3538,8 +3816,19 @@ static void find_num_cpus(struct beiscsi_hba *phba) BEISCSI_MAX_NUM_CPUS : num_cpus; break; case BE_GEN4: - phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ? - OC_SKH_MAX_NUM_CPUS : num_cpus; + /* + * If eqid_count == 1 fall back to + * INTX mechanism + **/ + if (phba->fw_config.eqid_count == 1) { + enable_msix = 0; + phba->num_cpus = 1; + return; + } + + phba->num_cpus = + (num_cpus > (phba->fw_config.eqid_count - 1)) ? 
+ (phba->fw_config.eqid_count - 1) : num_cpus; break; default: phba->num_cpus = 1; @@ -3552,10 +3841,8 @@ static int hwi_init_port(struct beiscsi_hba *phba) struct hwi_context_memory *phwi_context; unsigned int def_pdu_ring_sz; struct be_ctrl_info *ctrl = &phba->ctrl; - int status; + int status, ulp_num; - def_pdu_ring_sz = - phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; phwi_context->max_eqd = 0; @@ -3588,27 +3875,48 @@ static int hwi_init_port(struct beiscsi_hba *phba) goto error; } - status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, - def_pdu_ring_sz); - if (status != 0) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Default Header not created\n"); - goto error; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + def_pdu_ring_sz = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct phys_addr); + + status = beiscsi_create_def_hdr(phba, phwi_context, + phwi_ctrlr, + def_pdu_ring_sz, + ulp_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Default Header not created for ULP : %d\n", + ulp_num); + goto error; + } + + status = beiscsi_create_def_data(phba, phwi_context, + phwi_ctrlr, + def_pdu_ring_sz, + ulp_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Default Data not created for ULP : %d\n", + ulp_num); + goto error; + } + } } - status = beiscsi_create_def_data(phba, phwi_context, - phwi_ctrlr, def_pdu_ring_sz); + status = beiscsi_post_pages(phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Default Data not created\n"); + "BM_%d : Post SGL Pages Failed\n"); goto error; } - status = beiscsi_post_pages(phba); + status = beiscsi_post_template_hdr(phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Post SGL Pages Failed\n"); - goto error; + "BM_%d : Template HDR Posting for CXN Failed\n"); } status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); @@ -3618,6 +3926,26 @@ static int hwi_init_port(struct beiscsi_hba *phba) goto error; } + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + uint16_t async_arr_idx = 0; + + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + uint16_t cri = 0; + struct hwi_async_pdu_context *pasync_ctx; + + pasync_ctx = HWI_GET_ASYNC_PDU_CTX( + phwi_ctrlr, ulp_num); + for (cri = 0; cri < + phba->params.cxns_per_ctrl; cri++) { + if (ulp_num == BEISCSI_GET_ULP_FROM_CRI + (phwi_ctrlr, cri)) + pasync_ctx->cid_to_async_cri_map[ + phwi_ctrlr->wrb_context[cri].cid] = + async_arr_idx++; + } + } + } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_port success\n"); return 0; @@ -3682,6 +4010,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) (unsigned long)mem_descr->mem_array[j - 1]. 
bus_address.u.a64.address); } + kfree(mem_descr->mem_array); mem_descr++; } @@ -3721,6 +4050,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) struct sgl_handle *psgl_handle; struct iscsi_sge *pfrag; unsigned int arr_index, i, idx; + unsigned int ulp_icd_start, ulp_num = 0; phba->io_sgl_hndl_avbl = 0; phba->eh_sgl_hndl_avbl = 0; @@ -3787,6 +4117,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) "\n BM_%d : mem_descr_sg->num_elements=%d\n", mem_descr_sg->num_elements); + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + + ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; + arr_index = 0; idx = 0; while (idx < mem_descr_sg->num_elements) { @@ -3805,8 +4141,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); pfrag += phba->params.num_sge_per_io; - psgl_handle->sgl_index = - phba->fw_config.iscsi_icd_start + arr_index++; + psgl_handle->sgl_index = ulp_icd_start + arr_index++; } idx++; } @@ -3819,15 +4154,46 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) static int hba_setup_cid_tbls(struct beiscsi_hba *phba) { - int i; + int ret; + uint16_t i, ulp_num; + struct ulp_cid_info *ptr_cid_info = NULL; - phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, - GFP_KERNEL); - if (!phba->cid_array) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Failed to allocate memory in " - "hba_setup_cid_tbls\n"); - return -ENOMEM; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), + GFP_KERNEL); + + if (!ptr_cid_info) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory" + "for ULP_CID_INFO for ULP : %d\n", + ulp_num); + ret = -ENOMEM; + goto free_memory; + + } + + /* Allocate memory for CID array */ + ptr_cid_info->cid_array = kzalloc(sizeof(void *) * + BEISCSI_GET_CID_COUNT(phba, + ulp_num), GFP_KERNEL); + if (!ptr_cid_info->cid_array) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory" + "for CID_ARRAY for ULP : %d\n", + ulp_num); + kfree(ptr_cid_info); + ptr_cid_info = NULL; + ret = -ENOMEM; + + goto free_memory; + } + ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( + phba, ulp_num); + + /* Save the cid_info_array ptr */ + phba->cid_array_info[ulp_num] = ptr_cid_info; + } } phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * phba->params.cxns_per_ctrl, GFP_KERNEL); @@ -3835,9 +4201,9 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Failed to allocate memory in " "hba_setup_cid_tbls\n"); - kfree(phba->cid_array); - phba->cid_array = NULL; - return -ENOMEM; + ret = -ENOMEM; + + goto free_memory; } phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * @@ -3847,18 +4213,44 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) "BM_%d : Failed to allocate memory in" "hba_setup_cid_tbls\n"); - kfree(phba->cid_array); kfree(phba->ep_array); - phba->cid_array = NULL; phba->ep_array = NULL; - return -ENOMEM; + ret = -ENOMEM; + } + + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { + ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; + + ptr_cid_info = phba->cid_array_info[ulp_num]; + ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = + 
phba->phwi_ctrlr->wrb_context[i].cid; + } - for (i = 0; i < phba->params.cxns_per_ctrl; i++) - phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; - phba->avlbl_cids = phba->params.cxns_per_ctrl; + ptr_cid_info->cid_alloc = 0; + ptr_cid_info->cid_free = 0; + } + } return 0; + +free_memory: + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + if (ptr_cid_info) { + kfree(ptr_cid_info->cid_array); + kfree(ptr_cid_info); + phba->cid_array_info[ulp_num] = NULL; + } + } + } + + return ret; } static void hwi_enable_intr(struct beiscsi_hba *phba) @@ -4113,20 +4505,39 @@ static void hwi_purge_eq(struct beiscsi_hba *phba) static void beiscsi_clean_port(struct beiscsi_hba *phba) { - int mgmt_status; - - mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); - if (mgmt_status) - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : mgmt_epfw_cleanup FAILED\n"); + int mgmt_status, ulp_num; + struct ulp_cid_info *ptr_cid_info = NULL; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + mgmt_status = mgmt_epfw_cleanup(phba, ulp_num); + if (mgmt_status) + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : mgmt_epfw_cleanup FAILED" + " for ULP_%d\n", ulp_num); + } + } hwi_purge_eq(phba); hwi_cleanup(phba); kfree(phba->io_sgl_hndl_base); kfree(phba->eh_sgl_hndl_base); - kfree(phba->cid_array); kfree(phba->ep_array); kfree(phba->conn_table); + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + if (ptr_cid_info) { + kfree(ptr_cid_info->cid_array); + kfree(ptr_cid_info); + phba->cid_array_info[ulp_num] = NULL; + } + } + } + } /** @@ -4255,8 +4666,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); } static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, @@ -4481,7 +4892,8 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); return 0; } @@ -4536,7 +4948,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); return 0; } @@ -4638,7 +5051,8 @@ static int beiscsi_mtask(struct iscsi_task *task) doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); return 0; } @@ -4663,8 
+5077,12 @@ static int beiscsi_task_xmit(struct iscsi_task *task) struct beiscsi_hba *phba = NULL; phba = ((struct beiscsi_conn *)conn->dd_data)->phba; - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO, - "BM_%d : scsi_dma_map Failed\n"); + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, + "BM_%d : scsi_dma_map Failed " + "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", + be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), + io_task->libiscsi_itt, scsi_bufflen(sc)); return num_sg; } @@ -4769,10 +5187,12 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) /* * beiscsi_quiesce()- Cleanup Driver resources * @phba: Instance Priv structure + * @unload_state:i Clean or EEH unload state * * Free the OS and HW resources held by the driver **/ -static void beiscsi_quiesce(struct beiscsi_hba *phba) +static void beiscsi_quiesce(struct beiscsi_hba *phba, + uint32_t unload_state) { struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; @@ -4785,28 +5205,37 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba) if (phba->msix_enabled) { for (i = 0; i <= phba->num_cpus; i++) { msix_vec = phba->msix_entries[i].vector; + synchronize_irq(msix_vec); free_irq(msix_vec, &phwi_context->be_eq[i]); kfree(phba->msi_name[i]); } } else - if (phba->pcidev->irq) + if (phba->pcidev->irq) { + synchronize_irq(phba->pcidev->irq); free_irq(phba->pcidev->irq, phba); + } pci_disable_msix(phba->pcidev); - destroy_workqueue(phba->wq); + if (blk_iopoll_enabled) for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; blk_iopoll_disable(&pbe_eq->iopoll); } - beiscsi_clean_port(phba); - beiscsi_free_mem(phba); + if (unload_state == BEISCSI_CLEAN_UNLOAD) { + destroy_workqueue(phba->wq); + beiscsi_clean_port(phba); + beiscsi_free_mem(phba); - beiscsi_unmap_pci_function(phba); - pci_free_consistent(phba->pcidev, - phba->ctrl.mbox_mem_alloced.size, - phba->ctrl.mbox_mem_alloced.va, - phba->ctrl.mbox_mem_alloced.dma); + beiscsi_unmap_pci_function(phba); + pci_free_consistent(phba->pcidev, + phba->ctrl.mbox_mem_alloced.size, + phba->ctrl.mbox_mem_alloced.va, + phba->ctrl.mbox_mem_alloced.dma); + } else { + hwi_purge_eq(phba); + hwi_cleanup(phba); + } cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); } @@ -4823,11 +5252,13 @@ static void beiscsi_remove(struct pci_dev *pcidev) } beiscsi_destroy_def_ifaces(phba); - beiscsi_quiesce(phba); + beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); iscsi_boot_destroy_kset(phba->boot_kset); iscsi_host_remove(phba->shost); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); + pci_disable_pcie_error_reporting(pcidev); + pci_set_drvdata(pcidev, NULL); pci_disable_device(pcidev); } @@ -4842,7 +5273,7 @@ static void beiscsi_shutdown(struct pci_dev *pcidev) return; } - beiscsi_quiesce(phba); + beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); pci_disable_device(pcidev); } @@ -4880,6 +5311,167 @@ beiscsi_hw_health_check(struct work_struct *work) msecs_to_jiffies(1000)); } + +static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct beiscsi_hba *phba = NULL; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + phba->state |= BE_ADAPTER_PCI_ERR; + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH error detected\n"); + + beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD); + + if (state == pci_channel_io_perm_failure) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH : State PERM Failure"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_disable_device(pdev); + + /* The error could 
cause the FW to trigger a flash debug dump. + * Resetting the card while flash dump is in progress + * can cause it not to recover; wait for it to finish. + * Wait only for first function as it is needed only once per + * adapter. + **/ + if (pdev->devfn == 0) + ssleep(30); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) +{ + struct beiscsi_hba *phba = NULL; + int status = 0; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset\n"); + + status = pci_enable_device(pdev); + if (status) + return PCI_ERS_RESULT_DISCONNECT; + + pci_set_master(pdev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* Wait for the CHIP Reset to complete */ + status = be_chk_reset_complete(phba); + if (!status) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset Completed\n"); + } else { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset Completion Failure\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + return PCI_ERS_RESULT_RECOVERED; +} + +static void beiscsi_eeh_resume(struct pci_dev *pdev) +{ + int ret = 0, i; + struct be_eq_obj *pbe_eq; + struct beiscsi_hba *phba = NULL; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + pci_save_state(pdev); + + if (enable_msix) + find_num_cpus(phba); + else + phba->num_cpus = 1; + + if (enable_msix) { + beiscsi_msix_enable(phba); + if (!phba->msix_enabled) + phba->num_cpus = 1; + } + + ret = beiscsi_cmd_reset_function(phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Reset Failed\n"); + goto ret_err; + } + + ret = be_chk_reset_complete(phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to get out of reset.\n"); + goto ret_err; + } + + beiscsi_get_params(phba); + phba->shost->max_id = phba->params.cxns_per_ctrl; + phba->shost->can_queue = phba->params.ios_per_ctrl; + ret = hwi_init_controller(phba); + + for (i = 0; i < MAX_MCC_CMD; i++) { + init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); + phba->ctrl.mcc_tag[i] = i + 1; + phba->ctrl.mcc_numtag[i + 1] = 0; + phba->ctrl.mcc_tag_available++; + } + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + if (blk_iopoll_enabled) { + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + be_iopoll); + blk_iopoll_enable(&pbe_eq->iopoll); + } + + i = (phba->msix_enabled) ? 
i : 0; + /* Work item for MCC handling */ + pbe_eq = &phwi_context->be_eq[i]; + INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); + } else { + if (phba->msix_enabled) { + for (i = 0; i <= phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + INIT_WORK(&pbe_eq->work_cqs, + beiscsi_process_all_cqs); + } + } else { + pbe_eq = &phwi_context->be_eq[0]; + INIT_WORK(&pbe_eq->work_cqs, + beiscsi_process_all_cqs); + } + } + + ret = beiscsi_init_irqs(phba); + if (ret < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : beiscsi_eeh_resume - " + "Failed to beiscsi_init_irqs\n"); + goto ret_err; + } + + hwi_enable_intr(phba); + phba->state &= ~BE_ADAPTER_PCI_ERR; + + return; +ret_err: + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : AER EEH Resume Failed\n"); +} + static int beiscsi_dev_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { @@ -4887,7 +5479,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct be_eq_obj *pbe_eq; - int ret, i; + int ret = 0, i; ret = beiscsi_enable_pci(pcidev); if (ret < 0) { @@ -4903,10 +5495,20 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, goto disable_pci; } + /* Enable EEH reporting */ + ret = pci_enable_pcie_error_reporting(pcidev); + if (ret) + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : PCIe Error Reporting " + "Enabling Failed\n"); + + pci_save_state(pcidev); + /* Initialize Driver configuration Paramters */ beiscsi_hba_attrs_init(phba); phba->fw_timeout = false; + phba->mac_addr_set = false; switch (pcidev->device) { @@ -4929,20 +5531,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, phba->generation = 0; } - if (enable_msix) - find_num_cpus(phba); - else - phba->num_cpus = 1; - - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : num_cpus = %d\n", - phba->num_cpus); - - if (enable_msix) { - beiscsi_msix_enable(phba); - if (!phba->msix_enabled) - phba->num_cpus = 1; - } ret = be_ctrl_init(phba, pcidev); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -4954,27 +5542,43 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, ret = beiscsi_cmd_reset_function(phba); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Reset Failed. Aborting Crashdump\n"); + "BM_%d : Reset Failed\n"); goto hba_free; } ret = be_chk_reset_complete(phba); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Failed to get out of reset." 
- "Aborting Crashdump\n"); + "BM_%d : Failed to get out of reset.\n"); goto hba_free; } spin_lock_init(&phba->io_sgl_lock); spin_lock_init(&phba->mgmt_sgl_lock); spin_lock_init(&phba->isr_lock); + spin_lock_init(&phba->async_pdu_lock); ret = mgmt_get_fw_config(&phba->ctrl, phba); if (ret != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Error getting fw config\n"); goto free_port; } - phba->shost->max_id = phba->fw_config.iscsi_cid_count; + + if (enable_msix) + find_num_cpus(phba); + else + phba->num_cpus = 1; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : num_cpus = %d\n", + phba->num_cpus); + + if (enable_msix) { + beiscsi_msix_enable(phba); + if (!phba->msix_enabled) + phba->num_cpus = 1; + } + + phba->shost->max_id = phba->params.cxns_per_ctrl; beiscsi_get_params(phba); phba->shost->can_queue = phba->params.ios_per_ctrl; ret = beiscsi_init_port(phba); @@ -4985,7 +5589,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, goto free_port; } - for (i = 0; i < MAX_MCC_CMD ; i++) { + for (i = 0; i < MAX_MCC_CMD; i++) { init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); phba->ctrl.mcc_tag[i] = i + 1; phba->ctrl.mcc_numtag[i + 1] = 0; @@ -5089,6 +5693,12 @@ disable_pci: return ret; } +static struct pci_error_handlers beiscsi_eeh_handlers = { + .error_detected = beiscsi_eeh_err_detected, + .slot_reset = beiscsi_eeh_reset, + .resume = beiscsi_eeh_resume, +}; + struct iscsi_transport beiscsi_iscsi_transport = { .owner = THIS_MODULE, .name = DRV_NAME, @@ -5127,7 +5737,8 @@ static struct pci_driver beiscsi_pci_driver = { .probe = beiscsi_dev_probe, .remove = beiscsi_remove, .shutdown = beiscsi_shutdown, - .id_table = beiscsi_pci_id_table + .id_table = beiscsi_pci_id_table, + .err_handler = &beiscsi_eeh_handlers }; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 2c06ef3c02ac..31fa27b4a9b2 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -26,6 +26,7 @@ #include <linux/in.h> #include <linux/ctype.h> #include <linux/module.h> +#include <linux/aer.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -34,9 +35,8 @@ #include <scsi/libiscsi.h> #include <scsi/scsi_transport_iscsi.h> -#include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.0.467.0" +#define BUILD_STR "10.0.659.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -66,7 +66,6 @@ #define MAX_CPUS 64 #define BEISCSI_MAX_NUM_CPUS 7 -#define OC_SKH_MAX_NUM_CPUS 31 #define BEISCSI_VER_STRLEN 32 @@ -74,6 +73,7 @@ #define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ #define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ +#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ #define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ @@ -97,14 +97,19 @@ #define INVALID_SESS_HANDLE 0xFFFFFFFF -#define BE_ADAPTER_UP 0x00000000 -#define BE_ADAPTER_LINK_DOWN 0x00000001 +#define BE_ADAPTER_LINK_UP 0x001 +#define BE_ADAPTER_LINK_DOWN 0x002 +#define BE_ADAPTER_PCI_ERR 0x004 + +#define BEISCSI_CLEAN_UNLOAD 0x01 +#define BEISCSI_EEH_UNLOAD 0x02 /** * hardware needs the async PDU buffers to be posted in multiples of 8 * So have atleast 8 of them by default */ -#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx) +#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num) \ + (phwi->phwi_ctxt->pasync_ctx[ulp_num]) /********* Memory BAR register ************/ 
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc @@ -149,29 +154,41 @@ #define DB_CQ_REARM_SHIFT (29) /* bit 29 */ #define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr) -#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\ - (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id) -#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\ - (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id) +#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\ + (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id) +#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\ + (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id) #define PAGES_REQUIRED(x) \ ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE)) #define BEISCSI_MSI_NAME 20 /* size of msi_name string */ +#define MEM_DESCR_OFFSET 8 +#define BEISCSI_DEFQ_HDR 1 +#define BEISCSI_DEFQ_DATA 0 enum be_mem_enum { HWI_MEM_ADDN_CONTEXT, HWI_MEM_WRB, HWI_MEM_WRBH, HWI_MEM_SGLH, HWI_MEM_SGE, - HWI_MEM_ASYNC_HEADER_BUF, /* 5 */ - HWI_MEM_ASYNC_DATA_BUF, - HWI_MEM_ASYNC_HEADER_RING, - HWI_MEM_ASYNC_DATA_RING, - HWI_MEM_ASYNC_HEADER_HANDLE, - HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */ - HWI_MEM_ASYNC_PDU_CONTEXT, + HWI_MEM_TEMPLATE_HDR_ULP0, + HWI_MEM_ASYNC_HEADER_BUF_ULP0, /* 6 */ + HWI_MEM_ASYNC_DATA_BUF_ULP0, + HWI_MEM_ASYNC_HEADER_RING_ULP0, + HWI_MEM_ASYNC_DATA_RING_ULP0, + HWI_MEM_ASYNC_HEADER_HANDLE_ULP0, + HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */ + HWI_MEM_ASYNC_PDU_CONTEXT_ULP0, + HWI_MEM_TEMPLATE_HDR_ULP1, + HWI_MEM_ASYNC_HEADER_BUF_ULP1, /* 14 */ + HWI_MEM_ASYNC_DATA_BUF_ULP1, + HWI_MEM_ASYNC_HEADER_RING_ULP1, + HWI_MEM_ASYNC_DATA_RING_ULP1, + HWI_MEM_ASYNC_HEADER_HANDLE_ULP1, + HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */ + HWI_MEM_ASYNC_PDU_CONTEXT_ULP1, ISCSI_MEM_GLOBAL_HEADER, SE_MEM_MAX }; @@ -266,9 +283,49 @@ struct invalidate_command_table { unsigned short cid; } __packed; +#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ + (phwi_ctrlr->wrb_context[cri].ulp_num) +struct hwi_wrb_context { + struct list_head wrb_handle_list; + struct list_head wrb_handle_drvr_list; + struct wrb_handle **pwrb_handle_base; + struct wrb_handle **pwrb_handle_basestd; + struct iscsi_wrb *plast_wrb; + unsigned short alloc_index; + unsigned short free_index; + unsigned short wrb_handles_available; + unsigned short cid; + uint8_t ulp_num; /* ULP to which CID binded */ + uint16_t register_set; + uint16_t doorbell_format; + uint32_t doorbell_offset; +}; + +struct ulp_cid_info { + unsigned short *cid_array; + unsigned short avlbl_cids; + unsigned short cid_alloc; + unsigned short cid_free; +}; + +#include "be.h" #define chip_be2(phba) (phba->generation == BE_GEN2) #define chip_be3_r(phba) (phba->generation == BE_GEN3) #define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) + +#define BEISCSI_ULP0 0 +#define BEISCSI_ULP1 1 +#define BEISCSI_ULP_COUNT 2 +#define BEISCSI_ULP0_LOADED 0x01 +#define BEISCSI_ULP1_LOADED 0x02 + +#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \ + (((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids) +#define BEISCSI_ULP0_AVLBL_CID(phba) \ + BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0) +#define BEISCSI_ULP1_AVLBL_CID(phba) \ + BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1) + struct beiscsi_hba { struct hba_parameters params; struct hwi_controller *phwi_ctrlr; @@ -303,17 +360,15 @@ struct beiscsi_hba { spinlock_t io_sgl_lock; spinlock_t mgmt_sgl_lock; spinlock_t isr_lock; + spinlock_t async_pdu_lock; unsigned int age; - unsigned short avlbl_cids; - unsigned short cid_alloc; - unsigned short 
cid_free; struct list_head hba_queue; #define BE_MAX_SESSION 2048 #define BE_SET_CID_TO_CRI(cri_index, cid) \ (phba->cid_to_cri_map[cid] = cri_index) #define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) unsigned short cid_to_cri_map[BE_MAX_SESSION]; - unsigned short *cid_array; + struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT]; struct iscsi_endpoint **ep_array; struct beiscsi_conn **conn_table; struct iscsi_boot_kset *boot_kset; @@ -325,20 +380,21 @@ struct beiscsi_hba { * group together since they are used most frequently * for cid to cri conversion */ - unsigned int iscsi_cid_start; unsigned int phys_port; + unsigned int eqid_count; + unsigned int cqid_count; + unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT]; +#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \ + (phba->fw_config.iscsi_cid_count[ulp_num]) + unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT]; + unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT]; + unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT]; + unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT]; + unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT]; - unsigned int isr_offset; - unsigned int iscsi_icd_start; - unsigned int iscsi_cid_count; - unsigned int iscsi_icd_count; - unsigned int pci_function; - - unsigned short cid_alloc; - unsigned short cid_free; - unsigned short avlbl_cids; unsigned short iscsi_features; - spinlock_t cid_lock; + uint16_t dual_ulp_aware; + unsigned long ulp_supported; } fw_config; unsigned int state; @@ -346,6 +402,7 @@ struct beiscsi_hba { bool ue_detected; struct delayed_work beiscsi_hw_check_task; + bool mac_addr_set; u8 mac_address[ETH_ALEN]; char fw_ver_str[BEISCSI_VER_STRLEN]; char wq_name[20]; @@ -374,6 +431,7 @@ struct beiscsi_conn { struct iscsi_conn *conn; struct beiscsi_hba *phba; u32 exp_statsn; + u32 doorbell_offset; u32 beiscsi_conn_cid; struct beiscsi_endpoint *ep; unsigned short login_in_progress; @@ -474,7 +532,7 @@ struct amap_iscsi_sge { }; struct beiscsi_offload_params { - u32 dw[5]; + u32 dw[6]; }; #define OFFLD_PARAMS_ERL 0x00000003 @@ -504,6 +562,7 @@ struct amap_beiscsi_offload_params { u8 max_r2t[16]; u8 pad[8]; u8 exp_statsn[32]; + u8 max_recv_data_segment_length[32]; }; /* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, @@ -567,7 +626,8 @@ struct hwi_async_pdu_context { unsigned int buffer_size; unsigned int num_entries; - +#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid]) + unsigned short cid_to_async_cri_map[BE_MAX_SESSION]; /** * This is a varying size list! Do not add anything * after this entry!! 
@@ -885,30 +945,32 @@ struct amap_iscsi_target_context_update_wrb_v2 { u8 first_burst_length[24]; /* DWORD 3 */ u8 rsvd3[8]; /* DOWRD 3 */ u8 max_r2t[16]; /* DWORD 4 */ - u8 rsvd4[10]; /* DWORD 4 */ + u8 rsvd4; /* DWORD 4 */ u8 hde; /* DWORD 4 */ u8 dde; /* DWORD 4 */ u8 erl[2]; /* DWORD 4 */ + u8 rsvd5[6]; /* DWORD 4 */ u8 imd; /* DWORD 4 */ u8 ir2t; /* DWORD 4 */ + u8 rsvd6[3]; /* DWORD 4 */ u8 stat_sn[32]; /* DWORD 5 */ - u8 rsvd5[32]; /* DWORD 6 */ - u8 rsvd6[32]; /* DWORD 7 */ + u8 rsvd7[32]; /* DWORD 6 */ + u8 rsvd8[32]; /* DWORD 7 */ u8 max_recv_dataseg_len[24]; /* DWORD 8 */ - u8 rsvd7[8]; /* DWORD 8 */ - u8 rsvd8[32]; /* DWORD 9 */ - u8 rsvd9[32]; /* DWORD 10 */ + u8 rsvd9[8]; /* DWORD 8 */ + u8 rsvd10[32]; /* DWORD 9 */ + u8 rsvd11[32]; /* DWORD 10 */ u8 max_cxns[16]; /* DWORD 11 */ - u8 rsvd10[11]; /* DWORD 11*/ + u8 rsvd12[11]; /* DWORD 11*/ u8 invld; /* DWORD 11 */ - u8 rsvd11;/* DWORD 11*/ + u8 rsvd13;/* DWORD 11*/ u8 dmsg; /* DWORD 11 */ u8 data_seq_inorder; /* DWORD 11 */ u8 pdu_seq_inorder; /* DWORD 11 */ - u8 rsvd12[32]; /*DWORD 12 */ - u8 rsvd13[32]; /* DWORD 13 */ - u8 rsvd14[32]; /* DWORD 14 */ - u8 rsvd15[32]; /* DWORD 15 */ + u8 rsvd14[32]; /*DWORD 12 */ + u8 rsvd15[32]; /* DWORD 13 */ + u8 rsvd16[32]; /* DWORD 14 */ + u8 rsvd17[32]; /* DWORD 15 */ } __packed; @@ -919,6 +981,10 @@ struct be_ring { u32 cidx; /* consumer index */ u32 pidx; /* producer index -- not used by most rings */ u32 item_size; /* size in bytes of one object */ + u8 ulp_num; /* ULP to which CID binded */ + u16 register_set; + u16 doorbell_format; + u32 doorbell_offset; void *va; /* The virtual address of the ring. This * should be last to allow 32 & 64 bit debugger @@ -926,18 +992,6 @@ struct be_ring { */ }; -struct hwi_wrb_context { - struct list_head wrb_handle_list; - struct list_head wrb_handle_drvr_list; - struct wrb_handle **pwrb_handle_base; - struct wrb_handle **pwrb_handle_basestd; - struct iscsi_wrb *plast_wrb; - unsigned short alloc_index; - unsigned short free_index; - unsigned short wrb_handles_available; - unsigned short cid; -}; - struct hwi_controller { struct list_head io_sgl_list; struct list_head eh_sgl_list; @@ -946,8 +1000,8 @@ struct hwi_controller { struct hwi_wrb_context *wrb_context; struct mcc_wrb *pmcc_wrb_base; - struct be_ring default_pdu_hdr; - struct be_ring default_pdu_data; + struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT]; + struct be_ring default_pdu_data[BEISCSI_ULP_COUNT]; struct hwi_context_memory *phwi_ctxt; }; @@ -978,11 +1032,10 @@ struct hwi_context_memory { struct be_eq_obj be_eq[MAX_CPUS]; struct be_queue_info be_cq[MAX_CPUS - 1]; - struct be_queue_info be_def_hdrq; - struct be_queue_info be_def_dataq; - struct be_queue_info *be_wrbq; - struct hwi_async_pdu_context *pasync_ctx; + struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT]; + struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT]; + struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT]; }; /* Logging related definitions */ @@ -992,6 +1045,7 @@ struct hwi_context_memory { #define BEISCSI_LOG_EH 0x0008 /* Error Handler */ #define BEISCSI_LOG_IO 0x0010 /* IO Code Path */ #define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */ +#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */ #define beiscsi_log(phba, level, mask, fmt, arg...) 
\ do { \ diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 245a9595a93a..b2fcac78feaa 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -278,6 +278,18 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba, return tag; } +/** + * mgmt_get_fw_config()- Get the FW config for the function + * @ctrl: ptr to Ctrl Info + * @phba: ptr to the dev priv structure + * + * Get the FW config and resources available for the function. + * The resources are created based on the count received here. + * + * return + * Success: 0 + * Failure: Non-Zero Value + **/ int mgmt_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) { @@ -291,31 +303,79 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl, be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, + EMBED_MBX_MAX_PAYLOAD_SIZE); status = be_mbox_notify(ctrl); if (!status) { + uint8_t ulp_num = 0; struct be_fw_cfg *pfw_cfg; pfw_cfg = req; + + if (!is_chip_be2_be3r(phba)) { + phba->fw_config.eqid_count = pfw_cfg->eqid_count; + phba->fw_config.cqid_count = pfw_cfg->cqid_count; + + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_INIT, + "BG_%d : EQ_Count : %d CQ_Count : %d\n", + phba->fw_config.eqid_count, + phba->fw_config.cqid_count); + } + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (pfw_cfg->ulp[ulp_num].ulp_mode & + BEISCSI_ULP_ISCSI_INI_MODE) + set_bit(ulp_num, + &phba->fw_config.ulp_supported); + phba->fw_config.phys_port = pfw_cfg->phys_port; - phba->fw_config.iscsi_icd_start = - pfw_cfg->ulp[0].icd_base; - phba->fw_config.iscsi_icd_count = - pfw_cfg->ulp[0].icd_count; - phba->fw_config.iscsi_cid_start = - pfw_cfg->ulp[0].sq_base; - phba->fw_config.iscsi_cid_count = - pfw_cfg->ulp[0].sq_count; - if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BG_%d : FW reported MAX CXNS as %d\t" - "Max Supported = %d.\n", - phba->fw_config.iscsi_cid_count, - BE2_MAX_SESSIONS); - phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + phba->fw_config.iscsi_cid_start[ulp_num] = + pfw_cfg->ulp[ulp_num].sq_base; + phba->fw_config.iscsi_cid_count[ulp_num] = + pfw_cfg->ulp[ulp_num].sq_count; + + phba->fw_config.iscsi_icd_start[ulp_num] = + pfw_cfg->ulp[ulp_num].icd_base; + phba->fw_config.iscsi_icd_count[ulp_num] = + pfw_cfg->ulp[ulp_num].icd_count; + + phba->fw_config.iscsi_chain_start[ulp_num] = + pfw_cfg->chain_icd[ulp_num].chain_base; + phba->fw_config.iscsi_chain_count[ulp_num] = + pfw_cfg->chain_icd[ulp_num].chain_count; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : Function loaded on ULP : %d\n" + "\tiscsi_cid_count : %d\n" + "\tiscsi_cid_start : %d\n" + "\t iscsi_icd_count : %d\n" + "\t iscsi_icd_start : %d\n", + ulp_num, + phba->fw_config. + iscsi_cid_count[ulp_num], + phba->fw_config. + iscsi_cid_start[ulp_num], + phba->fw_config. + iscsi_icd_count[ulp_num], + phba->fw_config. 
+ iscsi_icd_start[ulp_num]); + } } + + phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode & + BEISCSI_FUNC_DUA_MODE); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : DUA Mode : 0x%x\n", + phba->fw_config.dual_ulp_aware); + } else { - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_get_fw_config\n"); + status = -EINVAL; } spin_unlock(&ctrl->mbox_lock); @@ -448,7 +508,16 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, return tag; } -int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) +/** + * mgmt_epfw_cleanup()- Inform FW to cleanup data structures. + * @phba: pointer to dev priv structure + * @ulp_num: ULP number. + * + * return + * Success: 0 + * Failure: Non-Zero Value + **/ +int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb = wrb_from_mccq(phba); @@ -462,9 +531,9 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); - req->chute = chute; - req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba)); - req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba)); + req->chute = (1 << ulp_num); + req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num)); + req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num)); status = be_mcc_notify_wait(phba); if (status) @@ -585,6 +654,16 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba, return tag; } +/** + * mgmt_open_connection()- Establish a TCP CXN + * @dst_addr: Destination Address + * @beiscsi_ep: ptr to device endpoint struct + * @nonemb_cmd: ptr to memory allocated for command + * + * return + * Success: Tag number of the MBX Command issued + * Failure: Error code + **/ int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr, struct beiscsi_endpoint *beiscsi_ep, @@ -602,14 +681,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba, struct phys_addr template_address = { 0, 0 }; struct phys_addr *ptemplate_address; unsigned int tag = 0; - unsigned int i; + unsigned int i, ulp_num; unsigned short cid = beiscsi_ep->ep_cid; struct be_sge *sge; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba); - def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba); + + ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num; + + def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num); + def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num); ptemplate_address = &template_address; ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); @@ -748,11 +830,14 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va); if (rc) { + /* Check if the IOCTL needs to be re-issued */ + if (rc == -EAGAIN) + return rc; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : mgmt_exec_nonemb_cmd Failed status\n"); - rc = -EIO; goto free_cmd; } @@ -861,7 +946,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, uint32_t boot_proto) { struct be_cmd_get_def_gateway_resp gtway_addr_set; - struct be_cmd_get_if_info_resp if_info; + struct be_cmd_get_if_info_resp *if_info; struct be_cmd_set_dhcp_req *dhcpreq; struct be_cmd_rel_dhcp_req *reldhcp; struct be_dma_mem nonemb_cmd; @@ -872,16 +957,17 @@ int mgmt_set_ip(struct 
beiscsi_hba *phba, if (mgmt_get_all_if_id(phba)) return -EIO; - memset(&if_info, 0, sizeof(if_info)); ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? BE2_IPV6 : BE2_IPV4 ; rc = mgmt_get_if_info(phba, ip_type, &if_info); - if (rc) + if (rc) { + kfree(if_info); return rc; + } if (boot_proto == ISCSI_BOOTPROTO_DHCP) { - if (if_info.dhcp_state) { + if (if_info->dhcp_state) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : DHCP Already Enabled\n"); return 0; @@ -894,9 +980,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba, IP_V6_LEN : IP_V4_LEN; } else { - if (if_info.dhcp_state) { + if (if_info->dhcp_state) { - memset(&if_info, 0, sizeof(if_info)); + memset(if_info, 0, sizeof(*if_info)); rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, sizeof(*reldhcp)); @@ -919,8 +1005,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba, } /* Delete the Static IP Set */ - if (if_info.ip_addr.addr[0]) { - rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL, + if (if_info->ip_addr.addr[0]) { + rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL, IP_ACTION_DEL); if (rc) return rc; @@ -966,7 +1052,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); } else { - return mgmt_static_ip_modify(phba, &if_info, ip_param, + return mgmt_static_ip_modify(phba, if_info, ip_param, subnet_param, IP_ACTION_ADD); } @@ -1031,27 +1117,64 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, } int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, - struct be_cmd_get_if_info_resp *if_info) + struct be_cmd_get_if_info_resp **if_info) { struct be_cmd_get_if_info_req *req; struct be_dma_mem nonemb_cmd; + uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp); int rc; if (mgmt_get_all_if_id(phba)) return -EIO; - rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, - OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, - sizeof(*if_info)); - if (rc) - return rc; + do { + rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, + OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, + ioctl_size); + if (rc) + return rc; - req = nonemb_cmd.va; - req->interface_hndl = phba->interface_handle; - req->ip_type = ip_type; + req = nonemb_cmd.va; + req->interface_hndl = phba->interface_handle; + req->ip_type = ip_type; + + /* Allocate memory for if_info */ + *if_info = kzalloc(ioctl_size, GFP_KERNEL); + if (!*if_info) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, + "BG_%d : Memory Allocation Failure\n"); + + /* Free the DMA memory for the IOCTL issuing */ + pci_free_consistent(phba->ctrl.pdev, + nonemb_cmd.size, + nonemb_cmd.va, + nonemb_cmd.dma); + return -ENOMEM; + } + + rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info, + ioctl_size); + + /* Check if the error is because of Insufficent_Buffer */ + if (rc == -EAGAIN) { + + /* Get the new memory size */ + ioctl_size = ((struct be_cmd_resp_hdr *) + nonemb_cmd.va)->actual_resp_len; + ioctl_size += sizeof(struct be_cmd_req_hdr); - return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info, - sizeof(*if_info)); + /* Free the previous allocated DMA memory */ + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, + nonemb_cmd.dma); + + /* Free the virtual memory */ + kfree(*if_info); + } else + break; + } while (true); + return rc; } int mgmt_get_nic_conf(struct beiscsi_hba *phba, @@ -1281,7 +1404,7 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, } /** - * beiscsi_active_cid_disp()- Display Sessions Active + * beiscsi_active_session_disp()- 
Display Sessions Active * @dev: ptr to device not used. * @attr: device attribute, not used. * @buf: contains formatted text Session Count @@ -1290,14 +1413,56 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, * size of the formatted string **/ ssize_t -beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr, +beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct beiscsi_hba *phba = iscsi_host_priv(shost); + uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num); + total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num); + len += snprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, + (total_cids - avlbl_cids)); + } else + len += snprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, 0); + } - return snprintf(buf, PAGE_SIZE, "%d\n", - (phba->params.cxns_per_ctrl - phba->avlbl_cids)); + return len; +} + +/** + * beiscsi_free_session_disp()- Display Available Session + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Session Count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + uint16_t ulp_num, len = 0; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) + len += snprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, + BEISCSI_ULP_AVLBL_CID(phba, ulp_num)); + else + len += snprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, 0); + } + + return len; } /** @@ -1338,6 +1503,25 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, } } +/** + * beiscsi_phys_port_disp()- Display Physical Port Identifier + * @dev: ptr to device not used. + * @attr: device attribute, not used.
+ * @buf: contains formatted text port identifier + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n", + phba->fw_config.phys_port); +} void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle, @@ -1411,10 +1595,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, memset(pwrb, 0, sizeof(*pwrb)); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, - max_burst_length, pwrb, params->dw[offsetof - (struct amap_beiscsi_offload_params, - max_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_burst_length, pwrb, params->dw[offsetof (struct amap_beiscsi_offload_params, @@ -1436,7 +1616,9 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, params->dw[offsetof(struct amap_beiscsi_offload_params, first_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, - max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN); + max_recv_dataseg_len, pwrb, + params->dw[offsetof(struct amap_beiscsi_offload_params, + max_recv_data_segment_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_cxns, pwrb, BEISCSI_MAX_CXNS); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb, diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 04af7e74fe48..01b8c97284c0 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -294,7 +294,7 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba, struct be_cmd_get_nic_conf_resp *mac); int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, - struct be_cmd_get_if_info_resp *if_info); + struct be_cmd_get_if_info_resp **if_info); int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, struct be_cmd_get_def_gateway_resp *gateway); @@ -315,12 +315,19 @@ ssize_t beiscsi_drvr_ver_disp(struct device *dev, ssize_t beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, char *buf); -ssize_t beiscsi_active_cid_disp(struct device *dev, - struct device_attribute *attr, char *buf); +ssize_t beiscsi_active_session_disp(struct device *dev, + struct device_attribute *attr, char *buf); ssize_t beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, char *buf); + +ssize_t beiscsi_free_session_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_phys_port_disp(struct device *dev, + struct device_attribute *attr, char *buf); + void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle, struct be_mem_descriptor *mem_descr); diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 94d5d0102f7d..42bcb970445a 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -296,6 +296,7 @@ wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn); +void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname); void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, struct bfa_lport_info_s *port_info); void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c 
b/drivers/scsi/bfa/bfa_fcs_lport.c index 2f61a5af3658..f5e4e61a0fd7 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -1097,6 +1097,17 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); } +void +bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, + char *symname) +{ + strcpy(port->port_cfg.sym_name.symname, symname); + + if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) + bfa_fcs_lport_ns_util_send_rspn_id( + BFA_FCS_GET_NS_FROM_PORT(port), NULL); +} + /* * fcs_lport_api */ @@ -5140,9 +5151,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced) u8 *psymbl = &symbl[0]; int len; - if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) - return; - /* Avoid sending RSPN in the following states. */ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) || bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) || diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 2e6e3ac2d41b..fc80a325a1e6 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -766,49 +766,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) bfad->pcidev = pdev; /* Adjust PCIe Maximum Read Request Size */ - if (pcie_max_read_reqsz > 0) { - int pcie_cap_reg; - u16 pcie_dev_ctl; - u16 mask = 0xffff; - - switch (pcie_max_read_reqsz) { - case 128: - mask = 0x0; - break; - case 256: - mask = 0x1000; - break; - case 512: - mask = 0x2000; - break; - case 1024: - mask = 0x3000; - break; - case 2048: - mask = 0x4000; - break; - case 4096: - mask = 0x5000; - break; - default: - break; - } - - pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (mask != 0xffff && pcie_cap_reg) { - pcie_cap_reg += 0x08; - pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl); - if ((pcie_dev_ctl & 0x7000) != mask) { - printk(KERN_WARNING "BFA[%s]: " + if (pci_is_pcie(pdev) && pcie_max_read_reqsz) { + if (pcie_max_read_reqsz >= 128 && + pcie_max_read_reqsz <= 4096 && + is_power_of_2(pcie_max_read_reqsz)) { + int max_rq = pcie_get_readrq(pdev); + printk(KERN_WARNING "BFA[%s]: " "pcie_max_read_request_size is %d, " - "reset to %d\n", bfad->pci_name, - (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7, + "reset to %d\n", bfad->pci_name, max_rq, pcie_max_read_reqsz); - - pcie_dev_ctl &= ~0x7000; - pci_write_config_word(pdev, pcie_cap_reg, - pcie_dev_ctl | mask); - } + pcie_set_readrq(pdev, pcie_max_read_reqsz); + } else { + printk(KERN_WARNING "BFA[%s]: invalid " + "pcie_max_read_request_size %d ignored\n", + bfad->pci_name, pcie_max_read_reqsz); } } diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index e9a681d31223..40be670a1cbc 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -593,11 +593,8 @@ bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport) return; spin_lock_irqsave(&bfad->bfad_lock, flags); - if (strlen(sym_name) > 0) { - strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name); - bfa_fcs_lport_ns_util_send_rspn_id( - BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL); - } + if (strlen(sym_name) > 0) + bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 08b22a901c25..1ebf3fb683e6 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -64,7 +64,7 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.14" +#define 
BNX2FC_VERSION "2.4.1" #define PFX "bnx2fc: " @@ -105,7 +105,7 @@ #define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) #define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) -#define BNX2FC_5771X_DB_PAGE_SIZE 128 +#define BNX2X_DB_SHIFT 3 #define BNX2FC_TASK_SIZE 128 #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 69ac55495c1d..9b948505d118 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Mar 08, 2013" +#define DRV_MODULE_RELDATE "Sep 17, 2013" static char version[] = @@ -542,8 +542,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); if (vn_port) { port = lport_priv(vn_port); - if (compare_ether_addr(port->data_src_addr, dest_mac) - != 0) { + if (!ether_addr_equal(port->data_src_addr, dest_mac)) { BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); put_cpu(); kfree_skb(skb); @@ -1381,6 +1380,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba, return NULL; } ctlr = fcoe_ctlr_device_priv(ctlr_dev); + ctlr->cdev = ctlr_dev; interface = fcoe_ctlr_priv(ctlr); dev_hold(netdev); kref_init(&interface->kref); @@ -2004,6 +2004,24 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); } +/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */ +static int __bnx2fc_disable(struct fcoe_ctlr *ctlr) +{ + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); + + if (interface->enabled == true) { + if (!ctlr->lp) { + pr_err(PFX "__bnx2fc_disable: lport not found\n"); + return -ENODEV; + } else { + interface->enabled = false; + fcoe_ctlr_link_down(ctlr); + fcoe_clean_pending_queue(ctlr->lp); + } + } + return 0; +} + /** * Deperecated: Use bnx2fc_enabled() */ @@ -2018,20 +2036,34 @@ static int bnx2fc_disable(struct net_device *netdev) interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); - if (!interface || !ctlr->lp) { + + if (!interface) { rc = -ENODEV; - printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); + pr_err(PFX "bnx2fc_disable: interface not found\n"); } else { - interface->enabled = false; - fcoe_ctlr_link_down(ctlr); - fcoe_clean_pending_queue(ctlr->lp); + rc = __bnx2fc_disable(ctlr); } - mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } +static int __bnx2fc_enable(struct fcoe_ctlr *ctlr) +{ + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); + + if (interface->enabled == false) { + if (!ctlr->lp) { + pr_err(PFX "__bnx2fc_enable: lport not found\n"); + return -ENODEV; + } else if (!bnx2fc_link_ok(ctlr->lp)) { + fcoe_ctlr_link_up(ctlr); + interface->enabled = true; + } + } + return 0; +} + /** * Deprecated: Use bnx2fc_enabled() */ @@ -2046,12 +2078,11 @@ static int bnx2fc_enable(struct net_device *netdev) interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); - if (!interface || !ctlr->lp) { + if (!interface) { rc = -ENODEV; - printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); - } else if (!bnx2fc_link_ok(ctlr->lp)) { - fcoe_ctlr_link_up(ctlr); - interface->enabled = true; + pr_err(PFX "bnx2fc_enable: interface not found\n"); + } else { + rc = __bnx2fc_enable(ctlr); } 
mutex_unlock(&bnx2fc_dev_lock); @@ -2072,14 +2103,12 @@ static int bnx2fc_enable(struct net_device *netdev) static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) { struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); - struct fc_lport *lport = ctlr->lp; - struct net_device *netdev = bnx2fc_netdev(lport); switch (cdev->enabled) { case FCOE_CTLR_ENABLED: - return bnx2fc_enable(netdev); + return __bnx2fc_enable(ctlr); case FCOE_CTLR_DISABLED: - return bnx2fc_disable(netdev); + return __bnx2fc_disable(ctlr); case FCOE_CTLR_UNUSED: default: return -ENOTSUPP; diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index c0d035a8f8f9..46a37657307f 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1421,8 +1421,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) reg_base = pci_resource_start(hba->pcidev, BNX2X_DOORBELL_PCI_BAR); - reg_off = BNX2FC_5771X_DB_PAGE_SIZE * - (context_id & 0x1FFFF) + DPM_TRIGER_TYPE; + reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); if (!tgt->ctx_base) return -ENOMEM; diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 575142e92d9c..ed880891cb7c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1246,6 +1246,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ rc = bnx2fc_expl_logo(lport, io_req); + /* This only occurs when a task abort was requested while ABTS + is in progress. Setting the IO_CLEANUP flag will skip the + RRQ process in the case when the fw generated SCSI_CMD cmpl + was a result of the ABTS request rather than the CLEANUP + request */ + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); goto out; } diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 6940f0930a84..c73bbcb63c02 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -64,7 +64,7 @@ #define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 #define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 -#define BNX2I_5771X_DBELL_PAGE_SIZE 128 +#define BNX2X_DB_SHIFT 3 /* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ #define MAX_BD_LENGTH 65535 diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 886e2f9eb0ea..e4cf23df4b4f 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -2738,8 +2738,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { reg_base = pci_resource_start(ep->hba->pcidev, BNX2X_DOORBELL_PCI_BAR); - reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) + - DPM_TRIGER_TYPE; + reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); goto arm_cq; } diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 0eb35b9b3784..0eaec4748957 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -852,22 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw) return 0; } -static void -csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) -{ - uint16_t val; - int pcie_cap; - - if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { - pci_read_config_word(hw->pdev, - pcie_cap + PCI_EXP_DEVCTL2, &val); - val &= 0xfff0; - val |= range ; - pci_write_config_word(hw->pdev, - pcie_cap + PCI_EXP_DEVCTL2, val); - } -} -
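/*
 * Illustrative sketch, not part of the patch: the helper removed above
 * open-coded a read-modify-write of the PCIe Device Control 2 register.
 * The csio_hw_configure() hunk below replaces it with the PCI core's
 * pcie_capability_clear_and_set_word(), which locates the PCIe capability
 * itself and performs the same update in one call:
 *
 *	if (pci_is_pcie(hw->pdev))
 *		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
 *				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
 */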
/*****************************************************************************/ /* HW State machine assists */ /*****************************************************************************/ @@ -2069,8 +2053,10 @@ csio_hw_configure(struct csio_hw *hw) goto out; } - /* Set pci completion timeout value to 4 seconds. */ - csio_set_pcie_completion_timeout(hw, 0xd); + /* Set PCIe completion timeout to 4 seconds */ + if (pci_is_pcie(hw->pdev)) + pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index a726187abe5c..83d9bf6fa6ca 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -308,6 +308,8 @@ struct AdapterCtlBlk { struct timer_list waiting_timer; struct timer_list selto_timer; + unsigned long last_reset; + u16 srb_count; u8 sel_timeout; @@ -860,9 +862,9 @@ static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to) init_timer(&acb->waiting_timer); acb->waiting_timer.function = waiting_timeout; acb->waiting_timer.data = (unsigned long) acb; - if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2)) + if (time_before(jiffies + to, acb->last_reset - HZ / 2)) acb->waiting_timer.expires = - acb->scsi_host->last_reset - HZ / 2 + 1; + acb->last_reset - HZ / 2 + 1; else acb->waiting_timer.expires = jiffies + to + 1; add_timer(&acb->waiting_timer); @@ -1319,7 +1321,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd) udelay(500); /* We may be in serious trouble. Wait some seconds */ - acb->scsi_host->last_reset = + acb->last_reset = jiffies + 3 * HZ / 2 + HZ * acb->eeprom.delay_time; @@ -1462,9 +1464,9 @@ static void selto_timer(struct AdapterCtlBlk *acb) acb->selto_timer.function = selection_timeout_missed; acb->selto_timer.data = (unsigned long) acb; if (time_before - (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2)) + (jiffies + HZ, acb->last_reset + HZ / 2)) acb->selto_timer.expires = - acb->scsi_host->last_reset + HZ / 2 + 1; + acb->last_reset + HZ / 2 + 1; else acb->selto_timer.expires = jiffies + HZ + 1; add_timer(&acb->selto_timer); @@ -1535,7 +1537,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, } /* Allow starting of SCSI commands half a second before we allow the mid-level * to queue them again after a reset */ - if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) { + if (time_before(jiffies, acb->last_reset - HZ / 2)) { dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n"); return 1; } @@ -3031,7 +3033,7 @@ static void disconnect(struct AdapterCtlBlk *acb) dprintkl(KERN_ERR, "disconnect: No such device\n"); udelay(500); /* Suspend queue for a while */ - acb->scsi_host->last_reset = + acb->last_reset = jiffies + HZ / 2 + HZ * acb->eeprom.delay_time; clear_fifo(acb, "disconnectEx"); @@ -3053,7 +3055,7 @@ static void disconnect(struct AdapterCtlBlk *acb) waiting_process_next(acb); } else if (srb->state & SRB_ABORT_SENT) { dcb->flag &= ~ABORT_DEV_; - acb->scsi_host->last_reset = jiffies + HZ / 2 + 1; + acb->last_reset = jiffies + HZ / 2 + 1; dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n"); doing_srb_done(acb, DID_ABORT, srb->cmd, 1); waiting_process_next(acb); @@ -3649,7 +3651,7 @@ static void scsi_reset_detect(struct AdapterCtlBlk *acb) /*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */ udelay(500); /* Maybe we locked up the bus? Then lets wait even longer ... 
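 * (Illustrative aside, not part of the patch: throughout this dc395x diff
 * the reset timestamp moves from the mid-layer's host->last_reset into the
 * driver-private struct AdapterCtlBlk, so the pattern becomes, using only
 * fields visible in these hunks:
 *
 *	acb->last_reset = jiffies + HZ / 2 + HZ * acb->eeprom.delay_time;
 *	if (time_before(jiffies, acb->last_reset - HZ / 2))
 *		return 1;	// refuse commands until the reset settles
 *
 * which decouples the driver from struct Scsi_Host for reset bookkeeping.)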
*/ - acb->scsi_host->last_reset = + acb->last_reset = jiffies + 5 * HZ / 2 + HZ * acb->eeprom.delay_time; @@ -4426,7 +4428,7 @@ static void adapter_init_scsi_host(struct Scsi_Host *host) host->dma_channel = -1; host->unique_id = acb->io_port_base; host->irq = acb->irq_level; - host->last_reset = jiffies; + acb->last_reset = jiffies; host->max_id = 16; if (host->max_id - 1 == eeprom->scsi_id) @@ -4484,7 +4486,7 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb) /*spin_unlock_irq (&io_request_lock); */ udelay(500); - acb->scsi_host->last_reset = + acb->last_reset = jiffies + HZ / 2 + HZ * acb->eeprom.delay_time; diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 68adb8955d2d..5248c888552b 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -481,6 +481,11 @@ static int alua_check_sense(struct scsi_device *sdev, * Power On, Reset, or Bus Device Reset, just retry. */ return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) + /* + * Device internal reset + */ + return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) /* * Mode Parameters Changed @@ -517,12 +522,13 @@ static int alua_check_sense(struct scsi_device *sdev, /* * alua_rtpg - Evaluate REPORT TARGET GROUP STATES * @sdev: the device to be evaluated. + * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state * * Evaluate the Target Port Group State. * Returns SCSI_DH_DEV_OFFLINED if the path is * found to be unusable. */ -static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) +static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition) { struct scsi_sense_hdr sense_hdr; int len, k, off, valid_states = 0; @@ -594,7 +600,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) else h->transition_tmo = ALUA_FAILOVER_TIMEOUT; - if (orig_transition_tmo != h->transition_tmo) { + if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) { sdev_printk(KERN_INFO, sdev, "%s: transition timeout set to %d seconds\n", ALUA_DH_NAME, h->transition_tmo); @@ -632,14 +638,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) switch (h->state) { case TPGS_STATE_TRANSITIONING: - if (time_before(jiffies, expiry)) { - /* State transition, retry */ - interval += 2000; - msleep(interval); - goto retry; + if (wait_for_transition) { + if (time_before(jiffies, expiry)) { + /* State transition, retry */ + interval += 2000; + msleep(interval); + goto retry; + } + err = SCSI_DH_RETRY; + } else { + err = SCSI_DH_OK; } + /* Transitioning time exceeded, set port to standby */ - err = SCSI_DH_RETRY; h->state = TPGS_STATE_STANDBY; break; case TPGS_STATE_OFFLINE: @@ -673,7 +684,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) if (err != SCSI_DH_OK) goto out; - err = alua_rtpg(sdev, h); + err = alua_rtpg(sdev, h, 0); if (err != SCSI_DH_OK) goto out; @@ -733,7 +744,7 @@ static int alua_activate(struct scsi_device *sdev, int err = SCSI_DH_OK; int stpg = 0; - err = alua_rtpg(sdev, h); + err = alua_rtpg(sdev, h, 1); if (err != SCSI_DH_OK) goto out; diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 69c915aa77c2..4b9cf93f3fb6 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -786,6 +786,7 @@ static const struct 
scsi_dh_devlist rdac_dev_list[] = { {"IBM", "1742"}, {"IBM", "1745"}, {"IBM", "1746"}, + {"IBM", "1813"}, {"IBM", "1814"}, {"IBM", "1815"}, {"IBM", "1818"}, diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 19e1b422260a..c0ae8fa57a3b 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -448,19 +448,8 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd } rmb(); - /* - * TODO: I need to block here if I am processing ioctl cmds - * but if the outstanding cmds all finish before the ioctl, - * the scsi-core will not know to start sending cmds to me again. - * I need to a way to restart the scsi-cores queues or should I block - * calling scsi_done on the outstanding cmds instead - * for now we don't set the IOCTL state - */ - if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) { - pHba->host->last_reset = jiffies; - pHba->host->resetting = 1; - return 1; - } + if ((pHba->state) & DPTI_STATE_RESET) + return SCSI_MLQUEUE_HOST_BUSY; // TODO if the cmd->device if offline then I may need to issue a bus rescan // followed by a get_lct to see if the device is there anymore @@ -1811,21 +1800,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) } do { - if(pHba->host) + /* + * Stop any new commands from entering the + * controller while processing the ioctl + */ + if (pHba->host) { + scsi_block_requests(pHba->host); spin_lock_irqsave(pHba->host->host_lock, flags); - // This state stops any new commands from enterring the - // controller while processing the ioctl -// pHba->state |= DPTI_STATE_IOCTL; -// We can't set this now - The scsi subsystem sets host_blocked and -// the queue empties and stops. We need a way to restart the queue + } rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); if (rcode != 0) printk("adpt_i2o_passthru: post wait failed %d %p\n", rcode, reply); -// pHba->state &= ~DPTI_STATE_IOCTL; - if(pHba->host) + if (pHba->host) { spin_unlock_irqrestore(pHba->host->host_lock, flags); - } while(rcode == -ETIMEDOUT); + scsi_unblock_requests(pHba->host); + } + } while (rcode == -ETIMEDOUT); if(rcode){ goto cleanup; diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h index beded716f93f..aeb046186c84 100644 --- a/drivers/scsi/dpti.h +++ b/drivers/scsi/dpti.h @@ -202,7 +202,6 @@ struct adpt_channel { // HBA state flags #define DPTI_STATE_RESET (0x01) -#define DPTI_STATE_IOCTL (0x02) typedef struct _adpt_hba { struct _adpt_hba *next; diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index 0838e265e0b9..3fd305d6b67d 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -799,47 +799,47 @@ struct esas2r_adapter { struct esas2r_target *targetdb_end; unsigned char *regs; unsigned char *data_window; - u32 volatile flags; - #define AF_PORT_CHANGE (u32)(0x00000001) - #define AF_CHPRST_NEEDED (u32)(0x00000004) - #define AF_CHPRST_PENDING (u32)(0x00000008) - #define AF_CHPRST_DETECTED (u32)(0x00000010) - #define AF_BUSRST_NEEDED (u32)(0x00000020) - #define AF_BUSRST_PENDING (u32)(0x00000040) - #define AF_BUSRST_DETECTED (u32)(0x00000080) - #define AF_DISABLED (u32)(0x00000100) - #define AF_FLASH_LOCK (u32)(0x00000200) - #define AF_OS_RESET (u32)(0x00002000) - #define AF_FLASHING (u32)(0x00004000) - #define AF_POWER_MGT (u32)(0x00008000) - #define AF_NVR_VALID (u32)(0x00010000) - #define AF_DEGRADED_MODE (u32)(0x00020000) - #define AF_DISC_PENDING (u32)(0x00040000) - #define AF_TASKLET_SCHEDULED (u32)(0x00080000) - #define AF_HEARTBEAT
(u32)(0x00200000) - #define AF_HEARTBEAT_ENB (u32)(0x00400000) - #define AF_NOT_PRESENT (u32)(0x00800000) - #define AF_CHPRST_STARTED (u32)(0x01000000) - #define AF_FIRST_INIT (u32)(0x02000000) - #define AF_POWER_DOWN (u32)(0x04000000) - #define AF_DISC_IN_PROG (u32)(0x08000000) - #define AF_COMM_LIST_TOGGLE (u32)(0x10000000) - #define AF_LEGACY_SGE_MODE (u32)(0x20000000) - #define AF_DISC_POLLED (u32)(0x40000000) - u32 volatile flags2; - #define AF2_SERIAL_FLASH (u32)(0x00000001) - #define AF2_DEV_SCAN (u32)(0x00000002) - #define AF2_DEV_CNT_OK (u32)(0x00000004) - #define AF2_COREDUMP_AVAIL (u32)(0x00000008) - #define AF2_COREDUMP_SAVED (u32)(0x00000010) - #define AF2_VDA_POWER_DOWN (u32)(0x00000100) - #define AF2_THUNDERLINK (u32)(0x00000200) - #define AF2_THUNDERBOLT (u32)(0x00000400) - #define AF2_INIT_DONE (u32)(0x00000800) - #define AF2_INT_PENDING (u32)(0x00001000) - #define AF2_TIMER_TICK (u32)(0x00002000) - #define AF2_IRQ_CLAIMED (u32)(0x00004000) - #define AF2_MSI_ENABLED (u32)(0x00008000) + long flags; + #define AF_PORT_CHANGE 0 + #define AF_CHPRST_NEEDED 1 + #define AF_CHPRST_PENDING 2 + #define AF_CHPRST_DETECTED 3 + #define AF_BUSRST_NEEDED 4 + #define AF_BUSRST_PENDING 5 + #define AF_BUSRST_DETECTED 6 + #define AF_DISABLED 7 + #define AF_FLASH_LOCK 8 + #define AF_OS_RESET 9 + #define AF_FLASHING 10 + #define AF_POWER_MGT 11 + #define AF_NVR_VALID 12 + #define AF_DEGRADED_MODE 13 + #define AF_DISC_PENDING 14 + #define AF_TASKLET_SCHEDULED 15 + #define AF_HEARTBEAT 16 + #define AF_HEARTBEAT_ENB 17 + #define AF_NOT_PRESENT 18 + #define AF_CHPRST_STARTED 19 + #define AF_FIRST_INIT 20 + #define AF_POWER_DOWN 21 + #define AF_DISC_IN_PROG 22 + #define AF_COMM_LIST_TOGGLE 23 + #define AF_LEGACY_SGE_MODE 24 + #define AF_DISC_POLLED 25 + long flags2; + #define AF2_SERIAL_FLASH 0 + #define AF2_DEV_SCAN 1 + #define AF2_DEV_CNT_OK 2 + #define AF2_COREDUMP_AVAIL 3 + #define AF2_COREDUMP_SAVED 4 + #define AF2_VDA_POWER_DOWN 5 + #define AF2_THUNDERLINK 6 + #define AF2_THUNDERBOLT 7 + #define AF2_INIT_DONE 8 + #define AF2_INT_PENDING 9 + #define AF2_TIMER_TICK 10 + #define AF2_IRQ_CLAIMED 11 + #define AF2_MSI_ENABLED 12 atomic_t disable_cnt; atomic_t dis_ints_cnt; u32 int_stat; @@ -1150,16 +1150,6 @@ void esas2r_queue_fw_event(struct esas2r_adapter *a, int data_sz); /* Inline functions */ -static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits) -{ - return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags); -} - -static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits) -{ - return test_and_clear_bit(ilog2(bits), - (volatile unsigned long *)flags); -} /* Allocate a chip scatter/gather list entry */ static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a) @@ -1217,7 +1207,6 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq, struct esas2r_adapter *a) { union atto_vda_req *vrq = rq->vrq; - u32 handle; INIT_LIST_HEAD(&rq->sg_table_head); rq->data_buf = (void *)(vrq + 1); @@ -1253,11 +1242,9 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq, /* * add a reference number to the handle to make it unique (until it - * wraps of course) while preserving the upper word + * wraps of course) while preserving the least significant word */ - - handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000; - vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++); + vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle; /* * the following formats a SCSI request. 
the caller can override as @@ -1303,10 +1290,13 @@ static inline void esas2r_rq_destroy_request(struct esas2r_request *rq, static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a) { - return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED - | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED - | AF_PORT_CHANGE)) - ? true : false; + + return test_bit(AF_BUSRST_NEEDED, &a->flags) || + test_bit(AF_BUSRST_DETECTED, &a->flags) || + test_bit(AF_CHPRST_NEEDED, &a->flags) || + test_bit(AF_CHPRST_DETECTED, &a->flags) || + test_bit(AF_PORT_CHANGE, &a->flags); + } /* @@ -1345,24 +1335,24 @@ static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a) static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a) { /* make sure we don't schedule twice */ - if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) & - ilog2(AF_TASKLET_SCHEDULED))) + if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags)) tasklet_hi_schedule(&a->tasklet); } static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a) { - if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING)) - && (a->nvram->options2 & SASNVR2_HEARTBEAT)) - esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB); + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + (a->nvram->options2 & SASNVR2_HEARTBEAT)) + set_bit(AF_HEARTBEAT_ENB, &a->flags); else - esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); + clear_bit(AF_HEARTBEAT_ENB, &a->flags); } static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a) { - esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); - esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); + clear_bit(AF_HEARTBEAT_ENB, &a->flags); + clear_bit(AF_HEARTBEAT, &a->flags); } /* Set the initial state for resetting the adapter on the next pass through @@ -1372,9 +1362,9 @@ static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a) { esas2r_disable_heartbeat(a); - esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED); - esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); - esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); + set_bit(AF_CHPRST_NEEDED, &a->flags); + set_bit(AF_CHPRST_PENDING, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); } /* See if an interrupt is pending on the adapter. */ diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c index dec6c334ce3e..1c079f4300a5 100644 --- a/drivers/scsi/esas2r/esas2r_disc.c +++ b/drivers/scsi/esas2r/esas2r_disc.c @@ -86,9 +86,9 @@ void esas2r_disc_initialize(struct esas2r_adapter *a) esas2r_trace_enter(); - esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); - esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN); - esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK); + clear_bit(AF_DISC_IN_PROG, &a->flags); + clear_bit(AF2_DEV_SCAN, &a->flags2); + clear_bit(AF2_DEV_CNT_OK, &a->flags2); a->disc_start_time = jiffies_to_msecs(jiffies); a->disc_wait_time = nvr->dev_wait_time * 1000; @@ -107,7 +107,8 @@ void esas2r_disc_initialize(struct esas2r_adapter *a) a->general_req.interrupt_cx = NULL; - if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) { + if (test_bit(AF_CHPRST_DETECTED, &a->flags) || + test_bit(AF_POWER_MGT, &a->flags)) { if (a->prev_dev_cnt == 0) { /* Don't bother waiting if there is nothing to wait * for. @@ -212,9 +213,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a) || a->disc_wait_cnt == 0)) { /* After three seconds of waiting, schedule a scan. 
*/ if (time >= 3000 - && !(esas2r_lock_set_flags(&a->flags2, - AF2_DEV_SCAN) & - ilog2(AF2_DEV_SCAN))) { + && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { spin_lock_irqsave(&a->mem_lock, flags); esas2r_disc_queue_event(a, DCDE_DEV_SCAN); spin_unlock_irqrestore(&a->mem_lock, flags); @@ -228,18 +227,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a) * We are done waiting...we think. Adjust the wait time to * consume events after the count is met. */ - if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK) - & ilog2(AF2_DEV_CNT_OK))) + if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2)) a->disc_wait_time = time + 3000; /* If we haven't done a full scan yet, do it now. */ - if (!(esas2r_lock_set_flags(&a->flags2, - AF2_DEV_SCAN) & - ilog2(AF2_DEV_SCAN))) { + if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { spin_lock_irqsave(&a->mem_lock, flags); esas2r_disc_queue_event(a, DCDE_DEV_SCAN); spin_unlock_irqrestore(&a->mem_lock, flags); - esas2r_trace_exit(); return; } @@ -253,9 +248,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a) return; } } else { - if (!(esas2r_lock_set_flags(&a->flags2, - AF2_DEV_SCAN) & - ilog2(AF2_DEV_SCAN))) { + if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { spin_lock_irqsave(&a->mem_lock, flags); esas2r_disc_queue_event(a, DCDE_DEV_SCAN); spin_unlock_irqrestore(&a->mem_lock, flags); @@ -265,8 +258,8 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a) /* We want to stop waiting for devices. */ a->disc_wait_time = 0; - if ((a->flags & AF_DISC_POLLED) - && (a->flags & AF_DISC_IN_PROG)) { + if (test_bit(AF_DISC_POLLED, &a->flags) && + test_bit(AF_DISC_IN_PROG, &a->flags)) { /* * Polled discovery is still pending so continue the active * discovery until it is done. At that point, we will stop @@ -280,14 +273,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a) * driven; i.e. There is no transition. */ esas2r_disc_fix_curr_requests(a); - esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); + clear_bit(AF_DISC_PENDING, &a->flags); /* * We have deferred target state changes until now because we * don't want to report any removals (due to the first arrival) * until the device wait time expires. */ - esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); + set_bit(AF_PORT_CHANGE, &a->flags); } esas2r_trace_exit(); @@ -308,7 +301,8 @@ void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt) * Don't start discovery before or during polled discovery. if we did, * we would have a deadlock if we are in the ISR already. */ - if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED))) + if (!test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_DISC_POLLED, &a->flags)) esas2r_disc_start_port(a); esas2r_trace_exit(); @@ -322,7 +316,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a) esas2r_trace_enter(); - if (a->flags & AF_DISC_IN_PROG) { + if (test_bit(AF_DISC_IN_PROG, &a->flags)) { esas2r_trace_exit(); return false; @@ -330,7 +324,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a) /* If there is a discovery waiting, process it. 
*/ if (dc->disc_evt) { - if ((a->flags & AF_DISC_POLLED) + if (test_bit(AF_DISC_POLLED, &a->flags) && a->disc_wait_time == 0) { /* * We are doing polled discovery, but we no longer want * to wait for devices. */ esas2r_hdebug("disc done"); - esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); + set_bit(AF_PORT_CHANGE, &a->flags); esas2r_trace_exit(); @@ -356,10 +350,10 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a) /* Handle the discovery context */ esas2r_trace("disc_evt: %d", dc->disc_evt); - esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG); + set_bit(AF_DISC_IN_PROG, &a->flags); dc->flags = 0; - if (a->flags & AF_DISC_POLLED) + if (test_bit(AF_DISC_POLLED, &a->flags)) dc->flags |= DCF_POLLED; rq->interrupt_cx = dc; @@ -379,7 +373,7 @@ } /* Continue interrupt driven discovery */ - if (!(a->flags & AF_DISC_POLLED)) + if (!test_bit(AF_DISC_POLLED, &a->flags)) ret = esas2r_disc_continue(a, rq); else ret = true; @@ -453,10 +447,10 @@ static bool esas2r_disc_continue(struct esas2r_adapter *a, /* Discovery is done...for now. */ rq->interrupt_cx = NULL; - if (!(a->flags & AF_DISC_PENDING)) + if (!test_bit(AF_DISC_PENDING, &a->flags)) esas2r_disc_fix_curr_requests(a); - esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); + clear_bit(AF_DISC_IN_PROG, &a->flags); /* Start the next discovery. */ return esas2r_disc_start_port(a); @@ -480,7 +474,8 @@ static bool esas2r_disc_start_request(struct esas2r_adapter *a, spin_lock_irqsave(&a->queue_lock, flags); - if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING))) + if (!test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_FLASHING, &a->flags)) esas2r_disc_local_start_request(a, rq); else list_add_tail(&rq->req_list, &a->defer_list); diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c index 8582929b1fef..b7dc59fca7a6 100644 --- a/drivers/scsi/esas2r/esas2r_flash.c +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -231,7 +231,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq) * RS_PENDING, FM API tasks will continue. */ rq->req_stat = RS_PENDING; - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) /* not supported for now */; else build_flash_msg(a, rq); @@ -315,7 +315,7 @@ static bool complete_fmapi_req(struct esas2r_adapter *a, memset(fc->scratch, 0, FM_BUF_SZ); esas2r_enable_heartbeat(a); - esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK); + clear_bit(AF_FLASH_LOCK, &a->flags); return false; } @@ -526,7 +526,7 @@ no_cfg: * The download is complete. If in degraded mode, * attempt a chip reset.
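 * (Illustrative aside, not part of the patch: the flag tests in this file
 * follow the earlier esas2r.h conversion of a->flags from a volatile u32
 * bitmask to a long holding bit numbers. The removed wrappers translated
 * masks with ilog2(); the new code uses the standard atomic bitops
 * directly, for example:
 *
 *	set_bit(AF_DEGRADED_MODE, &a->flags);
 *	if (test_and_set_bit(AF_FLASH_LOCK, &a->flags))
 *		return false;	// lock already held, as in esas2r_fm_api()
 *
 * which keeps atomicity while dropping the ilog2() indirection.)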
*/ - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) esas2r_local_reset_adapter(a); a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version; @@ -860,8 +860,13 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, return false; } + if (fsc->command >= cmdcnt) { + fs->status = ATTO_STS_INV_FUNC; + return false; + } + func = cmd_to_fls_func[fsc->command]; - if (fsc->command >= cmdcnt || func == 0xFF) { + if (func == 0xFF) { fs->status = ATTO_STS_INV_FUNC; return false; } @@ -885,7 +890,7 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, } } - if (a->flags & AF_DEGRADED_MODE) { + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { fs->status = ATTO_STS_DEGRADED; return false; } @@ -940,8 +945,12 @@ static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function) /* Now wait for the firmware to process it */ starttime = jiffies_to_msecs(jiffies); - timeout = a->flags & - (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000; + + if (test_bit(AF_CHPRST_PENDING, &a->flags) || + test_bit(AF_DISC_PENDING, &a->flags)) + timeout = 40000; + else + timeout = 5000; while (true) { intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); @@ -1003,7 +1012,7 @@ bool esas2r_read_flash_block(struct esas2r_adapter *a, u32 offset; u32 iatvr; - if (a->flags2 & AF2_SERIAL_FLASH) + if (test_bit(AF2_SERIAL_FLASH, &a->flags2)) iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE); else iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE); @@ -1231,9 +1240,9 @@ static void esas2r_nvram_callback(struct esas2r_adapter *a, if (rq->req_stat != RS_PENDING) { /* update the NVRAM state */ if (rq->req_stat == RS_SUCCESS) - esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); + set_bit(AF_NVR_VALID, &a->flags); else - esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); + clear_bit(AF_NVR_VALID, &a->flags); esas2r_enable_heartbeat(a); @@ -1253,7 +1262,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0]; struct atto_vda_flash_req *vrq = &rq->vrq->flash; - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) return false; if (down_interruptible(&a->nvram_semaphore)) @@ -1297,7 +1306,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, FLS_OFFSET_NVR, sizeof(struct esas2r_sas_nvram)); - if (a->flags & AF_LEGACY_SGE_MODE) { + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { vrq->data.sge[0].length = cpu_to_le32(SGE_LAST | @@ -1332,7 +1341,7 @@ bool esas2r_nvram_validate(struct esas2r_adapter *a) } else if (n->version > SASNVR_VERSION) { esas2r_hdebug("invalid NVRAM version"); } else { - esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); + set_bit(AF_NVR_VALID, &a->flags); rslt = true; } @@ -1354,8 +1363,8 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a) struct esas2r_sas_nvram *n = a->nvram; u32 time = jiffies_to_msecs(jiffies); - esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); - memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram)); + clear_bit(AF_NVR_VALID, &a->flags); + *n = default_sas_nvram; n->sas_addr[3] |= 0x0F; n->sas_addr[4] = HIBYTE(LOWORD(time)); n->sas_addr[5] = LOBYTE(LOWORD(time)); @@ -1373,7 +1382,7 @@ void esas2r_nvram_get_defaults(struct esas2r_adapter *a, * address out first. 
*/ memcpy(&sas_addr[0], a->nvram->sas_addr, 8); - memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram)); + *nvram = default_sas_nvram; memcpy(&nvram->sas_addr[0], &sas_addr[0], 8); } @@ -1384,7 +1393,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, u8 j; struct esas2r_component_header *ch; - if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) { + if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) { /* flag was already set */ fi->status = FI_STAT_BUSY; return false; @@ -1408,7 +1417,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, return complete_fmapi_req(a, rq, FI_STAT_IMG_VER); } - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) return complete_fmapi_req(a, rq, FI_STAT_DEGRADED); switch (fi->action) { diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index 3a798e7d5c56..b9750e296d71 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -216,7 +216,7 @@ use_legacy_interrupts: goto use_legacy_interrupts; } a->intr_mode = INTR_MODE_MSI; - esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED); + set_bit(AF2_MSI_ENABLED, &a->flags2); break; @@ -252,7 +252,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a) return; } - esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED); + set_bit(AF2_IRQ_CLAIMED, &a->flags2); esas2r_log(ESAS2R_LOG_INFO, "claimed IRQ %d flags: 0x%lx", a->pcid->irq, flags); @@ -380,10 +380,10 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, /* interrupts will be disabled until we are done with init */ atomic_inc(&a->dis_ints_cnt); atomic_inc(&a->disable_cnt); - a->flags |= AF_CHPRST_PENDING - | AF_DISC_PENDING - | AF_FIRST_INIT - | AF_LEGACY_SGE_MODE; + set_bit(AF_CHPRST_PENDING, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); + set_bit(AF_FIRST_INIT, &a->flags); + set_bit(AF_LEGACY_SGE_MODE, &a->flags); a->init_msg = ESAS2R_INIT_MSG_START; a->max_vdareq_size = 128; @@ -440,11 +440,11 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, esas2r_claim_interrupts(a); - if (a->flags2 & AF2_IRQ_CLAIMED) + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) esas2r_enable_chip_interrupts(a); - esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE); - if (!(a->flags & AF_DEGRADED_MODE)) + set_bit(AF2_INIT_DONE, &a->flags2); + if (!test_bit(AF_DEGRADED_MODE, &a->flags)) esas2r_kickoff_timer(a); esas2r_debug("esas2r_init_adapter done for %p (%d)", a, a->disable_cnt); @@ -457,8 +457,8 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a, { struct esas2r_mem_desc *memdesc, *next; - if ((a->flags2 & AF2_INIT_DONE) - && (!(a->flags & AF_DEGRADED_MODE))) { + if ((test_bit(AF2_INIT_DONE, &a->flags2)) + && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { if (!power_management) { del_timer_sync(&a->timer); tasklet_kill(&a->tasklet); @@ -508,19 +508,19 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a, } /* Clean up interrupts */ - if (a->flags2 & AF2_IRQ_CLAIMED) { + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { esas2r_log_dev(ESAS2R_LOG_INFO, &(a->pcid->dev), "free_irq(%d) called", a->pcid->irq); free_irq(a->pcid->irq, a); esas2r_debug("IRQ released"); - esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED); + clear_bit(AF2_IRQ_CLAIMED, &a->flags2); } - if (a->flags2 & AF2_MSI_ENABLED) { + if (test_bit(AF2_MSI_ENABLED, &a->flags2)) { pci_disable_msi(a->pcid); - esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED); + clear_bit(AF2_MSI_ENABLED, 
&a->flags2); esas2r_debug("MSI disabled"); } @@ -641,12 +641,10 @@ void esas2r_kill_adapter(int i) pci_set_drvdata(a->pcid, NULL); esas2r_adapters[i] = NULL; - if (a->flags2 & AF2_INIT_DONE) { - esas2r_lock_clear_flags(&a->flags2, - AF2_INIT_DONE); + if (test_bit(AF2_INIT_DONE, &a->flags2)) { + clear_bit(AF2_INIT_DONE, &a->flags2); - esas2r_lock_set_flags(&a->flags, - AF_DEGRADED_MODE); + set_bit(AF_DEGRADED_MODE, &a->flags); esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev), @@ -665,7 +663,7 @@ void esas2r_kill_adapter(int i) int esas2r_cleanup(struct Scsi_Host *host) { - struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; + struct esas2r_adapter *a; int index; if (host == NULL) { @@ -678,6 +676,7 @@ int esas2r_cleanup(struct Scsi_Host *host) } esas2r_debug("esas2r_cleanup called for host %p", host); + a = (struct esas2r_adapter *)host->hostdata; index = a->index; esas2r_kill_adapter(index); return index; @@ -758,7 +757,7 @@ int esas2r_resume(struct pci_dev *pdev) esas2r_claim_interrupts(a); - if (a->flags2 & AF2_IRQ_CLAIMED) { + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { /* * Now that system interrupt(s) are claimed, we can enable * chip interrupts. @@ -780,7 +779,7 @@ error_exit: bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str) { - esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); + set_bit(AF_DEGRADED_MODE, &a->flags); esas2r_log(ESAS2R_LOG_CRIT, "setting adapter to degraded mode: %s\n", error_str); return false; @@ -808,7 +807,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) int pcie_cap_reg; pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); - if (0xffff && pcie_cap_reg) { + if (pcie_cap_reg) { u16 devcontrol; pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, @@ -895,7 +894,7 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a, && (a->pcid->subsystem_device & ATTO_SSDID_TBT)) a->flags2 |= AF2_THUNDERBOLT; - if (a->flags2 & AF2_THUNDERBOLT) + if (test_bit(AF2_THUNDERBOLT, &a->flags2)) a->flags2 |= AF2_SERIAL_FLASH; if (a->pcid->subsystem_device == ATTO_TLSH_1068) @@ -955,14 +954,14 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a, a->outbound_copy = (u32 volatile *)high; high += sizeof(u32); - if (!(a->flags & AF_NVR_VALID)) + if (!test_bit(AF_NVR_VALID, &a->flags)) esas2r_nvram_set_defaults(a); /* update the caller's uncached memory area pointer */ *uncached_area = (void *)high; /* initialize the allocated memory */ - if (a->flags & AF_FIRST_INIT) { + if (test_bit(AF_FIRST_INIT, &a->flags)) { memset(a->req_table, 0, (num_requests + num_ae_requests + 1) * sizeof(struct esas2r_request *)); @@ -1018,7 +1017,7 @@ bool esas2r_check_adapter(struct esas2r_adapter *a) * if the chip reset detected flag is set, we can bypass a bunch of * stuff. 
*/ - if (a->flags & AF_CHPRST_DETECTED) + if (test_bit(AF_CHPRST_DETECTED, &a->flags)) goto skip_chip_reset; /* @@ -1056,14 +1055,12 @@ bool esas2r_check_adapter(struct esas2r_adapter *a) doorbell); if (ver == DRBL_FW_VER_0) { - esas2r_lock_set_flags(&a->flags, - AF_LEGACY_SGE_MODE); + set_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 128; a->build_sgl = esas2r_build_sg_list_sge; } else if (ver == DRBL_FW_VER_1) { - esas2r_lock_clear_flags(&a->flags, - AF_LEGACY_SGE_MODE); + clear_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 1024; a->build_sgl = esas2r_build_sg_list_prd; @@ -1138,7 +1135,7 @@ skip_chip_reset: *a->outbound_copy = a->last_write = a->last_read = a->list_size - 1; - esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | a->last_write); esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | @@ -1203,9 +1200,9 @@ skip_chip_reset: */ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); if (doorbell & DRBL_POWER_DOWN) - esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN); + set_bit(AF2_VDA_POWER_DOWN, &a->flags2); else - esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN); + clear_bit(AF2_VDA_POWER_DOWN, &a->flags2); /* * enable assertion of outbound queue and doorbell interrupts in the @@ -1238,8 +1235,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, 0, NULL); ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; - ci->sgl_page_size = sgl_page_size; - ci->epoch_time = now.tv_sec; + ci->sgl_page_size = cpu_to_le32(sgl_page_size); + ci->epoch_time = cpu_to_le32(now.tv_sec); rq->flags |= RF_FAILURE_OK; a->init_msg = ESAS2R_INIT_MSG_INIT; break; @@ -1249,12 +1246,15 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, if (rq->req_stat == RS_SUCCESS) { u32 major; u32 minor; + u16 fw_release; a->fw_version = le16_to_cpu( rq->func_rsp.cfg_rsp.vda_version); a->fw_build = rq->func_rsp.cfg_rsp.fw_build; - major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release); - minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release); + fw_release = le16_to_cpu( + rq->func_rsp.cfg_rsp.fw_release); + major = LOBYTE(fw_release); + minor = HIBYTE(fw_release); a->fw_version += (major << 16) + (minor << 24); } else { esas2r_hdebug("FAILED"); @@ -1265,9 +1265,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, * unsupported config requests correctly. */ - if ((a->flags2 & AF2_THUNDERBOLT) - || (be32_to_cpu(a->fw_version) > - be32_to_cpu(0x47020052))) { + if ((test_bit(AF2_THUNDERBOLT, &a->flags2)) + || (be32_to_cpu(a->fw_version) > 0x00524702)) { esas2r_hdebug("CFG get init"); esas2r_build_cfg_req(a, rq, @@ -1360,10 +1359,10 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) struct esas2r_request *rq; u32 i; - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) goto exit; - if (!(a->flags & AF_NVR_VALID)) { + if (!test_bit(AF_NVR_VALID, &a->flags)) { if (!esas2r_nvram_read_direct(a)) esas2r_log(ESAS2R_LOG_WARN, "invalid/missing NVRAM parameters"); @@ -1375,8 +1374,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) } /* The firmware is ready. 
*/ - esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE); - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); + clear_bit(AF_DEGRADED_MODE, &a->flags); + clear_bit(AF_CHPRST_PENDING, &a->flags); /* Post all the async event requests */ for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) @@ -1397,8 +1396,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) esas2r_hdebug("firmware revision: %s", a->fw_rev); - if ((a->flags & AF_CHPRST_DETECTED) - && (a->flags & AF_FIRST_INIT)) { + if (test_bit(AF_CHPRST_DETECTED, &a->flags) + && (test_bit(AF_FIRST_INIT, &a->flags))) { esas2r_enable_chip_interrupts(a); return true; } @@ -1422,18 +1421,18 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) * Block Tasklets from getting scheduled and indicate this is * polled discovery. */ - esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED); - esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED); + set_bit(AF_TASKLET_SCHEDULED, &a->flags); + set_bit(AF_DISC_POLLED, &a->flags); /* * Temporarily bring the disable count to zero to enable * deferred processing. Note that the count is already zero * after the first initialization. */ - if (a->flags & AF_FIRST_INIT) + if (test_bit(AF_FIRST_INIT, &a->flags)) atomic_dec(&a->disable_cnt); - while (a->flags & AF_DISC_PENDING) { + while (test_bit(AF_DISC_PENDING, &a->flags)) { schedule_timeout_interruptible(msecs_to_jiffies(100)); /* @@ -1452,7 +1451,7 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) * we have to make sure the timer tick processes the * doorbell indicating the firmware is ready. */ - if (!(a->flags & AF_CHPRST_PENDING)) + if (!test_bit(AF_CHPRST_PENDING, &a->flags)) esas2r_disc_check_for_work(a); /* Simulate a timer tick. */ @@ -1472,11 +1471,11 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) } - if (a->flags & AF_FIRST_INIT) + if (test_bit(AF_FIRST_INIT, &a->flags)) atomic_inc(&a->disable_cnt); - esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED); - esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); + clear_bit(AF_DISC_POLLED, &a->flags); + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); } @@ -1503,26 +1502,26 @@ exit: * need to get done before we exit. */ - if ((a->flags & AF_CHPRST_DETECTED) - && (a->flags & AF_FIRST_INIT)) { + if (test_bit(AF_CHPRST_DETECTED, &a->flags) && + test_bit(AF_FIRST_INIT, &a->flags)) { /* * Reinitialization was performed during the first * initialization. Only clear the chip reset flag so the * original device polling is not cancelled. */ if (!rslt) - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); + clear_bit(AF_CHPRST_PENDING, &a->flags); } else { /* First initialization or a subsequent re-init is complete. */ if (!rslt) { - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); - esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); + clear_bit(AF_CHPRST_PENDING, &a->flags); + clear_bit(AF_DISC_PENDING, &a->flags); } /* Enable deferred processing after the first initialization. 
*/ - if (a->flags & AF_FIRST_INIT) { - esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT); + if (test_bit(AF_FIRST_INIT, &a->flags)) { + clear_bit(AF_FIRST_INIT, &a->flags); if (atomic_dec_return(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); @@ -1534,7 +1533,7 @@ exit: void esas2r_reset_adapter(struct esas2r_adapter *a) { - esas2r_lock_set_flags(&a->flags, AF_OS_RESET); + set_bit(AF_OS_RESET, &a->flags); esas2r_local_reset_adapter(a); esas2r_schedule_tasklet(a); } @@ -1549,18 +1548,17 @@ void esas2r_reset_chip(struct esas2r_adapter *a) * dump is located in the upper 512KB of the onchip SRAM. Make sure * to not overwrite a previous crash that was saved. */ - if ((a->flags2 & AF2_COREDUMP_AVAIL) - && !(a->flags2 & AF2_COREDUMP_SAVED) - && a->fw_coredump_buff) { + if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) && + !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) { esas2r_read_mem_block(a, a->fw_coredump_buff, MW_DATA_ADDR_SRAM + 0x80000, ESAS2R_FWCOREDUMP_SZ); - esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED); + set_bit(AF2_COREDUMP_SAVED, &a->flags2); } - esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL); + clear_bit(AF2_COREDUMP_AVAIL, &a->flags2); /* Reset the chip */ if (a->pcid->revision == MVR_FREY_B2) @@ -1606,10 +1604,10 @@ static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a) */ void esas2r_power_down(struct esas2r_adapter *a) { - esas2r_lock_set_flags(&a->flags, AF_POWER_MGT); - esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN); + set_bit(AF_POWER_MGT, &a->flags); + set_bit(AF_POWER_DOWN, &a->flags); - if (!(a->flags & AF_DEGRADED_MODE)) { + if (!test_bit(AF_DEGRADED_MODE, &a->flags)) { u32 starttime; u32 doorbell; @@ -1649,14 +1647,14 @@ void esas2r_power_down(struct esas2r_adapter *a) * For versions of firmware that support it tell them the driver * is powering down. */ - if (a->flags2 & AF2_VDA_POWER_DOWN) + if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2)) esas2r_power_down_notify_firmware(a); } /* Suspend I/O processing. */ - esas2r_lock_set_flags(&a->flags, AF_OS_RESET); - esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); - esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); + set_bit(AF_OS_RESET, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); + set_bit(AF_CHPRST_PENDING, &a->flags); esas2r_process_adapter_reset(a); @@ -1673,9 +1671,9 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) { bool ret; - esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN); + clear_bit(AF_POWER_DOWN, &a->flags); esas2r_init_pci_cfg_space(a); - esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT); + set_bit(AF_FIRST_INIT, &a->flags); atomic_inc(&a->disable_cnt); /* reinitialize the adapter */ @@ -1687,17 +1685,17 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) esas2r_send_reset_ae(a, true); /* clear this flag after initialization. 
*/ - esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT); + clear_bit(AF_POWER_MGT, &a->flags); return ret; } bool esas2r_is_adapter_present(struct esas2r_adapter *a) { - if (a->flags & AF_NOT_PRESENT) + if (test_bit(AF_NOT_PRESENT, &a->flags)) return false; if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { - esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT); + set_bit(AF_NOT_PRESENT, &a->flags); return false; } diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c index c2d4ff57c5c3..f16d6bcf9bb6 100644 --- a/drivers/scsi/esas2r/esas2r_int.c +++ b/drivers/scsi/esas2r/esas2r_int.c @@ -96,7 +96,7 @@ irqreturn_t esas2r_interrupt(int irq, void *dev_id) if (!esas2r_adapter_interrupt_pending(a)) return IRQ_NONE; - esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING); + set_bit(AF2_INT_PENDING, &a->flags2); esas2r_schedule_tasklet(a); return IRQ_HANDLED; @@ -317,9 +317,10 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a) * = 2 - can start any request */ - if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING)) + if (test_bit(AF_CHPRST_PENDING, &a->flags) || + test_bit(AF_FLASHING, &a->flags)) startreqs = 0; - else if (a->flags & AF_DISC_PENDING) + else if (test_bit(AF_DISC_PENDING, &a->flags)) startreqs = 1; atomic_inc(&a->disable_cnt); @@ -367,7 +368,7 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a) * Flashing could have been set by last local * start */ - if (a->flags & AF_FLASHING) + if (test_bit(AF_FLASHING, &a->flags)) break; } } @@ -404,7 +405,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a) dc->disc_evt = 0; - esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); + clear_bit(AF_DISC_IN_PROG, &a->flags); } /* @@ -425,7 +426,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a) a->last_write = a->last_read = a->list_size - 1; - esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); /* Kill all the requests on the active list */ list_for_each(element, &a->defer_list) { @@ -470,7 +471,7 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a) if (atomic_read(&a->disable_cnt) == 0) esas2r_do_deferred_processes(a); - esas2r_lock_clear_flags(&a->flags, AF_OS_RESET); + clear_bit(AF_OS_RESET, &a->flags); esas2r_trace_exit(); } @@ -478,10 +479,10 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a) static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) { - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED); - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); + clear_bit(AF_CHPRST_NEEDED, &a->flags); + clear_bit(AF_BUSRST_NEEDED, &a->flags); + clear_bit(AF_BUSRST_DETECTED, &a->flags); + clear_bit(AF_BUSRST_PENDING, &a->flags); /* * Make sure we don't get attempt more than 3 resets * when the uptime between resets does not exceed one @@ -507,10 +508,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) * prevent the heartbeat from trying to recover. 
*/ - esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); - esas2r_lock_set_flags(&a->flags, AF_DISABLED); - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); - esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); + set_bit(AF_DEGRADED_MODE, &a->flags); + set_bit(AF_DISABLED, &a->flags); + clear_bit(AF_CHPRST_PENDING, &a->flags); + clear_bit(AF_DISC_PENDING, &a->flags); esas2r_disable_chip_interrupts(a); a->int_mask = 0; @@ -519,18 +520,17 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) esas2r_log(ESAS2R_LOG_CRIT, "Adapter disabled because of hardware failure"); } else { - u32 flags = - esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED); + bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags); - if (!(flags & AF_CHPRST_STARTED)) + if (!alrdyrst) /* * Only disable interrupts if this is * the first reset attempt. */ esas2r_disable_chip_interrupts(a); - if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) && - !(flags & AF_CHPRST_STARTED)) { + if ((test_bit(AF_POWER_MGT, &a->flags)) && + !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) { /* * Don't reset the chip on the first * deferred power up attempt. @@ -543,10 +543,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) /* Kick off the reinitialization */ a->chip_uptime += ESAS2R_CHP_UPTIME_CNT; a->chip_init_time = jiffies_to_msecs(jiffies); - if (!(a->flags & AF_POWER_MGT)) { + if (!test_bit(AF_POWER_MGT, &a->flags)) { esas2r_process_adapter_reset(a); - if (!(flags & AF_CHPRST_STARTED)) { + if (!alrdyrst) { /* Remove devices now that I/O is cleaned up. */ a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a); @@ -560,38 +560,37 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a) { - while (a->flags & AF_CHPRST_DETECTED) { + while (test_bit(AF_CHPRST_DETECTED, &a->flags)) { /* * Balance the enable in esas2r_initadapter_hw. * Esas2r_power_down already took care of it for power * management. */ - if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags & - AF_POWER_MGT)) + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && + !test_bit(AF_POWER_MGT, &a->flags)) esas2r_disable_chip_interrupts(a); /* Reinitialize the chip. */ esas2r_check_adapter(a); esas2r_init_adapter_hw(a, 0); - if (a->flags & AF_CHPRST_NEEDED) + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) break; - if (a->flags & AF_POWER_MGT) { + if (test_bit(AF_POWER_MGT, &a->flags)) { /* Recovery from power management. */ - if (a->flags & AF_FIRST_INIT) { + if (test_bit(AF_FIRST_INIT, &a->flags)) { /* Chip reset during normal power up */ esas2r_log(ESAS2R_LOG_CRIT, "The firmware was reset during a normal power-up sequence"); } else { /* Deferred power up complete. */ - esas2r_lock_clear_flags(&a->flags, - AF_POWER_MGT); + clear_bit(AF_POWER_MGT, &a->flags); esas2r_send_reset_ae(a, true); } } else { /* Recovery from online chip reset. */ - if (a->flags & AF_FIRST_INIT) { + if (test_bit(AF_FIRST_INIT, &a->flags)) { /* Chip reset during driver load */ } else { /* Chip reset after driver load */ @@ -602,14 +601,14 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a) "Recovering from a chip reset while the chip was online"); } - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED); + clear_bit(AF_CHPRST_STARTED, &a->flags); esas2r_enable_chip_interrupts(a); /* * Clear this flag last! this indicates that the chip has been * reset already during initialization. 
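 *
 * Also worth noting in this hunk: test_and_set_bit() atomically sets
 * AF_CHPRST_STARTED and returns the bit's previous value, so the
 * "first reset attempt" logic above needs no extra locking. The idiom
 * in isolation (names illustrative):
 *
 *	if (!test_and_set_bit(MY_AF_STARTED, &my_flags)) {
 *		// previous value was 0, so this caller won the race
 *		// and performs the one-time work exactly once
 *		do_one_time_setup();
 *	}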
*/ - esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED); + clear_bit(AF_CHPRST_DETECTED, &a->flags); } } @@ -617,26 +616,28 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a) /* Perform deferred tasks when chip interrupts are disabled */ void esas2r_do_tasklet_tasks(struct esas2r_adapter *a) { - if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) { - if (a->flags & AF_CHPRST_NEEDED) + + if (test_bit(AF_CHPRST_NEEDED, &a->flags) || + test_bit(AF_CHPRST_DETECTED, &a->flags)) { + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) esas2r_chip_rst_needed_during_tasklet(a); esas2r_handle_chip_rst_during_tasklet(a); } - if (a->flags & AF_BUSRST_NEEDED) { + if (test_bit(AF_BUSRST_NEEDED, &a->flags)) { esas2r_hdebug("hard resetting bus"); - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); + clear_bit(AF_BUSRST_NEEDED, &a->flags); - if (a->flags & AF_FLASHING) - esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); + if (test_bit(AF_FLASHING, &a->flags)) + set_bit(AF_BUSRST_DETECTED, &a->flags); else esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_RESET_BUS); } - if (a->flags & AF_BUSRST_DETECTED) { + if (test_bit(AF_BUSRST_DETECTED, &a->flags)) { esas2r_process_bus_reset(a); esas2r_log_dev(ESAS2R_LOG_WARN, @@ -645,14 +646,14 @@ void esas2r_do_tasklet_tasks(struct esas2r_adapter *a) scsi_report_bus_reset(a->host, 0); - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); - esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); + clear_bit(AF_BUSRST_DETECTED, &a->flags); + clear_bit(AF_BUSRST_PENDING, &a->flags); esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete"); } - if (a->flags & AF_PORT_CHANGE) { - esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE); + if (test_bit(AF_PORT_CHANGE, &a->flags)) { + clear_bit(AF_PORT_CHANGE, &a->flags); esas2r_targ_db_report_changes(a); } @@ -672,10 +673,10 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell) esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); if (doorbell & DRBL_RESET_BUS) - esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); + set_bit(AF_BUSRST_DETECTED, &a->flags); if (doorbell & DRBL_FORCE_INT) - esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); + clear_bit(AF_HEARTBEAT, &a->flags); if (doorbell & DRBL_PANIC_REASON_MASK) { esas2r_hdebug("*** Firmware Panic ***"); @@ -683,7 +684,7 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell) } if (doorbell & DRBL_FW_RESET) { - esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL); + set_bit(AF2_COREDUMP_AVAIL, &a->flags2); esas2r_local_reset_adapter(a); } @@ -918,7 +919,7 @@ void esas2r_complete_request(struct esas2r_adapter *a, { if (rq->vrq->scsi.function == VDA_FUNC_FLASH && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) - esas2r_lock_clear_flags(&a->flags, AF_FLASHING); + clear_bit(AF_FLASHING, &a->flags); /* See if we setup a callback to do special processing */ diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c index 324e2626a08b..a8df916cd57a 100644 --- a/drivers/scsi/esas2r/esas2r_io.c +++ b/drivers/scsi/esas2r/esas2r_io.c @@ -49,7 +49,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) struct esas2r_request *startrq = rq; unsigned long flags; - if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) { + if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) || + test_bit(AF_POWER_DOWN, &a->flags))) { if (rq->vrq->scsi.function == VDA_FUNC_SCSI) rq->req_stat = RS_SEL2; else @@ -69,8 +70,8 @@ void esas2r_start_request(struct esas2r_adapter 
*a, struct esas2r_request *rq) * Note that if AF_DISC_PENDING is set than this will * go on the defer queue. */ - if (unlikely(t->target_state != TS_PRESENT - && !(a->flags & AF_DISC_PENDING))) + if (unlikely(t->target_state != TS_PRESENT && + !test_bit(AF_DISC_PENDING, &a->flags))) rq->req_stat = RS_SEL; } } @@ -91,8 +92,9 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) spin_lock_irqsave(&a->queue_lock, flags); if (likely(list_empty(&a->defer_list) && - !(a->flags & - (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING)))) + !test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_FLASHING, &a->flags) && + !test_bit(AF_DISC_PENDING, &a->flags))) esas2r_local_start_request(a, startrq); else list_add_tail(&startrq->req_list, &a->defer_list); @@ -124,7 +126,7 @@ void esas2r_local_start_request(struct esas2r_adapter *a, if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)) - esas2r_lock_set_flags(&a->flags, AF_FLASHING); + set_bit(AF_FLASHING, &a->flags); list_add_tail(&rq->req_list, &a->active_list); esas2r_start_vda_request(a, rq); @@ -147,11 +149,10 @@ void esas2r_start_vda_request(struct esas2r_adapter *a, if (a->last_write >= a->list_size) { a->last_write = 0; /* update the toggle bit */ - if (a->flags & AF_COMM_LIST_TOGGLE) - esas2r_lock_clear_flags(&a->flags, - AF_COMM_LIST_TOGGLE); + if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) + clear_bit(AF_COMM_LIST_TOGGLE, &a->flags); else - esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); } element = @@ -169,7 +170,7 @@ void esas2r_start_vda_request(struct esas2r_adapter *a, /* Update the write pointer */ dw = a->last_write; - if (a->flags & AF_COMM_LIST_TOGGLE) + if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) dw |= MU_ILW_TOGGLE; esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle); @@ -687,18 +688,14 @@ static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime) esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); if (ver == DRBL_FW_VER_0) { - esas2r_lock_set_flags(&a->flags, - AF_CHPRST_DETECTED); - esas2r_lock_set_flags(&a->flags, - AF_LEGACY_SGE_MODE); + set_bit(AF_CHPRST_DETECTED, &a->flags); + set_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 128; a->build_sgl = esas2r_build_sg_list_sge; } else if (ver == DRBL_FW_VER_1) { - esas2r_lock_set_flags(&a->flags, - AF_CHPRST_DETECTED); - esas2r_lock_clear_flags(&a->flags, - AF_LEGACY_SGE_MODE); + set_bit(AF_CHPRST_DETECTED, &a->flags); + clear_bit(AF_LEGACY_SGE_MODE, &a->flags); a->max_vdareq_size = 1024; a->build_sgl = esas2r_build_sg_list_prd; @@ -719,28 +716,27 @@ void esas2r_timer_tick(struct esas2r_adapter *a) a->last_tick_time = currtime; /* count down the uptime */ - if (a->chip_uptime - && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { + if (a->chip_uptime && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_DISC_PENDING, &a->flags)) { if (deltatime >= a->chip_uptime) a->chip_uptime = 0; else a->chip_uptime -= deltatime; } - if (a->flags & AF_CHPRST_PENDING) { - if (!(a->flags & AF_CHPRST_NEEDED) - && !(a->flags & AF_CHPRST_DETECTED)) + if (test_bit(AF_CHPRST_PENDING, &a->flags)) { + if (!test_bit(AF_CHPRST_NEEDED, &a->flags) && + !test_bit(AF_CHPRST_DETECTED, &a->flags)) esas2r_handle_pending_reset(a, currtime); } else { - if (a->flags & AF_DISC_PENDING) + if (test_bit(AF_DISC_PENDING, &a->flags)) esas2r_disc_check_complete(a); - - if (a->flags & AF_HEARTBEAT_ENB) { - if (a->flags & 
AF_HEARTBEAT) { + if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) { + if (test_bit(AF_HEARTBEAT, &a->flags)) { if ((currtime - a->heartbeat_time) >= ESAS2R_HEARTBEAT_TIME) { - esas2r_lock_clear_flags(&a->flags, - AF_HEARTBEAT); + clear_bit(AF_HEARTBEAT, &a->flags); esas2r_hdebug("heartbeat failed"); esas2r_log(ESAS2R_LOG_CRIT, "heartbeat failed"); @@ -748,7 +744,7 @@ void esas2r_timer_tick(struct esas2r_adapter *a) esas2r_local_reset_adapter(a); } } else { - esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT); + set_bit(AF_HEARTBEAT, &a->flags); a->heartbeat_time = currtime; esas2r_force_interrupt(a); } @@ -812,7 +808,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a, rqaux->vrq->scsi.flags |= cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK)); - if (a->flags & AF_FLASHING) { + if (test_bit(AF_FLASHING, &a->flags)) { /* Assume success. if there are active requests, return busy */ rqaux->req_stat = RS_SUCCESS; @@ -831,7 +827,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a, spin_unlock_irqrestore(&a->queue_lock, flags); - if (!(a->flags & AF_FLASHING)) + if (!test_bit(AF_FLASHING, &a->flags)) esas2r_start_request(a, rqaux); esas2r_comp_list_drain(a, &comp_list); @@ -848,11 +844,12 @@ void esas2r_reset_bus(struct esas2r_adapter *a) { esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset"); - if (!(a->flags & AF_DEGRADED_MODE) - && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { - esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED); - esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING); - esas2r_lock_set_flags(&a->flags, AF_OS_RESET); + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_DISC_PENDING, &a->flags)) { + set_bit(AF_BUSRST_NEEDED, &a->flags); + set_bit(AF_BUSRST_PENDING, &a->flags); + set_bit(AF_OS_RESET, &a->flags); esas2r_schedule_tasklet(a); } diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index f3d0cb885972..d89a0277a8e1 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -347,7 +347,7 @@ static bool csmi_ioctl_tunnel(struct esas2r_adapter *a, { struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl; - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) return false; esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); @@ -415,7 +415,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a, lun = tm->lun; } - if (path > 0 || tid > ESAS2R_MAX_ID) { + if (path > 0) { rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32( CSMI_STS_INV_PARAM); return false; @@ -463,7 +463,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a, gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver)); gcc->bios_build_rev = LOWORD(a->flash_ver); - if (a->flags2 & AF2_THUNDERLINK) + if (test_bit(AF2_THUNDERLINK, &a->flags2)) gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA | CSMI_CNTLRF_SATA_HBA; else @@ -485,7 +485,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a, { struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts; - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) gcs->status = CSMI_CNTLR_STS_FAILED; else gcs->status = CSMI_CNTLR_STS_GOOD; @@ -819,10 +819,10 @@ static int hba_ioctl_callback(struct esas2r_adapter *a, gai->adap_type = ATTO_GAI_AT_ESASRAID2; - if (a->flags2 & AF2_THUNDERLINK) + if (test_bit(AF2_THUNDERLINK, &a->flags2)) gai->adap_type = ATTO_GAI_AT_TLSASHBA; - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) gai->adap_flags |= ATTO_GAI_AF_DEGRADED; 
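/*
 * A subtlety of this conversion, visible throughout the hunks above: a
 * combined mask test such as "a->flags & (AF_CHPRST_PENDING |
 * AF_FLASHING)" cannot be converted one-for-one, because test_bit()
 * takes a bit number rather than a mask. Each flag gets its own call,
 * roughly (sketch, illustrative names):
 *
 *	// before: if (flags & (AF_PENDING | AF_FLASHING)) ...
 *	if (test_bit(AF_PENDING, &flags) ||
 *	    test_bit(AF_FLASHING, &flags))
 *		handle_busy();	// equivalent test, one bit per call
 */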
gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP | @@ -938,7 +938,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a, u32 total_len = ESAS2R_FWCOREDUMP_SZ; /* Size is zero if a core dump isn't present */ - if (!(a->flags2 & AF2_COREDUMP_SAVED)) + if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) total_len = 0; if (len > total_len) @@ -960,8 +960,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a, memset(a->fw_coredump_buff, 0, ESAS2R_FWCOREDUMP_SZ); - esas2r_lock_clear_flags(&a->flags2, - AF2_COREDUMP_SAVED); + clear_bit(AF2_COREDUMP_SAVED, &a->flags2); } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) { hi->status = ATTO_STS_UNSUPPORTED; break; @@ -973,7 +972,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a, trc->total_length = ESAS2R_FWCOREDUMP_SZ; /* Return zero length buffer if core dump not present */ - if (!(a->flags2 & AF2_COREDUMP_SAVED)) + if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) trc->total_length = 0; } else { hi->status = ATTO_STS_UNSUPPORTED; @@ -1048,6 +1047,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a, else if (spt->flags & ATTO_SPTF_HEAD_OF_Q) rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q); + if (!esas2r_build_sg_list(a, rq, sgc)) { hi->status = ATTO_STS_OUT_OF_RSRC; break; @@ -1139,15 +1139,15 @@ static int hba_ioctl_callback(struct esas2r_adapter *a, break; } - if (a->flags & AF_CHPRST_NEEDED) + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) ac->adap_state = ATTO_AC_AS_RST_SCHED; - else if (a->flags & AF_CHPRST_PENDING) + else if (test_bit(AF_CHPRST_PENDING, &a->flags)) ac->adap_state = ATTO_AC_AS_RST_IN_PROG; - else if (a->flags & AF_DISC_PENDING) + else if (test_bit(AF_DISC_PENDING, &a->flags)) ac->adap_state = ATTO_AC_AS_RST_DISC; - else if (a->flags & AF_DISABLED) + else if (test_bit(AF_DISABLED, &a->flags)) ac->adap_state = ATTO_AC_AS_DISABLED; - else if (a->flags & AF_DEGRADED_MODE) + else if (test_bit(AF_DEGRADED_MODE, &a->flags)) ac->adap_state = ATTO_AC_AS_DEGRADED; else ac->adap_state = ATTO_AC_AS_OK; diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 4abf1272e1eb..f37f3e3dd5d5 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -889,7 +889,7 @@ int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) /* Assume success, if it fails we will fix the result later. */ cmd->result = DID_OK << 16; - if (unlikely(a->flags & AF_DEGRADED_MODE)) { + if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) { cmd->result = DID_NO_CONNECT << 16; cmd->scsi_done(cmd); return 0; @@ -1050,7 +1050,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd) esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd); - if (a->flags & AF_DEGRADED_MODE) { + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { cmd->result = DID_ABORT << 16; scsi_set_resid(cmd, 0); @@ -1131,7 +1131,7 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset) struct esas2r_adapter *a = (struct esas2r_adapter *)cmd->device->host->hostdata; - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; if (host_reset) @@ -1141,14 +1141,14 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset) /* above call sets the AF_OS_RESET flag. wait for it to clear. 
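 *
 * The loop below is the usual polling idiom for waiting on such a
 * flag: sleep briefly, re-test the bit, and give up if the adapter
 * drops into degraded mode in the meantime. In isolation:
 *
 *	while (test_bit(AF_OS_RESET, &a->flags)) {
 *		msleep(10);		// yield while the reset runs
 *		if (test_bit(AF_DEGRADED_MODE, &a->flags))
 *			return FAILED;	// reset can no longer complete
 *	}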
*/ - while (a->flags & AF_OS_RESET) { + while (test_bit(AF_OS_RESET, &a->flags)) { msleep(10); - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; } - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; return SUCCESS; @@ -1176,7 +1176,7 @@ static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset) u8 task_management_status = RS_PENDING; bool completed; - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; retry: @@ -1229,7 +1229,7 @@ retry: msleep(10); } - if (a->flags & AF_DEGRADED_MODE) + if (test_bit(AF_DEGRADED_MODE, &a->flags)) return FAILED; if (task_management_status == RS_BUSY) { @@ -1666,13 +1666,13 @@ void esas2r_adapter_tasklet(unsigned long context) { struct esas2r_adapter *a = (struct esas2r_adapter *)context; - if (unlikely(a->flags2 & AF2_TIMER_TICK)) { - esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK); + if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) { + clear_bit(AF2_TIMER_TICK, &a->flags2); esas2r_timer_tick(a); } - if (likely(a->flags2 & AF2_INT_PENDING)) { - esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING); + if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) { + clear_bit(AF2_INT_PENDING, &a->flags2); esas2r_adapter_interrupt(a); } @@ -1680,12 +1680,12 @@ void esas2r_adapter_tasklet(unsigned long context) esas2r_do_tasklet_tasks(a); if (esas2r_is_tasklet_pending(a) - || (a->flags2 & AF2_INT_PENDING) - || (a->flags2 & AF2_TIMER_TICK)) { - esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); + || (test_bit(AF2_INT_PENDING, &a->flags2)) + || (test_bit(AF2_TIMER_TICK, &a->flags2))) { + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); esas2r_schedule_tasklet(a); } else { - esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); } } @@ -1707,7 +1707,7 @@ static void esas2r_timer_callback(unsigned long context) { struct esas2r_adapter *a = (struct esas2r_adapter *)context; - esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK); + set_bit(AF2_TIMER_TICK, &a->flags2); esas2r_schedule_tasklet(a); diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c index e540a2fa3d15..bf45beaad439 100644 --- a/drivers/scsi/esas2r/esas2r_targdb.c +++ b/drivers/scsi/esas2r/esas2r_targdb.c @@ -86,7 +86,7 @@ void esas2r_targ_db_report_changes(struct esas2r_adapter *a) esas2r_trace_enter(); - if (a->flags & AF_DISC_PENDING) { + if (test_bit(AF_DISC_PENDING, &a->flags)) { esas2r_trace_exit(); return; } diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c index f8ec6d636846..30028e56df63 100644 --- a/drivers/scsi/esas2r/esas2r_vda.c +++ b/drivers/scsi/esas2r/esas2r_vda.c @@ -84,7 +84,7 @@ bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, return false; } - if (a->flags & AF_DEGRADED_MODE) { + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { vi->status = ATTO_STS_DEGRADED; return false; } @@ -302,6 +302,7 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg; struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp; + char buf[sizeof(cfg->data.init.fw_release) + 1]; cfg->data_length = cpu_to_le32(sizeof(struct atto_vda_cfg_init)); @@ -309,10 +310,12 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, le32_to_cpu(rsp->vda_version); cfg->data.init.fw_build = rsp->fw_build; - sprintf((char *)&cfg->data.init.fw_release, - 
"%1d.%02d", - (int)LOBYTE(le16_to_cpu(rsp->fw_release)), - (int)HIBYTE(le16_to_cpu(rsp->fw_release))); + snprintf(buf, sizeof(buf), "%1.1u.%2.2u", + (int)LOBYTE(le16_to_cpu(rsp->fw_release)), + (int)HIBYTE(le16_to_cpu(rsp->fw_release))); + + memcpy(&cfg->data.init.fw_release, buf, + sizeof(cfg->data.init.fw_release)); if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A') cfg->data.init.fw_version = @@ -386,7 +389,7 @@ void esas2r_build_mgt_req(struct esas2r_adapter *a, vrq->length = cpu_to_le32(length); if (vrq->length) { - if (a->flags & AF_LEGACY_SGE_MODE) { + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { vrq->sg_list_offset = (u8)offsetof( struct atto_vda_mgmt_req, sge); @@ -424,7 +427,7 @@ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq) vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data)); - if (a->flags & AF_LEGACY_SGE_MODE) { + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req, sge); vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 07453bbf05e7..f3170008ae71 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -408,6 +408,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, } ctlr = fcoe_ctlr_device_priv(ctlr_dev); + ctlr->cdev = ctlr_dev; fcoe = fcoe_ctlr_priv(ctlr); dev_hold(netdev); @@ -1440,22 +1441,28 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, ctlr = fcoe_to_ctlr(fcoe); lport = ctlr->lp; if (unlikely(!lport)) { - FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); + FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n"); goto err2; } if (!lport->link_up) goto err2; - FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " - "data:%p tail:%p end:%p sum:%d dev:%s", + FCOE_NETDEV_DBG(netdev, + "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n", skb->len, skb->data_len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->csum, skb->dev ? 
skb->dev->name : "<NULL>"); + + skb = skb_share_check(skb, GFP_ATOMIC); + + if (skb == NULL) + return NET_RX_DROP; + eh = eth_hdr(skb); if (is_fip_mode(ctlr) && - compare_ether_addr(eh->h_source, ctlr->dest_addr)) { + !ether_addr_equal(eh->h_source, ctlr->dest_addr)) { FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", eh->h_source); goto err; @@ -1540,13 +1547,13 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, wake_up_process(fps->thread); spin_unlock(&fps->fcoe_rx_list.lock); - return 0; + return NET_RX_SUCCESS; err: per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; put_cpu(); err2: kfree_skb(skb); - return -1; + return NET_RX_DROP; } /** @@ -1788,13 +1795,13 @@ static void fcoe_recv_frame(struct sk_buff *skb) lport = fr->fr_dev; if (unlikely(!lport)) { if (skb->destructor != fcoe_percpu_flush_done) - FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); kfree_skb(skb); return; } - FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " - "head:%p data:%p tail:%p end:%p sum:%d dev:%s", + FCOE_NETDEV_DBG(skb->dev, + "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n", skb->len, skb->data_len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->csum, diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 203415e02518..34a1b1f333b4 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -160,74 +160,113 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) } EXPORT_SYMBOL(fcoe_ctlr_init); +/** + * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device} + * @new: The newly discovered FCF + * + * Called with fip->ctlr_mutex held + */ static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new) { struct fcoe_ctlr *fip = new->fip; - struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); - struct fcoe_fcf_device temp, *fcf_dev; - int rc = 0; + struct fcoe_ctlr_device *ctlr_dev; + struct fcoe_fcf_device *temp, *fcf_dev; + int rc = -ENOMEM; LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", new->fabric_name, new->fcf_mac); - mutex_lock(&ctlr_dev->lock); - - temp.fabric_name = new->fabric_name; - temp.switch_name = new->switch_name; - temp.fc_map = new->fc_map; - temp.vfid = new->vfid; - memcpy(temp.mac, new->fcf_mac, ETH_ALEN); - temp.priority = new->pri; - temp.fka_period = new->fka_period; - temp.selected = 0; /* default to unselected */ - - fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp); - if (unlikely(!fcf_dev)) { - rc = -ENOMEM; + temp = kzalloc(sizeof(*temp), GFP_KERNEL); + if (!temp) goto out; - } + + temp->fabric_name = new->fabric_name; + temp->switch_name = new->switch_name; + temp->fc_map = new->fc_map; + temp->vfid = new->vfid; + memcpy(temp->mac, new->fcf_mac, ETH_ALEN); + temp->priority = new->pri; + temp->fka_period = new->fka_period; + temp->selected = 0; /* default to unselected */ /* - * The fcoe_sysfs layer can return a CONNECTED fcf that - * has a priv (fcf was never deleted) or a CONNECTED fcf - * that doesn't have a priv (fcf was deleted). However, - * libfcoe will always delete FCFs before trying to add - * them. This is ensured because both recv_adv and - * age_fcfs are protected by the the fcoe_ctlr's mutex. - * This means that we should never get a FCF with a - * non-NULL priv pointer. + * If ctlr_dev doesn't exist then it means we're a libfcoe user + * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device. 
+ * fnic would be an example of a driver with this behavior. In this
+ * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
+ * don't want to make sysfs changes.
 	 */
-	BUG_ON(fcf_dev->priv);
-	fcf_dev->priv = new;
-	new->fcf_dev = fcf_dev;
+	ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	if (ctlr_dev) {
+		mutex_lock(&ctlr_dev->lock);
+		fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp);
+		if (unlikely(!fcf_dev)) {
+			rc = -ENOMEM;
+			mutex_unlock(&ctlr_dev->lock);
+			goto out;
+		}
+
+		/*
+		 * The fcoe_sysfs layer can return a CONNECTED fcf that
+		 * has a priv (fcf was never deleted) or a CONNECTED fcf
+		 * that doesn't have a priv (fcf was deleted). However,
+		 * libfcoe will always delete FCFs before trying to add
+		 * them. This is ensured because both recv_adv and
+		 * age_fcfs are protected by the fcoe_ctlr's mutex.
+		 * This means that we should never get a FCF with a
+		 * non-NULL priv pointer.
+		 */
+		BUG_ON(fcf_dev->priv);
+
+		fcf_dev->priv = new;
+		new->fcf_dev = fcf_dev;
+		mutex_unlock(&ctlr_dev->lock);
+	}
 
 	list_add(&new->list, &fip->fcfs);
 	fip->fcf_count++;
+	rc = 0;
 out:
-	mutex_unlock(&ctlr_dev->lock);
+	kfree(temp);
 	return rc;
 }
 
+/**
+ * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} from a fcoe_ctlr{,_device}
+ * @new: The FCF to be removed
+ *
+ * Called with fip->ctlr_mutex held
+ */
 static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
 {
 	struct fcoe_ctlr *fip = new->fip;
-	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	struct fcoe_ctlr_device *cdev;
 	struct fcoe_fcf_device *fcf_dev;
 
 	list_del(&new->list);
 	fip->fcf_count--;
 
-	mutex_lock(&ctlr_dev->lock);
-
-	fcf_dev = fcoe_fcf_to_fcf_dev(new);
-	WARN_ON(!fcf_dev);
-	new->fcf_dev = NULL;
-	fcoe_fcf_device_delete(fcf_dev);
-	kfree(new);
-
-	mutex_unlock(&cdev->lock);
+	/*
+	 * If ctlr_dev doesn't exist then it means we're a libfcoe user
+	 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device
+	 * or a fcoe_fcf_device.
+	 *
+	 * fnic would be an example of a driver with this behavior. In this
+	 * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
+	 * but we don't want to make sysfs changes.
+ */ + cdev = fcoe_ctlr_to_ctlr_dev(fip); + if (cdev) { + mutex_lock(&cdev->lock); + fcf_dev = fcoe_fcf_to_fcf_dev(new); + WARN_ON(!fcf_dev); + new->fcf_dev = NULL; + fcoe_fcf_device_delete(fcf_dev); + kfree(new); + mutex_unlock(&cdev->lock); + } } /** @@ -300,7 +339,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) spin_unlock_bh(&fip->ctlr_lock); sel = fip->sel_fcf; - if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) + if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) goto unlock; if (!is_zero_ether_addr(fip->dest_addr)) { printk(KERN_NOTICE "libfcoe: host%d: " @@ -1000,7 +1039,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) if (fcf->switch_name == new.switch_name && fcf->fabric_name == new.fabric_name && fcf->fc_map == new.fc_map && - compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { + ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) { found = 1; break; } @@ -1340,7 +1379,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, mp = (struct fip_mac_desc *)desc; if (dlen < sizeof(*mp)) goto err; - if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) + if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac)) goto err; desc_mask &= ~BIT(FIP_DT_MAC); break; @@ -1418,8 +1457,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, * 'port_id' is already validated, check MAC address and * wwpn */ - if (compare_ether_addr(fip->get_src_addr(vn_port), - vp->fd_mac) != 0 || + if (!ether_addr_equal(fip->get_src_addr(vn_port), + vp->fd_mac) || get_unaligned_be64(&vp->fd_wwpn) != vn_port->wwpn) continue; @@ -1453,6 +1492,9 @@ err: */ void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return; skb_queue_tail(&fip->fip_recv_list, skb); schedule_work(&fip->recv_work); } @@ -1479,12 +1521,12 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; eh = eth_hdr(skb); if (fip->mode == FIP_MODE_VN2VN) { - if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && - compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && - compare_ether_addr(eh->h_dest, fcoe_all_p2p)) + if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && + !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) && + !ether_addr_equal(eh->h_dest, fcoe_all_p2p)) goto drop; - } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && - compare_ether_addr(eh->h_dest, fcoe_all_enode)) + } else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && + !ether_addr_equal(eh->h_dest, fcoe_all_enode)) goto drop; fiph = (struct fip_header *)skb->data; op = ntohs(fiph->fip_op); @@ -1856,7 +1898,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport, * address_mode flag to use FC_OUI-based Ethernet DA. * Otherwise we use the FCoE gateway addr */ - if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { + if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { fcoe_ctlr_map_dest(fip); } else { memcpy(fip->dest_addr, sa, ETH_ALEN); @@ -2825,8 +2867,8 @@ unlock: * disabled, so that should ensure that this routine is only called * when nothing is happening. 
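 *
 * A recurring conversion in the fcoe hunks above deserves a note:
 * compare_ether_addr() returned nonzero when two MAC addresses
 * differed (memcmp-style), while ether_addr_equal() returns true when
 * they match, so every converted call site flips its sense. Sketch:
 *
 *	#include <linux/etherdevice.h>
 *
 *	// before: if (!compare_ether_addr(sa, da)) ... (addresses equal)
 *	if (ether_addr_equal(sa, da))	// true when sa == da
 *		handle_match();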
*/ -void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, - enum fip_state fip_mode) +static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, + enum fip_state fip_mode) { void *priv; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index c9382d6eee78..045c4e11ee54 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -300,29 +300,29 @@ static ssize_t store_ctlr_mode(struct device *dev, switch (ctlr->enabled) { case FCOE_CTLR_ENABLED: - LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled."); + LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n"); return -EBUSY; case FCOE_CTLR_DISABLED: if (!ctlr->f->set_fcoe_ctlr_mode) { LIBFCOE_SYSFS_DBG(ctlr, - "Mode change not supported by LLD."); + "Mode change not supported by LLD.\n"); return -ENOTSUPP; } ctlr->mode = fcoe_parse_mode(mode); if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { - LIBFCOE_SYSFS_DBG(ctlr, - "Unknown mode %s provided.", buf); + LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n", + buf); return -EINVAL; } ctlr->f->set_fcoe_ctlr_mode(ctlr); - LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf); + LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf); return count; case FCOE_CTLR_UNUSED: default: - LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported."); + LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n"); return -ENOTSUPP; }; } @@ -553,16 +553,20 @@ static struct device_type fcoe_fcf_device_type = { .release = fcoe_fcf_device_release, }; -static struct bus_attribute fcoe_bus_attr_group[] = { - __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store), - __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store), - __ATTR_NULL +static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store); +static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store); + +static struct attribute *fcoe_bus_attrs[] = { + &bus_attr_ctlr_create.attr, + &bus_attr_ctlr_destroy.attr, + NULL, }; +ATTRIBUTE_GROUPS(fcoe_bus); static struct bus_type fcoe_bus_type = { .name = "fcoe", .match = &fcoe_bus_match, - .bus_attrs = fcoe_bus_attr_group, + .bus_groups = fcoe_bus_groups, }; /** @@ -653,7 +657,7 @@ static int fcoe_fcf_device_match(struct fcoe_fcf_device *new, if (new->switch_name == old->switch_name && new->fabric_name == old->fabric_name && new->fc_map == old->fc_map && - compare_ether_addr(new->mac, old->mac) == 0) + ether_addr_equal(new->mac, old->mac)) return 1; return 0; } diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index c18c68150e9f..528d43b7b569 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -27,6 +27,7 @@ #include "fnic_io.h" #include "fnic_res.h" #include "fnic_trace.h" +#include "fnic_stats.h" #include "vnic_dev.h" #include "vnic_wq.h" #include "vnic_rq.h" @@ -38,11 +39,13 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.23" +#define DRV_VERSION "1.5.0.45" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " #define DESC_CLEAN_LOW_WATERMARK 8 +#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ +#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ #define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ #define FNIC_DFLT_QUEUE_DEPTH 32 @@ -154,6 +157,9 @@ do { \ FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ shost_printk(kern_level, host, fmt, ##args);) +#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) 
\ + shost_printk(kern_level, host, fmt, ##args) + extern const char *fnic_state_str[]; enum fnic_intx_intr_index { @@ -215,16 +221,25 @@ struct fnic { struct vnic_stats *stats; unsigned long stats_time; /* time of stats update */ + unsigned long stats_reset_time; /* time of stats reset */ struct vnic_nic_cfg *nic_cfg; char name[IFNAMSIZ]; struct timer_list notify_timer; /* used for MSI interrupts */ + unsigned int fnic_max_tag_id; unsigned int err_intr_offset; unsigned int link_intr_offset; unsigned int wq_count; unsigned int cq_count; + struct dentry *fnic_stats_debugfs_host; + struct dentry *fnic_stats_debugfs_file; + struct dentry *fnic_reset_debugfs_file; + unsigned int reset_stats; + atomic64_t io_cmpl_skip; + struct fnic_stats fnic_stats; + u32 vlan_hw_insert:1; /* let hw insert the tag */ u32 in_remove:1; /* fnic device in removal */ u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ @@ -359,4 +374,5 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) return ((fnic->state_flags & st_flags) == st_flags); } void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); +void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); #endif /* _FNIC_H_ */ diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index cbcb0121c84d..b6073f875761 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -23,6 +23,58 @@ static struct dentry *fnic_trace_debugfs_root; static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_enable; +static struct dentry *fnic_stats_debugfs_root; + +/* + * fnic_debugfs_init - Initialize debugfs for fnic debug logging + * + * Description: + * When Debugfs is configured this routine sets up the fnic debugfs + * file system. If not already created, this routine will create the + * fnic directory and statistics directory for trace buffer and + * stats logging. + */ +int fnic_debugfs_init(void) +{ + int rc = -1; + fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); + if (!fnic_trace_debugfs_root) { + printk(KERN_DEBUG "Cannot create debugfs root\n"); + return rc; + } + + if (!fnic_trace_debugfs_root) { + printk(KERN_DEBUG + "fnic root directory doesn't exist in debugfs\n"); + return rc; + } + + fnic_stats_debugfs_root = debugfs_create_dir("statistics", + fnic_trace_debugfs_root); + if (!fnic_stats_debugfs_root) { + printk(KERN_DEBUG "Cannot create Statistics directory\n"); + return rc; + } + + rc = 0; + return rc; +} + +/* + * fnic_debugfs_terminate - Tear down debugfs infrastructure + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic. + */ +void fnic_debugfs_terminate(void) +{ + debugfs_remove(fnic_stats_debugfs_root); + fnic_stats_debugfs_root = NULL; + + debugfs_remove(fnic_trace_debugfs_root); + fnic_trace_debugfs_root = NULL; +} /* * fnic_trace_ctrl_open - Open the trace_enable file @@ -241,16 +293,16 @@ static const struct file_operations fnic_trace_debugfs_fops = { * Description: * When Debugfs is configured this routine sets up the fnic debugfs * file system. If not already created, this routine will create the - * fnic directory. It will create file trace to log fnic trace buffer - * output into debugfs and it will also create file trace_enable to - * control enable/disable of trace logging into trace buffer. 
+ * file trace to log fnic trace buffer output into debugfs and
+ * it will also create the file trace_enable to control enable/disable
+ * of trace logging into the trace buffer.
  */
 int fnic_trace_debugfs_init(void)
 {
 	int rc = -1;
-	fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
 	if (!fnic_trace_debugfs_root) {
-		printk(KERN_DEBUG "Cannot create debugfs root\n");
+		printk(KERN_DEBUG
+			"FNIC Debugfs root directory doesn't exist\n");
 		return rc;
 	}
 	fnic_trace_enable = debugfs_create_file("tracing_enable",
@@ -259,8 +311,8 @@ int fnic_trace_debugfs_init(void)
 					  NULL, &fnic_trace_ctrl_fops);
 	if (!fnic_trace_enable) {
-		printk(KERN_DEBUG "Cannot create trace_enable file"
-		       " under debugfs");
+		printk(KERN_DEBUG
+			"Cannot create trace_enable file under debugfs\n");
 		return rc;
 	}
@@ -271,7 +323,8 @@ int fnic_trace_debugfs_init(void)
 					  &fnic_trace_debugfs_fops);
 	if (!fnic_trace_debugfs_file) {
-		printk(KERN_DEBUG "Cannot create trace file under debugfs");
+		printk(KERN_DEBUG
+			"Cannot create trace file under debugfs\n");
 		return rc;
 	}
 	rc = 0;
@@ -295,8 +348,323 @@ void fnic_trace_debugfs_terminate(void)
 		debugfs_remove(fnic_trace_enable);
 		fnic_trace_enable = NULL;
 	}
-	if (fnic_trace_debugfs_root) {
-		debugfs_remove(fnic_trace_debugfs_root);
-		fnic_trace_debugfs_root = NULL;
+}
+
+/*
+ * fnic_reset_stats_open - Open the reset_stats file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the stats reset flag.
+ *
+ * Description:
+ * This routine opens a debugfs file reset_stats and stores the
+ * i_private data in the debug structure so it can be retrieved later
+ * while performing other file operations.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_reset_stats_open(struct inode *inode, struct file *file)
+{
+	struct stats_debug_info *debug;
+
+	debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->i_private = inode->i_private;
+
+	file->private_data = debug;
+
+	return 0;
+}
+
+/*
+ * fnic_reset_stats_read - Read a reset_stats debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the variable reset_stats and stores
+ * it into the local @buf. It will start reading the file at @ppos and
+ * copy up to @cnt of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_reset_stats_read(struct file *file,
+					char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	struct fnic *fnic = (struct fnic *)debug->i_private;
+	char buf[64];
+	int len;
+
+	len = sprintf(buf, "%u\n", fnic->reset_stats);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_reset_stats_write - Write to reset_stats debugfs file
+ * @file: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from the user buffer @ubuf to buffer @buf
+ * and resets the cumulative stats of the fnic.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
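+ *
+ * The handler below follows the standard bounded debugfs write
+ * pattern: reject writes that would overflow the stack buffer, copy
+ * from user space, NUL-terminate, then parse with kstrtoul(). The
+ * skeleton in isolation (a sketch, not the driver's exact code):
+ *
+ *	char buf[64];
+ *	unsigned long val;
+ *
+ *	if (cnt >= sizeof(buf))
+ *		return -EINVAL;		// leave room for the NUL
+ *	if (copy_from_user(buf, ubuf, cnt))
+ *		return -EFAULT;		// bad user pointer
+ *	buf[cnt] = '\0';		// kstrtoul() needs a C string
+ *	if (kstrtoul(buf, 10, &val) < 0)
+ *		return -EINVAL;		// not a decimal number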
+ */
+static ssize_t fnic_reset_stats_write(struct file *file,
+					const char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	struct fnic *fnic = (struct fnic *)debug->i_private;
+	struct fnic_stats *stats = &fnic->fnic_stats;
+	u64 *io_stats_p = (u64 *)&stats->io_stats;
+	u64 *fw_stats_p = (u64 *)&stats->fw_stats;
+	char buf[64];
+	unsigned long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	fnic->reset_stats = val;
+
+	if (fnic->reset_stats) {
+		/* The skip counter avoids discrepancies between the Num
+		 * IOs and IO Completions stats: don't count completions
+		 * for IOs that were already active when the stats were
+		 * reset.
+		 */
+		atomic64_set(&fnic->io_cmpl_skip,
+			atomic64_read(&stats->io_stats.active_ios));
+		memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
+		memset(&stats->term_stats, 0,
+			sizeof(struct terminate_stats));
+		memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
+		memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
+		memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
+		memset(io_stats_p+1, 0,
+			sizeof(struct io_path_stats) - sizeof(u64));
+		memset(fw_stats_p+1, 0,
+			sizeof(struct fw_stats) - sizeof(u64));
+	}
+
+	(*ppos)++;
+	return cnt;
+}
+
+/*
+ * fnic_reset_stats_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_reset_stats_release(struct inode *inode,
+					struct file *file)
+{
+	struct stats_debug_info *debug = file->private_data;
+	kfree(debug);
+	return 0;
+}
+
+/*
+ * fnic_stats_debugfs_open - Open the stats file for a specific host
+ * and get the fnic stats.
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the specific host statistics.
+ *
+ * Description:
+ * This routine opens a debugfs file stats for a specific host and
+ * prints the fnic stats.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_stats_debugfs_open(struct inode *inode,
+					struct file *file)
+{
+	struct fnic *fnic = inode->i_private;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+	struct stats_debug_info *debug;
+	int buf_size = 2 * PAGE_SIZE;
+
+	debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->debug_buffer = vmalloc(buf_size);
+	if (!debug->debug_buffer) {
+		kfree(debug);
+		return -ENOMEM;
+	}
+
+	debug->buf_size = buf_size;
+	memset((void *)debug->debug_buffer, 0, buf_size);
+	debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+
+	file->private_data = debug;
+
+	return 0;
+}
+
+/*
+ * fnic_stats_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
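+ *
+ * simple_read_from_buffer() does all of the offset/length bookkeeping
+ * for reads like this one, so a handler only has to hand it a kernel
+ * buffer. The whole read path it enables looks roughly like this
+ * (sketch, illustrative name):
+ *
+ *	static ssize_t my_read(struct file *f, char __user *ubuf,
+ *			       size_t nbytes, loff_t *pos)
+ *	{
+ *		struct stats_debug_info *d = f->private_data;
+ *
+ *		// copies min(nbytes, buffer_len - *pos), advances *pos
+ *		return simple_read_from_buffer(ubuf, nbytes, pos,
+ *					       d->debug_buffer,
+ *					       d->buffer_len);
+ *	}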
+ */
+static ssize_t fnic_stats_debugfs_read(struct file *file,
+					char __user *ubuf,
+					size_t nbytes,
+					loff_t *pos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	int rc = 0;
+	rc = simple_read_from_buffer(ubuf, nbytes, pos,
+					debug->debug_buffer,
+					debug->buffer_len);
+	return rc;
+}
+
+/*
+ * fnic_stats_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_stats_debugfs_release(struct inode *inode,
+					struct file *file)
+{
+	struct stats_debug_info *debug = file->private_data;
+	vfree(debug->debug_buffer);
+	kfree(debug);
+	return 0;
+}
+
+static const struct file_operations fnic_stats_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = fnic_stats_debugfs_open,
+	.read = fnic_stats_debugfs_read,
+	.release = fnic_stats_debugfs_release,
+};
+
+static const struct file_operations fnic_reset_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = fnic_reset_stats_open,
+	.read = fnic_reset_stats_read,
+	.write = fnic_reset_stats_write,
+	.release = fnic_reset_stats_release,
+};
+
+/*
+ * fnic_stats_debugfs_init - Initialize stats struct and create stats file
+ * per fnic
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the stats file per fnic.
+ * It will create the files stats and reset_stats under the
+ * statistics/host# directory to log per-fnic stats.
+ */
+int fnic_stats_debugfs_init(struct fnic *fnic)
+{
+	int rc = -1;
+	char name[16];
+
+	snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+
+	if (!fnic_stats_debugfs_root) {
+		printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
+		return rc;
+	}
+	fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
+						fnic_stats_debugfs_root);
+	if (!fnic->fnic_stats_debugfs_host) {
+		printk(KERN_DEBUG "Cannot create host directory\n");
+		return rc;
+	}
+
+	fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
+						S_IFREG|S_IRUGO|S_IWUSR,
+						fnic->fnic_stats_debugfs_host,
+						fnic,
+						&fnic_stats_debugfs_fops);
+	if (!fnic->fnic_stats_debugfs_file) {
+		printk(KERN_DEBUG "Cannot create host stats file\n");
+		return rc;
+	}
+
+	fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
+						S_IFREG|S_IRUGO|S_IWUSR,
+						fnic->fnic_stats_debugfs_host,
+						fnic,
+						&fnic_reset_debugfs_fops);
+	if (!fnic->fnic_reset_debugfs_file) {
+		printk(KERN_DEBUG "Cannot create host reset_stats file\n");
+		return rc;
+	}
+	rc = 0;
+	return rc;
+}
+
+/*
+ * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic stats.
+ */ +void fnic_stats_debugfs_remove(struct fnic *fnic) +{ + if (!fnic) + return; + + debugfs_remove(fnic->fnic_stats_debugfs_file); + fnic->fnic_stats_debugfs_file = NULL; + + debugfs_remove(fnic->fnic_reset_debugfs_file); + fnic->fnic_reset_debugfs_file = NULL; + + debugfs_remove(fnic->fnic_stats_debugfs_host); + fnic->fnic_stats_debugfs_host = NULL; } diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 006fa92a02df..1671325aec7f 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -302,6 +302,7 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, static void fnic_fcoe_send_vlan_req(struct fnic *fnic) { struct fcoe_ctlr *fip = &fnic->ctlr; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct sk_buff *skb; char *eth_fr; int fr_len; @@ -337,6 +338,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic) vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); + atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); skb_put(skb, sizeof(*vlan)); skb->protocol = htons(ETH_P_FIP); @@ -354,6 +356,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) struct fcoe_ctlr *fip = &fnic->ctlr; struct fip_header *fiph; struct fip_desc *desc; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; u16 vid; size_t rlen; size_t dlen; @@ -402,6 +405,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) /* any VLAN descriptors present ? */ if (list_empty(&fnic->vlans)) { /* retry from timer */ + atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, "No VLAN descriptors in FIP VLAN response\n"); spin_unlock_irqrestore(&fnic->vlans_lock, flags); @@ -533,6 +537,7 @@ drop: void fnic_handle_fip_frame(struct work_struct *work) { struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; unsigned long flags; struct sk_buff *skb; struct ethhdr *eh; @@ -567,6 +572,8 @@ void fnic_handle_fip_frame(struct work_struct *work) * fcf's & restart from scratch */ if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { + atomic64_inc( + &fnic_stats->vlan_stats.flogi_rejects); shost_printk(KERN_INFO, fnic->lport->host, "Trigger a Link down - VLAN Disc\n"); fcoe_ctlr_link_down(&fnic->ctlr); @@ -651,13 +658,13 @@ void fnic_update_mac_locked(struct fnic *fnic, u8 *new) if (is_zero_ether_addr(new)) new = ctl; - if (!compare_ether_addr(data, new)) + if (ether_addr_equal(data, new)) return; FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); - if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) vnic_dev_del_addr(fnic->vdev, data); memcpy(data, new, ETH_ALEN); - if (compare_ether_addr(new, ctl)) + if (!ether_addr_equal(new, ctl)) vnic_dev_add_addr(fnic->vdev, new); } @@ -753,6 +760,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc struct fnic *fnic = vnic_dev_priv(rq->vdev); struct sk_buff *skb; struct fc_frame *fp; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; unsigned int eth_hdrs_stripped; u8 type, color, eop, sop, ingress_port, vlan_stripped; u8 fcoe = 0, fcoe_sof, fcoe_eof; @@ -803,6 +811,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc eth_hdrs_stripped = 0; skb_trim(skb, bytes_written); if (!fcs_ok) { + 
atomic64_inc(&fnic_stats->misc_stats.frame_errors); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "fcs error. dropping packet.\n"); goto drop; @@ -818,6 +827,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc } if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "fnic rq_cmpl fcoe x%x fcsok x%x" " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" @@ -1205,6 +1215,7 @@ void fnic_handle_fip_timer(struct fnic *fnic) { unsigned long flags; struct fcoe_vlan *vlan; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; u64 sol_time; spin_lock_irqsave(&fnic->fnic_lock, flags); @@ -1273,6 +1284,7 @@ void fnic_handle_fip_timer(struct fnic *fnic) vlan->state = FIP_VLAN_SENT; /* sent now */ } spin_unlock_irqrestore(&fnic->vlans_lock, flags); + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); vlan->sol_count++; sol_time = jiffies + msecs_to_jiffies (FCOE_CTLR_START_DELAY); diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index 5c1f223cabce..7d9b54ae7f62 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -37,6 +37,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data) if (!pba) return IRQ_NONE; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + if (pba & (1 << FNIC_INTX_NOTIFY)) { vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]); fnic_handle_link_event(fnic); @@ -66,6 +69,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data) struct fnic *fnic = data; unsigned long work_done = 0; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + work_done += fnic_wq_copy_cmpl_handler(fnic, -1); work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1); @@ -83,6 +89,9 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data) struct fnic *fnic = data; unsigned long rq_work_done = 0; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + rq_work_done = fnic_rq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], rq_work_done, @@ -97,6 +106,9 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data) struct fnic *fnic = data; unsigned long wq_work_done = 0; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + wq_work_done = fnic_wq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], wq_work_done, @@ -110,6 +122,9 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) struct fnic *fnic = data; unsigned long wq_copy_work_done = 0; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], wq_copy_work_done, @@ -122,6 +137,9 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data) { struct fnic *fnic = data; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); fnic_log_q_error(fnic); fnic_handle_link_event(fnic); diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index d08dc12082c9..33e4ec2bfe73 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ 
b/drivers/scsi/fnic/fnic_main.c @@ -74,6 +74,10 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " "for fnic trace buffer"); +static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; +module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); + static struct libfc_function_template fnic_transport_template = { .frame_send = fnic_send, .lport_set_port_id = fnic_set_port_id, @@ -91,7 +95,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev) if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); + scsi_activate_tcq(sdev, fnic_max_qdepth); return 0; } @@ -126,6 +130,7 @@ fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) static void fnic_get_host_speed(struct Scsi_Host *shost); static struct scsi_transport_template *fnic_fc_transport; static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); +static void fnic_reset_host_stats(struct Scsi_Host *); static struct fc_function_template fnic_fc_functions = { @@ -153,6 +158,7 @@ static struct fc_function_template fnic_fc_functions = { .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, .issue_fc_host_lip = fnic_reset, .get_fc_host_stats = fnic_get_stats, + .reset_fc_host_stats = fnic_reset_host_stats, .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), .terminate_rport_io = fnic_terminate_rport_io, .bsg_request = fc_lport_bsg_request, @@ -206,13 +212,116 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; stats->invalid_crc_count = vs->rx.rx_crc_errors; - stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ; + stats->seconds_since_last_reset = + (jiffies - fnic->stats_reset_time) / HZ; stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); return stats; } +/* + * fnic_dump_fchost_stats + * note : dumps fc_statistics into system logs + */ +void fnic_dump_fchost_stats(struct Scsi_Host *host, + struct fc_host_statistics *stats) +{ + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: seconds since last reset = %llu\n", + stats->seconds_since_last_reset); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: tx frames = %llu\n", + stats->tx_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: tx words = %llu\n", + stats->tx_words); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: rx frames = %llu\n", + stats->rx_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: rx words = %llu\n", + stats->rx_words); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: lip count = %llu\n", + stats->lip_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: nos count = %llu\n", + stats->nos_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: error frames = %llu\n", + stats->error_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: dumped frames = %llu\n", + stats->dumped_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: link failure count = %llu\n", + stats->link_failure_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: loss of sync count = %llu\n", + stats->loss_of_sync_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: loss of signal count = %llu\n", + stats->loss_of_signal_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: prim seq protocol err count = %llu\n", + stats->prim_seq_protocol_err_count); + 
FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: invalid tx word count= %llu\n", + stats->invalid_tx_word_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: invalid crc count = %llu\n", + stats->invalid_crc_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp input requests = %llu\n", + stats->fcp_input_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp output requests = %llu\n", + stats->fcp_output_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp control requests = %llu\n", + stats->fcp_control_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp input megabytes = %llu\n", + stats->fcp_input_megabytes); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp output megabytes = %llu\n", + stats->fcp_output_megabytes); + return; +} + +/* + * fnic_reset_host_stats : clears host stats + * note : called when reset_statistics set under sysfs dir + */ +static void fnic_reset_host_stats(struct Scsi_Host *host) +{ + int ret; + struct fc_lport *lp = shost_priv(host); + struct fnic *fnic = lport_priv(lp); + struct fc_host_statistics *stats; + unsigned long flags; + + /* dump current stats, before clearing them */ + stats = fnic_get_stats(host); + fnic_dump_fchost_stats(host, stats); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + ret = vnic_dev_stats_clear(fnic->vdev); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + "fnic: Reset vnic stats failed" + " 0x%x", ret); + return; + } + fnic->stats_reset_time = jiffies; + memset(stats, 0, sizeof(*stats)); + + return; +} + void fnic_log_q_error(struct fnic *fnic) { unsigned int i; @@ -447,11 +556,11 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) host->transportt = fnic_fc_transport; - err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ); + err = fnic_stats_debugfs_init(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, - "Unable to alloc shared tag map\n"); - goto err_out_free_hba; + "Failed to initialize debugfs for stats\n"); + fnic_stats_debugfs_remove(fnic); } /* Setup PCI resources */ @@ -476,10 +585,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); /* Query PCI controller on system for DMA addressing - * limitation for the device. Try 40-bit first, and + * limitation for the device. Try 64-bit first, and * fail to 32-bit. 
*/ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { @@ -496,10 +605,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, - "Unable to obtain 40-bit DMA " + "Unable to obtain 64-bit DMA " "for consistent allocations, aborting.\n"); goto err_out_release_regions; } @@ -566,6 +675,22 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "aborting.\n"); goto err_out_dev_close; } + + /* Configure Maximum Outstanding IO reqs*/ + if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) { + host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, + max_t(u32, FNIC_MIN_IO_REQ, + fnic->config.io_throttle_count)); + } + fnic->fnic_max_tag_id = host->can_queue; + + err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Unable to alloc shared tag map\n"); + goto err_out_dev_close; + } + host->max_lun = fnic->config.luns_per_tgt; host->max_id = FNIC_MAX_FCP_TARGET; host->max_cmd_len = FCOE_MAX_CMD_LEN; @@ -719,6 +844,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } fc_lport_init_stats(lp); + fnic->stats_reset_time = jiffies; fc_lport_config(lp); @@ -798,6 +924,7 @@ err_out_release_regions: err_out_disable_device: pci_disable_device(pdev); err_out_free_hba: + fnic_stats_debugfs_remove(fnic); scsi_host_put(lp->host); err_out: return err; @@ -850,6 +977,7 @@ static void fnic_remove(struct pci_dev *pdev) fcoe_ctlr_destroy(&fnic->ctlr); fc_lport_destroy(lp); + fnic_stats_debugfs_remove(fnic); /* * This stops the fnic device, masks all interrupts. Completed @@ -894,6 +1022,14 @@ static int __init fnic_init_module(void) printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); + /* Create debugfs entries for fnic */ + err = fnic_debugfs_init(); + if (err < 0) { + printk(KERN_ERR PFX "Failed to create fnic directory " + "for tracing and stats logging\n"); + fnic_debugfs_terminate(); + } + /* Allocate memory for trace buffer */ err = fnic_trace_buf_init(); if (err < 0) { @@ -982,6 +1118,7 @@ err_create_fnic_sgl_slab_max: kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); err_create_fnic_sgl_slab_dflt: fnic_trace_free(); + fnic_debugfs_terminate(); return err; } @@ -998,6 +1135,7 @@ static void __exit fnic_cleanup_module(void) kmem_cache_destroy(fnic_io_req_cache); fc_release_transport(fnic_fc_transport); fnic_trace_free(); + fnic_debugfs_terminate(); } module_init(fnic_init_module); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index a97e6e584f8c..0521436d05d6 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, return &fnic->io_req_lock[hash]; } +static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic, + int tag) +{ + return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; +} + /* * Unmap the data buffer and sense buffer for an io_req, * also unmap and free the device-private scatter/gather list. 
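The fnic_io_lock_tag() helper added above lets the cleanup and reset loops take the per-IO spinlock directly from the tag, before scsi_host_find_tag() runs; the older fnic_io_lock_hash() path needed the scsi_cmnd pointer first, so the lock could only be taken after the lookup. A minimal sketch of the lock-pool idiom, assuming (as the mask implies) that FNIC_IO_LOCKS is a power of two; the pool size and helper name below are illustrative, not part of the patch:

#include <linux/spinlock.h>

#define IO_LOCKS 64	/* hypothetical pool size; must be a power of two */

static spinlock_t io_lock_pool[IO_LOCKS];	/* spin_lock_init() each one at probe time */

static inline spinlock_t *io_lock_for_tag(int tag)
{
	/* for a power-of-two pool, tag & (IO_LOCKS - 1) == tag % IO_LOCKS */
	return &io_lock_pool[tag & (IO_LOCKS - 1)];
}

Selecting the lock by tag rather than by command pointer is what allows the loops that follow to lock first, then look up the tag, and simply unlock and skip tags whose commands have already gone away.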
@@ -220,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic) if (!vnic_wq_copy_desc_avail(wq)) ret = -EAGAIN; - else + else { fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read( + &fnic->fnic_stats.fw_stats.active_fw_reqs)); + } spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); - if (!ret) + if (!ret) { + atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Issued fw reset\n"); - else { + } else { fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Failed to issue fw reset\n"); @@ -285,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) fc_id, fnic->ctlr.map_dest, gw_mac); } + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + flogi_reg_ioreq_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); return ret; @@ -304,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); struct fc_rport_libfc_priv *rp = rport->dd_data; struct host_sg_desc *desc; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; u8 pri_tag = 0; unsigned int i; unsigned long intr_flags; @@ -352,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, "fnic_queue_wq_copy_desc failure - no descriptors\n"); + atomic64_inc(&misc_stats->io_cpwq_alloc_failures); return SCSI_MLQUEUE_HOST_BUSY; } @@ -380,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, rport->maxframe_size, rp->r_a_tov, rp->e_d_tov); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); return 0; } @@ -395,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ struct fc_rport *rport; struct fnic_io_req *io_req = NULL; struct fnic *fnic = lport_priv(lp); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct vnic_wq_copy *wq; int ret; u64 cmd_trace; @@ -408,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ rport = starget_to_rport(scsi_target(sc->device)); ret = fc_remote_port_chkready(rport); if (ret) { + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); sc->result = ret; done(sc); return 0; @@ -430,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ /* Get a new io_req for this SCSI IO */ io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.alloc_failures); ret = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -456,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ 
mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], GFP_ATOMIC); if (!io_req->sgl_list) { + atomic64_inc(&fnic_stats->io_stats.alloc_failures); ret = SCSI_MLQUEUE_HOST_BUSY; scsi_dma_unmap(sc); mempool_free(io_req, fnic->io_req_pool); @@ -503,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ mempool_free(io_req, fnic->io_req_pool); } } else { + atomic64_inc(&fnic_stats->io_stats.active_ios); + atomic64_inc(&fnic_stats->io_stats.num_ios); + if (atomic64_read(&fnic_stats->io_stats.active_ios) > + atomic64_read(&fnic_stats->io_stats.max_active_ios)) + atomic64_set(&fnic_stats->io_stats.max_active_ios, + atomic64_read(&fnic_stats->io_stats.active_ios)); + /* REVISIT: Use per IO lock in the final code */ CMD_FLAGS(sc) |= FNIC_IO_ISSUED; } @@ -536,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, struct fcpio_tag tag; int ret = 0; unsigned long flags; + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + atomic64_inc(&reset_stats->fw_reset_completions); + /* Clean up all outstanding io requests */ fnic_cleanup_io(fnic, SCSI_NO_TAG); + atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); + atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); + spin_lock_irqsave(&fnic->fnic_lock, flags); /* fnic should be in FC_TRANS_ETH_MODE */ @@ -565,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, * reset the firmware. Free the cached flogi */ fnic->state = FNIC_IN_FC_MODE; + atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } } else { @@ -572,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, fnic->lport->host, "Unexpected state %s while processing" " reset cmpl\n", fnic_state_to_str(fnic->state)); + atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } @@ -695,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + fnic->fnic_stats.misc_stats.last_ack_time = jiffies; if (is_ack_index_in_range(wq, request_out)) { fnic->fw_ack_index[0] = request_out; fnic->fw_ack_recd[0] = 1; - } + } else + atomic64_inc( + &fnic->fnic_stats.misc_stats.ack_index_out_of_range); + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); FNIC_TRACE(fnic_fcpio_ack_handler, fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], @@ -720,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, struct fcpio_icmnd_cmpl *icmnd_cmpl; struct fnic_io_req *io_req; struct scsi_cmnd *sc; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; unsigned long flags; spinlock_t *io_lock; u64 cmd_trace; @@ -730,7 +782,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, fcpio_tag_id_dec(&tag, &id); icmnd_cmpl = &desc->u.icmnd_cmpl; - if (id >= FNIC_MAX_IO_REQ) { + if (id >= fnic->fnic_max_tag_id) { shost_printk(KERN_ERR, fnic->lport->host, "Tag out of range tag %x hdr status = %s\n", id, fnic_fcpio_status_to_str(hdr_status)); @@ -740,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, sc = scsi_host_find_tag(fnic->lport->host, id); WARN_ON_ONCE(!sc); if (!sc) { + atomic64_inc(&fnic_stats->io_stats.sc_null); shost_printk(KERN_ERR, fnic->lport->host, "icmnd_cmpl sc is null - " "hdr status = %s tag = 0x%x desc = 0x%p\n", @@ -760,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, io_req = (struct fnic_io_req *)CMD_SP(sc); 
WARN_ON_ONCE(!io_req); if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; spin_unlock_irqrestore(io_lock, flags); shost_printk(KERN_ERR, fnic->lport->host, @@ -818,63 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) xfer_len -= icmnd_cmpl->residual; - /* - * If queue_full, then try to reduce queue depth for all - * LUNS on the target. Todo: this should be accompanied - * by a periodic queue_depth rampup based on successful - * IO completion. - */ - if (icmnd_cmpl->scsi_status == QUEUE_FULL) { - struct scsi_device *t_sdev; - int qd = 0; - - shost_for_each_device(t_sdev, sc->device->host) { - if (t_sdev->id != sc->device->id) - continue; - - if (t_sdev->queue_depth > 1) { - qd = scsi_track_queue_full - (t_sdev, - t_sdev->queue_depth - 1); - if (qd == -1) - qd = t_sdev->host->cmd_per_lun; - shost_printk(KERN_INFO, - fnic->lport->host, - "scsi[%d:%d:%d:%d" - "] queue full detected," - "new depth = %d\n", - t_sdev->host->host_no, - t_sdev->channel, - t_sdev->id, t_sdev->lun, - t_sdev->queue_depth); - } - } - } + if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) + atomic64_inc(&fnic_stats->misc_stats.queue_fulls); break; case FCPIO_TIMEOUT: /* request was timed out */ + atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout); sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_ABORTED: /* request was aborted */ + atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted); sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */ + atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch); scsi_set_resid(sc, icmnd_cmpl->residual); sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ + atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources); sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; break; - case FCPIO_INVALID_HEADER: /* header contains invalid data */ - case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ - case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ + case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ + atomic64_inc(&fnic_stats->io_stats.io_not_found); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ - case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ + atomic64_inc(&fnic_stats->misc_stats.sgl_invalid); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + case FCPIO_FW_ERR: /* request was terminated due fw error */ + atomic64_inc(&fnic_stats->fw_stats.io_fw_errs); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ + atomic64_inc(&fnic_stats->misc_stats.mss_invalid); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_INVALID_HEADER: /* header contains invalid data */ + case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ + case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ default: shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", fnic_fcpio_status_to_str(hdr_status)); @@ -882,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, break; } + if (hdr_status != FCPIO_SUCCESS) { + atomic64_inc(&fnic_stats->io_stats.io_failures); + 
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + fnic_fcpio_status_to_str(hdr_status)); + } /* Break link with the SCSI command */ CMD_SP(sc) = NULL; CMD_FLAGS(sc) |= FNIC_IO_DONE; @@ -915,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, } else fnic->lport->host_stats.fcp_control_requests++; + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + /* Call SCSI completion function to complete the IO */ if (sc->scsi_done) sc->scsi_done(sc); @@ -932,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, u32 id; struct scsi_cmnd *sc; struct fnic_io_req *io_req; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned long flags; spinlock_t *io_lock; unsigned long start_time; @@ -939,7 +999,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); fcpio_tag_id_dec(&tag, &id); - if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) { + if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { shost_printk(KERN_ERR, fnic->lport->host, "Tag out of range tag %x hdr status = %s\n", id, fnic_fcpio_status_to_str(hdr_status)); @@ -949,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); WARN_ON_ONCE(!sc); if (!sc) { + atomic64_inc(&fnic_stats->io_stats.sc_null); shost_printk(KERN_ERR, fnic->lport->host, "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", fnic_fcpio_status_to_str(hdr_status), id); @@ -959,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, io_req = (struct fnic_io_req *)CMD_SP(sc); WARN_ON_ONCE(!io_req); if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); spin_unlock_irqrestore(io_lock, flags); CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; shost_printk(KERN_ERR, fnic->lport->host, @@ -983,15 +1045,48 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, spin_unlock_irqrestore(io_lock, flags); } else if (id & FNIC_TAG_ABORT) { /* Completion of abort cmd */ + switch (hdr_status) { + case FCPIO_SUCCESS: + break; + case FCPIO_TIMEOUT: + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_fw_timeouts); + else + atomic64_inc( + &term_stats->terminate_fw_timeouts); + break; + case FCPIO_IO_NOT_FOUND: + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_io_not_found); + else + atomic64_inc( + &term_stats->terminate_io_not_found); + break; + default: + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_failures); + else + atomic64_inc( + &term_stats->terminate_failures); + break; + } if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { /* This is a late completion. 
Ignore it */ spin_unlock_irqrestore(io_lock, flags); return; } - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; CMD_ABTS_STATUS(sc) = hdr_status; - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; + + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "abts cmpl recd. id %d status %s\n", (int)(id & FNIC_TAG_MASK), @@ -1095,6 +1190,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, struct fnic *fnic = vnic_dev_priv(vdev); switch (desc->hdr.type) { + case FCPIO_ICMND_CMPL: /* fw completed a command */ + case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ + case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ + case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ + case FCPIO_RESET_CMPL: /* fw completed reset */ + atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs); + break; + default: + break; + } + + switch (desc->hdr.type) { case FCPIO_ACK: /* fw copied copy wq desc to its queue */ fnic_fcpio_ack_handler(fnic, cq_index, desc); break; @@ -1148,23 +1255,26 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) { - unsigned int i; + int i; struct fnic_io_req *io_req; unsigned long flags = 0; struct scsi_cmnd *sc; spinlock_t *io_lock; unsigned long start_time = 0; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; - for (i = 0; i < FNIC_MAX_IO_REQ; i++) { + for (i = 0; i < fnic->fnic_max_tag_id; i++) { if (i == exclude_id) continue; + io_lock = fnic_io_lock_tag(fnic, i); + spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, i); - if (!sc) + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); continue; + } - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { @@ -1205,6 +1315,11 @@ cleanup_scsi_cmd: FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" " DID_TRANSPORT_DISRUPTED\n"); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + /* Complete the command to SCSI */ if (sc->scsi_done) { FNIC_TRACE(fnic_cleanup_io, @@ -1236,7 +1351,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, fcpio_tag_id_dec(&desc->hdr.tag, &id); id &= FNIC_TAG_MASK; - if (id >= FNIC_MAX_IO_REQ) + if (id >= fnic->fnic_max_tag_id) return; sc = scsi_host_find_tag(fnic->lport->host, id); @@ -1288,6 +1403,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, { struct vnic_wq_copy *wq = &fnic->wq_copy[0]; struct Scsi_Host *host = fnic->lport->host; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); @@ -1309,12 +1425,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, atomic_dec(&fnic->in_flight); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_queue_abort_io_req: failure: no descriptors\n"); + atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); return 1; } fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, 0, task_req, tag, fc_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); + 
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); atomic_dec(&fnic->in_flight); @@ -1325,10 +1448,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) { int tag; int abt_tag; + int term_cnt = 0; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; struct scsi_cmnd *sc; + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct scsi_lun fc_lun; enum fnic_ioreq_state old_ioreq_state; @@ -1340,14 +1466,15 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) if (fnic->in_remove) return; - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { abt_tag = tag; + io_lock = fnic_io_lock_tag(fnic, tag); + spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, tag); - if (!sc) + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); continue; - - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + } io_req = (struct fnic_io_req *)CMD_SP(sc); @@ -1391,6 +1518,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + atomic64_inc(&reset_stats->device_reset_terminates); abt_tag = (tag | FNIC_TAG_DEV_RST); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_rport_exch_reset dev rst sc 0x%p\n", @@ -1427,8 +1555,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) else CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&term_stats->terminates); + term_cnt++; } } + if (term_cnt > atomic64_read(&term_stats->max_terminates)) + atomic64_set(&term_stats->max_terminates, term_cnt); } @@ -1436,17 +1568,37 @@ void fnic_terminate_rport_io(struct fc_rport *rport) { int tag; int abt_tag; + int term_cnt = 0; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; struct scsi_cmnd *sc; struct scsi_lun fc_lun; - struct fc_rport_libfc_priv *rdata = rport->dd_data; - struct fc_lport *lport = rdata->local_port; - struct fnic *fnic = lport_priv(lport); + struct fc_rport_libfc_priv *rdata; + struct fc_lport *lport; + struct fnic *fnic; struct fc_rport *cmd_rport; + struct reset_stats *reset_stats; + struct terminate_stats *term_stats; enum fnic_ioreq_state old_ioreq_state; + if (!rport) { + printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); + return; + } + rdata = rport->dd_data; + + if (!rdata) { + printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); + return; + } + lport = rdata->local_port; + + if (!lport) { + printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); + return; + } + fnic = lport_priv(lport); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_terminate_rport_io called" " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", @@ -1456,18 +1608,24 @@ void fnic_terminate_rport_io(struct fc_rport *rport) if (fnic->in_remove) return; - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + reset_stats = &fnic->fnic_stats.reset_stats; + term_stats = &fnic->fnic_stats.term_stats; + + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { abt_tag = tag; + io_lock = 
fnic_io_lock_tag(fnic, tag); + spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, tag); - if (!sc) + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); continue; + } cmd_rport = starget_to_rport(scsi_target(sc->device)); - if (rport != cmd_rport) + if (rport != cmd_rport) { + spin_unlock_irqrestore(io_lock, flags); continue; - - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + } io_req = (struct fnic_io_req *)CMD_SP(sc); @@ -1509,6 +1667,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + atomic64_inc(&reset_stats->device_reset_terminates); abt_tag = (tag | FNIC_TAG_DEV_RST); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_terminate_rport_io dev rst sc 0x%p\n", sc); @@ -1545,8 +1704,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport) else CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&term_stats->terminates); + term_cnt++; } } + if (term_cnt > atomic64_read(&term_stats->max_terminates)) + atomic64_set(&term_stats->max_terminates, term_cnt); } @@ -1567,6 +1730,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) int ret = SUCCESS; u32 task_req = 0; struct scsi_lun fc_lun; + struct fnic_stats *fnic_stats; + struct abort_stats *abts_stats; + struct terminate_stats *term_stats; int tag; DECLARE_COMPLETION_ONSTACK(tm_done); @@ -1577,6 +1743,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) lp = shost_priv(sc->device->host); fnic = lport_priv(lp); + fnic_stats = &fnic->fnic_stats; + abts_stats = &fnic->fnic_stats.abts_stats; + term_stats = &fnic->fnic_stats.term_stats; + rport = starget_to_rport(scsi_target(sc->device)); tag = sc->request->tag; FNIC_SCSI_DBG(KERN_DEBUG, @@ -1635,8 +1805,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) */ if (fc_remote_port_chkready(rport) == 0) task_req = FCPIO_ITMF_ABT_TASK; - else + else { + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); task_req = FCPIO_ITMF_ABT_TASK_TERM; + } /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); @@ -1651,10 +1823,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) ret = FAILED; goto fnic_abort_cmd_end; } - if (task_req == FCPIO_ITMF_ABT_TASK) + if (task_req == FCPIO_ITMF_ABT_TASK) { CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; - else + atomic64_inc(&fnic_stats->abts_stats.aborts); + } else { CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; + atomic64_inc(&fnic_stats->term_stats.terminates); + } /* * We queued an abort IO, wait for its completion. 
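The abort and terminate paths above repeat the same active/max bookkeeping used for active_fw_reqs and max_fw_reqs in the submission and reset hunks: bump the active counter, then raise the recorded maximum if the new value exceeds it. A condensed sketch of that high-watermark idiom; the helper name is hypothetical, since the patch open-codes the sequence at each call site:

#include <linux/atomic.h>

/*
 * Not a single atomic step: two racing callers can both pass the
 * read() check and the later set() may store the smaller value,
 * leaving max slightly stale. For debug statistics that is an
 * accepted trade-off, as it avoids adding a lock to the I/O fast
 * path.
 */
static inline void stat_track_max(atomic64_t *active, atomic64_t *max)
{
	s64 cur = atomic64_inc_return(active);

	if (cur > atomic64_read(max))
		atomic64_set(max, cur);
}

The same reasoning covers max_terminates above, where term_cnt is compared and stored after the loop without any lock held.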
@@ -1672,6 +1847,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); spin_unlock_irqrestore(io_lock, flags); CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; ret = FAILED; @@ -1680,13 +1856,24 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) io_req->abts_done = NULL; /* fw did not complete abort, timed out */ - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { spin_unlock_irqrestore(io_lock, flags); + if (task_req == FCPIO_ITMF_ABT_TASK) { + FNIC_SCSI_DBG(KERN_INFO, + fnic->lport->host, "Abort Driver Timeout\n"); + atomic64_inc(&abts_stats->abort_drv_timeouts); + } else { + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "Terminate Driver Timeout\n"); + atomic64_inc(&term_stats->terminate_drv_timeouts); + } CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; ret = FAILED; goto fnic_abort_cmd_end; } + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + /* * firmware completed the abort, check the status, * free the io_req irrespective of failure or success @@ -1724,6 +1911,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, { struct vnic_wq_copy *wq = &fnic->wq_copy[0]; struct Scsi_Host *host = fnic->lport->host; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; struct scsi_lun fc_lun; int ret = 0; unsigned long intr_flags; @@ -1745,6 +1933,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, if (!vnic_wq_copy_desc_avail(wq)) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "queue_dr_io_req failure - no descriptors\n"); + atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); ret = -EAGAIN; goto lr_io_req_end; } @@ -1757,6 +1946,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, fc_lun.scsi_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + lr_io_req_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); atomic_dec(&fnic->in_flight); @@ -1784,17 +1979,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, DECLARE_COMPLETION_ONSTACK(tm_done); enum fnic_ioreq_state old_ioreq_state; - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { + io_lock = fnic_io_lock_tag(fnic, tag); + spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, tag); /* * ignore this lun reset cmd or cmds that do not belong to * this lun */ - if (!sc || sc == lr_sc || sc->device != lun_dev) + if (!sc || sc == lr_sc || sc->device != lun_dev) { + spin_unlock_irqrestore(io_lock, flags); continue; - - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + } io_req = (struct fnic_io_req *)CMD_SP(sc); @@ -1823,6 +2019,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, spin_unlock_irqrestore(io_lock, flags); continue; } + + if (io_req->abts_done) + shost_printk(KERN_ERR, fnic->lport->host, + "%s: io_req->abts_done is set state is %s\n", + __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); old_ioreq_state = CMD_STATE(sc); /* * Any pending IO issued prior to reset is expected to be @@ -1833,11 +2034,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, */ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; - if (io_req->abts_done) - 
shost_printk(KERN_ERR, fnic->lport->host, - "%s: io_req->abts_done is set state is %s\n", - __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); - BUG_ON(io_req->abts_done); abt_tag = tag; @@ -1890,12 +2086,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, io_req->abts_done = NULL; /* if abort is still pending with fw, fail */ - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { spin_unlock_irqrestore(io_lock, flags); CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; ret = 1; goto clean_pending_aborts_end; } + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); @@ -1989,6 +2186,8 @@ int fnic_device_reset(struct scsi_cmnd *sc) unsigned long flags; unsigned long start_time = 0; struct scsi_lun fc_lun; + struct fnic_stats *fnic_stats; + struct reset_stats *reset_stats; int tag = 0; DECLARE_COMPLETION_ONSTACK(tm_done); int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ @@ -2000,6 +2199,10 @@ int fnic_device_reset(struct scsi_cmnd *sc) lp = shost_priv(sc->device->host); fnic = lport_priv(lp); + fnic_stats = &fnic->fnic_stats; + reset_stats = &fnic->fnic_stats.reset_stats; + + atomic64_inc(&reset_stats->device_resets); rport = starget_to_rport(scsi_target(sc->device)); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, @@ -2010,8 +2213,10 @@ int fnic_device_reset(struct scsi_cmnd *sc) goto fnic_device_reset_end; /* Check if remote port up */ - if (fc_remote_port_chkready(rport)) + if (fc_remote_port_chkready(rport)) { + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); goto fnic_device_reset_end; + } CMD_FLAGS(sc) = FNIC_DEVICE_RESET; /* Allocate tag if not present */ @@ -2087,14 +2292,15 @@ int fnic_device_reset(struct scsi_cmnd *sc) * gets cleaned up during higher levels of EH */ if (status == FCPIO_INVALID_CODE) { + atomic64_inc(&reset_stats->device_reset_timeouts); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Device reset timed out\n"); CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; spin_unlock_irqrestore(io_lock, flags); int_to_scsilun(sc->device->lun, &fc_lun); /* - * Issue abort and terminate on the device reset request. - * If q'ing of the abort fails, retry issue it after a delay. + * Issue abort and terminate on device reset request. + * If q'ing of terminate fails, retry it after a delay. */ while (1) { spin_lock_irqsave(io_lock, flags); @@ -2200,6 +2406,10 @@ fnic_device_reset_end: "Returning from device reset %s\n", (ret == SUCCESS) ? "SUCCESS" : "FAILED"); + + if (ret == FAILED) + atomic64_inc(&reset_stats->device_reset_failures); + return ret; } @@ -2208,26 +2418,34 @@ int fnic_reset(struct Scsi_Host *shost) { struct fc_lport *lp; struct fnic *fnic; - int ret = SUCCESS; + int ret = 0; + struct reset_stats *reset_stats; lp = shost_priv(shost); fnic = lport_priv(lp); + reset_stats = &fnic->fnic_stats.reset_stats; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_reset called\n"); + atomic64_inc(&reset_stats->fnic_resets); + /* * Reset local port, this will clean up libFC exchanges, * reset remote port sessions, and if link is up, begin flogi */ - if (lp->tt.lport_reset(lp)) - ret = FAILED; + ret = lp->tt.lport_reset(lp); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from fnic reset %s\n", - (ret == SUCCESS) ? + (ret == 0) ? 
"SUCCESS" : "FAILED"); + if (ret == 0) + atomic64_inc(&reset_stats->fnic_reset_completions); + else + atomic64_inc(&reset_stats->fnic_reset_failures); + return ret; } @@ -2252,7 +2470,7 @@ int fnic_host_reset(struct scsi_cmnd *sc) * scsi-ml tries to send a TUR to every device if host reset is * successful, so before returning to scsi, fabric should be up */ - ret = fnic_reset(shost); + ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED; if (ret == SUCCESS) { wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; ret = FAILED; @@ -2405,7 +2623,7 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) lun_dev = lr_sc->device; /* walk again to check, if IOs are still pending in fw */ - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { sc = scsi_host_find_tag(fnic->lport->host, tag); /* * ignore this lun reset cmd or cmds that do not belong to diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h new file mode 100644 index 000000000000..540cceb843cd --- /dev/null +++ b/drivers/scsi/fnic/fnic_stats.h @@ -0,0 +1,116 @@ +/* + * Copyright 2013 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _FNIC_STATS_H_ +#define _FNIC_STATS_H_ +struct io_path_stats { + atomic64_t active_ios; + atomic64_t max_active_ios; + atomic64_t io_completions; + atomic64_t io_failures; + atomic64_t ioreq_null; + atomic64_t alloc_failures; + atomic64_t sc_null; + atomic64_t io_not_found; + atomic64_t num_ios; +}; + +struct abort_stats { + atomic64_t aborts; + atomic64_t abort_failures; + atomic64_t abort_drv_timeouts; + atomic64_t abort_fw_timeouts; + atomic64_t abort_io_not_found; +}; + +struct terminate_stats { + atomic64_t terminates; + atomic64_t max_terminates; + atomic64_t terminate_drv_timeouts; + atomic64_t terminate_fw_timeouts; + atomic64_t terminate_io_not_found; + atomic64_t terminate_failures; +}; + +struct reset_stats { + atomic64_t device_resets; + atomic64_t device_reset_failures; + atomic64_t device_reset_aborts; + atomic64_t device_reset_timeouts; + atomic64_t device_reset_terminates; + atomic64_t fw_resets; + atomic64_t fw_reset_completions; + atomic64_t fw_reset_failures; + atomic64_t fnic_resets; + atomic64_t fnic_reset_completions; + atomic64_t fnic_reset_failures; +}; + +struct fw_stats { + atomic64_t active_fw_reqs; + atomic64_t max_fw_reqs; + atomic64_t fw_out_of_resources; + atomic64_t io_fw_errs; +}; + +struct vlan_stats { + atomic64_t vlan_disc_reqs; + atomic64_t resp_withno_vlanID; + atomic64_t sol_expiry_count; + atomic64_t flogi_rejects; +}; + +struct misc_stats { + u64 last_isr_time; + u64 last_ack_time; + atomic64_t isr_count; + atomic64_t max_cq_entries; + atomic64_t ack_index_out_of_range; + atomic64_t data_count_mismatch; + atomic64_t fcpio_timeout; + atomic64_t fcpio_aborted; + atomic64_t sgl_invalid; + atomic64_t mss_invalid; + atomic64_t abts_cpwq_alloc_failures; + atomic64_t devrst_cpwq_alloc_failures; + atomic64_t io_cpwq_alloc_failures; + atomic64_t no_icmnd_itmf_cmpls; + atomic64_t queue_fulls; + atomic64_t rport_not_ready; + atomic64_t frame_errors; +}; + +struct fnic_stats { + struct io_path_stats io_stats; + struct abort_stats abts_stats; + struct terminate_stats term_stats; + struct reset_stats reset_stats; + struct fw_stats fw_stats; + struct vlan_stats vlan_stats; + struct misc_stats misc_stats; +}; + +struct stats_debug_info { + char *debug_buffer; + void *i_private; + int buf_size; + int buffer_len; +}; + +int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); +int fnic_stats_debugfs_init(struct fnic *); +void fnic_stats_debugfs_remove(struct fnic *); +#endif /* _FNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index 23a60e3d8527..e002e7187dc0 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -189,6 +189,191 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt) } /* + * fnic_get_stats_data - Copy fnic stats buffer to a memory file + * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer + * + * Description: + * This routine gathers the fnic stats debugfs data from the fnic_stats struct + * and dumps it to stats_debug_info. 
+ * + * Return Value: + * This routine returns the amount of bytes that were dumped into + * stats_debug_info + */ +int fnic_get_stats_data(struct stats_debug_info *debug, + struct fnic_stats *stats) +{ + int len = 0; + int buf_size = debug->buf_size; + struct timespec val1, val2; + + len = snprintf(debug->debug_buffer + len, buf_size - len, + "------------------------------------------\n" + "\t\tIO Statistics\n" + "------------------------------------------\n"); + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n" + "Number of IOs: %lld\nNumber of IO Completions: %lld\n" + "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n" + "Number of Memory alloc Failures: %lld\n" + "Number of IOREQ Null: %lld\n" + "Number of SCSI cmd pointer Null: %lld\n", + (u64)atomic64_read(&stats->io_stats.active_ios), + (u64)atomic64_read(&stats->io_stats.max_active_ios), + (u64)atomic64_read(&stats->io_stats.num_ios), + (u64)atomic64_read(&stats->io_stats.io_completions), + (u64)atomic64_read(&stats->io_stats.io_failures), + (u64)atomic64_read(&stats->io_stats.io_not_found), + (u64)atomic64_read(&stats->io_stats.alloc_failures), + (u64)atomic64_read(&stats->io_stats.ioreq_null), + (u64)atomic64_read(&stats->io_stats.sc_null)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tAbort Statistics\n" + "------------------------------------------\n"); + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Aborts: %lld\n" + "Number of Abort Failures: %lld\n" + "Number of Abort Driver Timeouts: %lld\n" + "Number of Abort FW Timeouts: %lld\n" + "Number of Abort IO NOT Found: %lld\n", + (u64)atomic64_read(&stats->abts_stats.aborts), + (u64)atomic64_read(&stats->abts_stats.abort_failures), + (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts), + (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts), + (u64)atomic64_read(&stats->abts_stats.abort_io_not_found)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tTerminate Statistics\n" + "------------------------------------------\n"); + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Terminates: %lld\n" + "Maximum Terminates: %lld\n" + "Number of Terminate Driver Timeouts: %lld\n" + "Number of Terminate FW Timeouts: %lld\n" + "Number of Terminate IO NOT Found: %lld\n" + "Number of Terminate Failures: %lld\n", + (u64)atomic64_read(&stats->term_stats.terminates), + (u64)atomic64_read(&stats->term_stats.max_terminates), + (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts), + (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts), + (u64)atomic64_read(&stats->term_stats.terminate_io_not_found), + (u64)atomic64_read(&stats->term_stats.terminate_failures)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tReset Statistics\n" + "------------------------------------------\n"); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Device Resets: %lld\n" + "Number of Device Reset Failures: %lld\n" + "Number of Device Reset Aborts: %lld\n" + "Number of Device Reset Timeouts: %lld\n" + "Number of Device Reset Terminates: %lld\n" + "Number of FW Resets: %lld\n" + "Number of FW Reset Completions: %lld\n" + "Number of FW Reset Failures: %lld\n" + "Number of Fnic Reset: %lld\n" + "Number of Fnic Reset Completions: 
%lld\n" + "Number of Fnic Reset Failures: %lld\n", + (u64)atomic64_read(&stats->reset_stats.device_resets), + (u64)atomic64_read(&stats->reset_stats.device_reset_failures), + (u64)atomic64_read(&stats->reset_stats.device_reset_aborts), + (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts), + (u64)atomic64_read( + &stats->reset_stats.device_reset_terminates), + (u64)atomic64_read(&stats->reset_stats.fw_resets), + (u64)atomic64_read(&stats->reset_stats.fw_reset_completions), + (u64)atomic64_read(&stats->reset_stats.fw_reset_failures), + (u64)atomic64_read(&stats->reset_stats.fnic_resets), + (u64)atomic64_read( + &stats->reset_stats.fnic_reset_completions), + (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tFirmware Statistics\n" + "------------------------------------------\n"); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Active FW Requests %lld\n" + "Maximum FW Requests: %lld\n" + "Number of FW out of resources: %lld\n" + "Number of FW IO errors: %lld\n", + (u64)atomic64_read(&stats->fw_stats.active_fw_reqs), + (u64)atomic64_read(&stats->fw_stats.max_fw_reqs), + (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources), + (u64)atomic64_read(&stats->fw_stats.io_fw_errs)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tVlan Discovery Statistics\n" + "------------------------------------------\n"); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Vlan Discovery Requests Sent %lld\n" + "Vlan Response Received with no FCF VLAN ID: %lld\n" + "No solicitations recvd after vlan set, expiry count: %lld\n" + "Flogi rejects count: %lld\n", + (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs), + (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID), + (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count), + (u64)atomic64_read(&stats->vlan_stats.flogi_rejects)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tOther Important Statistics\n" + "------------------------------------------\n"); + + jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1); + jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Last ISR time: %llu (%8lu.%8lu)\n" + "Last ACK time: %llu (%8lu.%8lu)\n" + "Number of ISRs: %lld\n" + "Maximum CQ Entries: %lld\n" + "Number of ACK index out of range: %lld\n" + "Number of data count mismatch: %lld\n" + "Number of FCPIO Timeouts: %lld\n" + "Number of FCPIO Aborted: %lld\n" + "Number of SGL Invalid: %lld\n" + "Number of Copy WQ Alloc Failures for ABTs: %lld\n" + "Number of Copy WQ Alloc Failures for Device Reset: %lld\n" + "Number of Copy WQ Alloc Failures for IOs: %lld\n" + "Number of no icmnd itmf Completions: %lld\n" + "Number of QUEUE Fulls: %lld\n" + "Number of rport not ready: %lld\n" + "Number of receive frame errors: %lld\n", + (u64)stats->misc_stats.last_isr_time, + val1.tv_sec, val1.tv_nsec, + (u64)stats->misc_stats.last_ack_time, + val2.tv_sec, val2.tv_nsec, + (u64)atomic64_read(&stats->misc_stats.isr_count), + (u64)atomic64_read(&stats->misc_stats.max_cq_entries), + (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range), + (u64)atomic64_read(&stats->misc_stats.data_count_mismatch), + (u64)atomic64_read(&stats->misc_stats.fcpio_timeout), + 
(u64)atomic64_read(&stats->misc_stats.fcpio_aborted), + (u64)atomic64_read(&stats->misc_stats.sgl_invalid), + (u64)atomic64_read( + &stats->misc_stats.abts_cpwq_alloc_failures), + (u64)atomic64_read( + &stats->misc_stats.devrst_cpwq_alloc_failures), + (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures), + (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls), + (u64)atomic64_read(&stats->misc_stats.queue_fulls), + (u64)atomic64_read(&stats->misc_stats.rport_not_ready), + (u64)atomic64_read(&stats->misc_stats.frame_errors)); + + return len; + +} + +/* * fnic_trace_buf_init - Initialize fnic trace buffer logging facility * * Description: diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h index cef42b4c4d6c..d412f2ee3c4f 100644 --- a/drivers/scsi/fnic/fnic_trace.h +++ b/drivers/scsi/fnic/fnic_trace.h @@ -84,7 +84,8 @@ fnic_trace_data_t *fnic_trace_get_buf(void); int fnic_get_trace_data(fnic_dbgfs_t *); int fnic_trace_buf_init(void); void fnic_trace_free(void); +int fnic_debugfs_init(void); +void fnic_debugfs_terminate(void); int fnic_trace_debugfs_init(void); void fnic_trace_debugfs_terminate(void); - #endif diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h index fbb55364e272..e343e1d0f801 100644 --- a/drivers/scsi/fnic/vnic_scsi.h +++ b/drivers/scsi/fnic/vnic_scsi.h @@ -54,8 +54,8 @@ #define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000 #define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000 -#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256 -#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096 +#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 1 +#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 2048 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index ee4fa40a50b1..ce5ef0190bad 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -4684,6 +4684,7 @@ static struct scsi_host_template gdth_template = { .cmd_per_lun = GDTH_MAXC_P_L, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, + .no_write_same = 1, }; #ifdef CONFIG_ISA diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index df0c3c71ea43..f2c5005f312a 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -316,6 +316,12 @@ static void scsi_host_dev_release(struct device *dev) kfree(shost); } +static unsigned int shost_eh_deadline; + +module_param_named(eh_deadline, shost_eh_deadline, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(eh_deadline, + "SCSI EH timeout in seconds (should be between 1 and 2^32-1)"); + static struct device_type scsi_host_type = { .name = "scsi_host", .release = scsi_host_dev_release, @@ -388,6 +394,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->unchecked_isa_dma = sht->unchecked_isa_dma; shost->use_clustering = sht->use_clustering; shost->ordered_tag = sht->ordered_tag; + shost->eh_deadline = shost_eh_deadline * HZ; + shost->no_write_same = sht->no_write_same; if (sht->supported_mode == MODE_UNKNOWN) /* means we didn't set it ... default to INITIATOR */ diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a4320bcc40f2..20a5e6ecf945 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -54,7 +54,7 @@ #include "hpsa.h" /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' 
*/ -#define HPSA_DRIVER_VERSION "2.0.2-1" +#define HPSA_DRIVER_VERSION "3.4.0-1" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" @@ -89,17 +89,17 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, @@ -107,7 +107,19 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} @@ -125,24 +137,35 @@ static struct board_type products[] = { {0x3245103C, "Smart Array P410i", &SA5_access}, {0x3247103C, "Smart Array P411", &SA5_access}, {0x3249103C, "Smart Array P812", &SA5_access}, - {0x324a103C, "Smart Array P712m", &SA5_access}, - {0x324b103C, "Smart Array P711m", &SA5_access}, + {0x324A103C, "Smart Array P712m", &SA5_access}, + {0x324B103C, "Smart Array P711m", &SA5_access}, {0x3350103C, "Smart Array P222", &SA5_access}, {0x3351103C, "Smart Array P420", &SA5_access}, {0x3352103C, "Smart Array P421", &SA5_access}, {0x3353103C, "Smart Array P822", &SA5_access}, + {0x334D103C, "Smart Array P822se", &SA5_access}, {0x3354103C, "Smart Array P420i", &SA5_access}, {0x3355103C, "Smart Array P220i", &SA5_access}, {0x3356103C, "Smart Array P721m", &SA5_access}, - {0x1920103C, "Smart Array", &SA5_access}, - {0x1921103C, "Smart Array", 
&SA5_access}, - {0x1922103C, "Smart Array", &SA5_access}, - {0x1923103C, "Smart Array", &SA5_access}, - {0x1924103C, "Smart Array", &SA5_access}, - {0x1925103C, "Smart Array", &SA5_access}, - {0x1926103C, "Smart Array", &SA5_access}, - {0x1928103C, "Smart Array", &SA5_access}, - {0x334d103C, "Smart Array P822se", &SA5_access}, + {0x1921103C, "Smart Array P830i", &SA5_access}, + {0x1922103C, "Smart Array P430", &SA5_access}, + {0x1923103C, "Smart Array P431", &SA5_access}, + {0x1924103C, "Smart Array P830", &SA5_access}, + {0x1926103C, "Smart Array P731m", &SA5_access}, + {0x1928103C, "Smart Array P230i", &SA5_access}, + {0x1929103C, "Smart Array P530", &SA5_access}, + {0x21BD103C, "Smart Array", &SA5_access}, + {0x21BE103C, "Smart Array", &SA5_access}, + {0x21BF103C, "Smart Array", &SA5_access}, + {0x21C0103C, "Smart Array", &SA5_access}, + {0x21C1103C, "Smart Array", &SA5_access}, + {0x21C2103C, "Smart Array", &SA5_access}, + {0x21C3103C, "Smart Array", &SA5_access}, + {0x21C4103C, "Smart Array", &SA5_access}, + {0x21C5103C, "Smart Array", &SA5_access}, + {0x21C7103C, "Smart Array", &SA5_access}, + {0x21C8103C, "Smart Array", &SA5_access}, + {0x21C9103C, "Smart Array", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; @@ -538,6 +561,7 @@ static struct scsi_host_template hpsa_driver_template = { .sdev_attrs = hpsa_sdev_attrs, .shost_attrs = hpsa_shost_attrs, .max_sectors = 8192, + .no_write_same = 1, }; @@ -1265,7 +1289,7 @@ static void complete_scsi_command(struct CommandList *cp) "has check condition: aborted command: " "ASC: 0x%x, ASCQ: 0x%x\n", cp, asc, ascq); - cmd->result = DID_SOFT_ERROR << 16; + cmd->result |= DID_SOFT_ERROR << 16; break; } /* Must be some other type of check condition */ @@ -4902,7 +4926,7 @@ reinit_after_soft_reset: hpsa_hba_inquiry(h); hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ start_controller_lockup_detector(h); - return 1; + return 0; clean4: hpsa_free_sg_chain_blocks(h); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 4e31caa21ddf..23f5ba5e6472 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2208,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) if (rsp_rc != 0) { sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. 
rc=%d\n", rsp_rc); - return -EIO; + /* If failure is received, the host adapter is most likely going + through reset, return success so the caller will wait for the command + being cancelled to get returned */ + return 0; } sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); @@ -2221,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) if (status != IBMVFC_MAD_SUCCESS) { sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); - return -EIO; + switch (status) { + case IBMVFC_MAD_DRIVER_FAILED: + case IBMVFC_MAD_CRQ_ERROR: + /* Host adapter most likely going through reset, return success to + the caller will wait for the command being cancelled to get returned */ + return 0; + default: + return -EIO; + }; } sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index d0fa4b6c551f..fa764406df68 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -241,7 +241,7 @@ static void gather_partition_info(void) struct device_node *rootdn; const char *ppartition_name; - const unsigned int *p_number_ptr; + const __be32 *p_number_ptr; /* Retrieve information about this partition */ rootdn = of_find_node_by_path("/"); @@ -255,7 +255,7 @@ static void gather_partition_info(void) sizeof(partition_name)); p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); if (p_number_ptr) - partition_number = *p_number_ptr; + partition_number = of_read_number(p_number_ptr, 1); of_node_put(rootdn); } @@ -270,10 +270,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata) strncpy(hostdata->madapter_info.partition_name, partition_name, sizeof(hostdata->madapter_info.partition_name)); - hostdata->madapter_info.partition_number = partition_number; + hostdata->madapter_info.partition_number = + cpu_to_be32(partition_number); - hostdata->madapter_info.mad_version = 1; - hostdata->madapter_info.os_type = 2; + hostdata->madapter_info.mad_version = cpu_to_be32(1); + hostdata->madapter_info.os_type = cpu_to_be32(2); } /** @@ -464,9 +465,9 @@ static int initialize_event_pool(struct event_pool *pool, memset(&evt->crq, 0x00, sizeof(evt->crq)); atomic_set(&evt->free, 1); evt->crq.valid = 0x80; - evt->crq.IU_length = sizeof(*evt->xfer_iu); - evt->crq.IU_data_ptr = pool->iu_token + - sizeof(*evt->xfer_iu) * i; + evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu)); + evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + + sizeof(*evt->xfer_iu) * i); evt->xfer_iu = pool->iu_storage + i; evt->hostdata = hostdata; evt->ext_list = NULL; @@ -588,7 +589,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct, evt_struct->cmnd_done = NULL; evt_struct->sync_srp = NULL; evt_struct->crq.format = format; - evt_struct->crq.timeout = timeout; + evt_struct->crq.timeout = cpu_to_be16(timeout); evt_struct->done = done; } @@ -659,8 +660,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg, scsi_for_each_sg(cmd, sg, nseg, i) { struct srp_direct_buf *descr = md + i; - descr->va = sg_dma_address(sg); - descr->len = sg_dma_len(sg); + descr->va = cpu_to_be64(sg_dma_address(sg)); + descr->len = cpu_to_be32(sg_dma_len(sg)); descr->key = 0; total_length += sg_dma_len(sg); } @@ -703,13 +704,14 @@ static int map_sg_data(struct scsi_cmnd *cmd, } indirect->table_desc.va = 0; - indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf); + indirect->table_desc.len = cpu_to_be32(sg_mapped * + sizeof(struct 
srp_direct_buf)); indirect->table_desc.key = 0; if (sg_mapped <= MAX_INDIRECT_BUFS) { total_length = map_sg_list(cmd, sg_mapped, &indirect->desc_list[0]); - indirect->len = total_length; + indirect->len = cpu_to_be32(total_length); return 1; } @@ -731,9 +733,10 @@ static int map_sg_data(struct scsi_cmnd *cmd, total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list); - indirect->len = total_length; - indirect->table_desc.va = evt_struct->ext_list_token; - indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]); + indirect->len = cpu_to_be32(total_length); + indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token); + indirect->table_desc.len = cpu_to_be32(sg_mapped * + sizeof(indirect->desc_list[0])); memcpy(indirect->desc_list, evt_struct->ext_list, MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); return 1; @@ -849,7 +852,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, struct ibmvscsi_host_data *hostdata, unsigned long timeout) { - u64 *crq_as_u64 = (u64 *) &evt_struct->crq; + __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq; int request_status = 0; int rc; int srp_req = 0; @@ -920,8 +923,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, add_timer(&evt_struct->timer); } - if ((rc = - ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { + rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + if (rc != 0) { list_del(&evt_struct->list); del_timer(&evt_struct->timer); @@ -987,15 +991,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct) if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) memcpy(cmnd->sense_buffer, rsp->data, - rsp->sense_data_len); + be32_to_cpu(rsp->sense_data_len)); unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, evt_struct->hostdata->dev); if (rsp->flags & SRP_RSP_FLAG_DOOVER) - scsi_set_resid(cmnd, rsp->data_out_res_cnt); + scsi_set_resid(cmnd, + be32_to_cpu(rsp->data_out_res_cnt)); else if (rsp->flags & SRP_RSP_FLAG_DIOVER) - scsi_set_resid(cmnd, rsp->data_in_res_cnt); + scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt)); } if (evt_struct->cmnd_done) @@ -1037,7 +1042,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); srp_cmd->opcode = SRP_CMD; memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); - srp_cmd->lun = ((u64) lun) << 48; + srp_cmd->lun = cpu_to_be64(((u64)lun) << 48); if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { if (!firmware_has_feature(FW_FEATURE_CMO)) @@ -1062,9 +1067,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, if ((in_fmt == SRP_DATA_DESC_INDIRECT || out_fmt == SRP_DATA_DESC_INDIRECT) && indirect->table_desc.va == 0) { - indirect->table_desc.va = evt_struct->crq.IU_data_ptr + + indirect->table_desc.va = + cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) + offsetof(struct srp_cmd, add_data) + - offsetof(struct srp_indirect_buf, desc_list); + offsetof(struct srp_indirect_buf, desc_list)); } return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); @@ -1158,7 +1164,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) * request_limit could have been set to -1 by this client. 
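The ibmvscsi hunks here are a mechanical endianness cleanup: every field that crosses the hypervisor CRQ boundary becomes __be16/__be32/__be64 in viosrp.h, and values are converted exactly once, cpu_to_be*() on the way out and be*_to_cpu() on the way in, which also lets sparse flag any missed conversion. A minimal sketch of the pattern; the struct and field names are hypothetical, not the viosrp layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_msg {			/* hypothetical on-wire layout */
	__be32 type;
	__be16 length;
	__be64 buffer;
};

static void wire_msg_fill(struct wire_msg *m, u32 type, u16 len, u64 addr)
{
	/* convert exactly once, at the boundary */
	m->type   = cpu_to_be32(type);
	m->length = cpu_to_be16(len);
	m->buffer = cpu_to_be64(addr);
}

static u32 wire_msg_type(const struct wire_msg *m)
{
	return be32_to_cpu(m->type);	/* and back on receive */
}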
*/ atomic_set(&hostdata->request_limit, - evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); + be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta)); /* If we had any pending I/Os, kick them */ scsi_unblock_requests(hostdata->host); @@ -1184,8 +1190,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) login = &evt_struct->iu.srp.login_req; memset(login, 0, sizeof(*login)); login->opcode = SRP_LOGIN_REQ; - login->req_it_iu_len = sizeof(union srp_iu); - login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; + login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu)); + login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | + SRP_BUF_FORMAT_INDIRECT); spin_lock_irqsave(hostdata->host->host_lock, flags); /* Start out with a request limit of 0, since this is negotiated in @@ -1214,12 +1221,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct) dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", evt_struct->xfer_iu->mad.capabilities.common.status); } else { - if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) + if (hostdata->caps.migration.common.server_support != + cpu_to_be16(SERVER_SUPPORTS_CAP)) dev_info(hostdata->dev, "Partition migration not supported\n"); if (client_reserve) { if (hostdata->caps.reserve.common.server_support == - SERVER_SUPPORTS_CAP) + cpu_to_be16(SERVER_SUPPORTS_CAP)) dev_info(hostdata->dev, "Client reserve enabled\n"); else dev_info(hostdata->dev, "Client reserve not supported\n"); @@ -1251,9 +1259,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) req = &evt_struct->iu.mad.capabilities; memset(req, 0, sizeof(*req)); - hostdata->caps.flags = CAP_LIST_SUPPORTED; + hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED); if (hostdata->client_migrated) - hostdata->caps.flags |= CLIENT_MIGRATED; + hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), sizeof(hostdata->caps.name)); @@ -1264,22 +1272,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; - req->common.type = VIOSRP_CAPABILITIES_TYPE; - req->buffer = hostdata->caps_addr; + req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); + req->buffer = cpu_to_be64(hostdata->caps_addr); - hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; - hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); - hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; - hostdata->caps.migration.ecl = 1; + hostdata->caps.migration.common.cap_type = + cpu_to_be32(MIGRATION_CAPABILITIES); + hostdata->caps.migration.common.length = + cpu_to_be16(sizeof(hostdata->caps.migration)); + hostdata->caps.migration.common.server_support = + cpu_to_be16(SERVER_SUPPORTS_CAP); + hostdata->caps.migration.ecl = cpu_to_be32(1); if (client_reserve) { - hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; - hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); - hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; - hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; - req->common.length = sizeof(hostdata->caps); + hostdata->caps.reserve.common.cap_type = + cpu_to_be32(RESERVATION_CAPABILITIES); + hostdata->caps.reserve.common.length = + cpu_to_be16(sizeof(hostdata->caps.reserve)); + hostdata->caps.reserve.common.server_support = + 
cpu_to_be16(SERVER_SUPPORTS_CAP); + hostdata->caps.reserve.type = + cpu_to_be32(CLIENT_RESERVE_SCSI_2); + req->common.length = + cpu_to_be16(sizeof(hostdata->caps)); } else - req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); + req->common.length = cpu_to_be16(sizeof(hostdata->caps) - + sizeof(hostdata->caps.reserve)); spin_lock_irqsave(hostdata->host->host_lock, flags); if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) @@ -1297,7 +1314,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) static void fast_fail_rsp(struct srp_event_struct *evt_struct) { struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; + u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status); if (status == VIOSRP_MAD_NOT_SUPPORTED) dev_err(hostdata->dev, "fast_fail not supported in server\n"); @@ -1334,8 +1351,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) fast_fail_mad = &evt_struct->iu.mad.fast_fail; memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); - fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; - fast_fail_mad->common.length = sizeof(*fast_fail_mad); + fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL); + fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad)); spin_lock_irqsave(hostdata->host->host_lock, flags); rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); @@ -1362,15 +1379,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) "host partition %s (%d), OS %d, max io %u\n", hostdata->madapter_info.srp_version, hostdata->madapter_info.partition_name, - hostdata->madapter_info.partition_number, - hostdata->madapter_info.os_type, - hostdata->madapter_info.port_max_txu[0]); + be32_to_cpu(hostdata->madapter_info.partition_number), + be32_to_cpu(hostdata->madapter_info.os_type), + be32_to_cpu(hostdata->madapter_info.port_max_txu[0])); if (hostdata->madapter_info.port_max_txu[0]) hostdata->host->max_sectors = - hostdata->madapter_info.port_max_txu[0] >> 9; + be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9; - if (hostdata->madapter_info.os_type == 3 && + if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 && strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { dev_err(hostdata->dev, "host (Ver. 
%s) doesn't support large transfers\n", hostdata->madapter_info.srp_version); @@ -1379,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; } - if (hostdata->madapter_info.os_type == 3) { + if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) { enable_fast_fail(hostdata); return; } @@ -1414,9 +1431,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) req = &evt_struct->iu.mad.adapter_info; memset(req, 0x00, sizeof(*req)); - req->common.type = VIOSRP_ADAPTER_INFO_TYPE; - req->common.length = sizeof(hostdata->madapter_info); - req->buffer = hostdata->adapter_info_addr; + req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE); + req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info)); + req->buffer = cpu_to_be64(hostdata->adapter_info_addr); spin_lock_irqsave(hostdata->host->host_lock, flags); if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) @@ -1501,7 +1518,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) /* Set up an abort SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = ((u64) lun) << 48; + tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; tsk_mgmt->task_tag = (u64) found_evt; @@ -1624,7 +1641,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) /* Set up a lun reset SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = ((u64) lun) << 48; + tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; evt->sync_srp = &srp_rsp; @@ -1735,8 +1752,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, { long rc; unsigned long flags; + /* The hypervisor copies our tag value here so no byteswapping */ struct srp_event_struct *evt_struct = - (struct srp_event_struct *)crq->IU_data_ptr; + (__force struct srp_event_struct *)crq->IU_data_ptr; switch (crq->valid) { case 0xC0: /* initialization */ switch (crq->format) { @@ -1792,18 +1810,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, */ if (!valid_event_struct(&hostdata->pool, evt_struct)) { dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", - (void *)crq->IU_data_ptr); + evt_struct); return; } if (atomic_read(&evt_struct->free)) { dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", - (void *)crq->IU_data_ptr); + evt_struct); return; } if (crq->format == VIOSRP_SRP_FORMAT) - atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta, + atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), &hostdata->request_limit); del_timer(&evt_struct->timer); @@ -1856,13 +1874,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, /* Set up a lun reset SRP command */ memset(host_config, 0x00, sizeof(*host_config)); - host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; - host_config->common.length = length; - host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, - length, - DMA_BIDIRECTIONAL); + host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE); + host_config->common.length = cpu_to_be16(length); + addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL); - if (dma_mapping_error(hostdata->dev, host_config->buffer)) { + if (dma_mapping_error(hostdata->dev, addr)) { if (!firmware_has_feature(FW_FEATURE_CMO)) dev_err(hostdata->dev, "dma_mapping error getting host config\n"); @@ -1870,6 
+1886,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, return -1; } + host_config->buffer = cpu_to_be64(addr); + init_completion(&evt_struct->comp); spin_lock_irqsave(hostdata->host->host_lock, flags); rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index 2cd735d1d196..116243087622 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h @@ -75,9 +75,9 @@ struct viosrp_crq { u8 format; /* SCSI vs out-of-band */ u8 reserved; u8 status; /* non-scsi failure? (e.g. DMA failure) */ - u16 timeout; /* in seconds */ - u16 IU_length; /* in bytes */ - u64 IU_data_ptr; /* the TCE for transferring data */ + __be16 timeout; /* in seconds */ + __be16 IU_length; /* in bytes */ + __be64 IU_data_ptr; /* the TCE for transferring data */ }; /* MADs are Management requests above and beyond the IUs defined in the SRP @@ -124,10 +124,10 @@ enum viosrp_capability_flag { * Common MAD header */ struct mad_common { - u32 type; - u16 status; - u16 length; - u64 tag; + __be32 type; + __be16 status; + __be16 length; + __be64 tag; }; /* @@ -139,23 +139,23 @@ struct mad_common { */ struct viosrp_empty_iu { struct mad_common common; - u64 buffer; - u32 port; + __be64 buffer; + __be32 port; }; struct viosrp_error_log { struct mad_common common; - u64 buffer; + __be64 buffer; }; struct viosrp_adapter_info { struct mad_common common; - u64 buffer; + __be64 buffer; }; struct viosrp_host_config { struct mad_common common; - u64 buffer; + __be64 buffer; }; struct viosrp_fast_fail { @@ -164,27 +164,27 @@ struct viosrp_fast_fail { struct viosrp_capabilities { struct mad_common common; - u64 buffer; + __be64 buffer; }; struct mad_capability_common { - u32 cap_type; - u16 length; - u16 server_support; + __be32 cap_type; + __be16 length; + __be16 server_support; }; struct mad_reserve_cap { struct mad_capability_common common; - u32 type; + __be32 type; }; struct mad_migration_cap { struct mad_capability_common common; - u32 ecl; + __be32 ecl; }; struct capabilities{ - u32 flags; + __be32 flags; char name[SRP_MAX_LOC_LEN]; char loc[SRP_MAX_LOC_LEN]; struct mad_migration_cap migration; @@ -208,10 +208,10 @@ union viosrp_iu { struct mad_adapter_info_data { char srp_version[8]; char partition_name[96]; - u32 partition_number; - u32 mad_version; - u32 os_type; - u32 port_max_txu[8]; /* per-port maximum transfer */ + __be32 partition_number; + __be32 mad_version; + __be32 os_type; + __be32 port_max_txu[8]; /* per-port maximum transfer */ }; #endif diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 36ac1c34ce97..573f4128b6b6 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6305,7 +6305,8 @@ static struct scsi_host_template driver_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = ipr_ioa_attrs, .sdev_attrs = ipr_dev_attrs, - .proc_name = IPR_NAME + .proc_name = IPR_NAME, + .no_write_same = 1, }; /** diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 8d5ea8a1e5a6..52a216f21ae5 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -374,6 +374,7 @@ static struct scsi_host_template ips_driver_template = { .sg_tablesize = IPS_MAX_SG, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, + .no_write_same = 1, }; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 9e2588a6881c..add6d1566ec8 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -116,6 +116,7 @@ static inline int iscsi_sw_sk_state_check(struct sock 
*sk) struct iscsi_conn *conn = sk->sk_user_data; if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && + (conn->session->state != ISCSI_STATE_LOGGING_OUT) && !atomic_read(&sk->sk_rmem_alloc)) { ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n"); iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 587992952b3c..1b3a09473452 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -27,6 +27,7 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/log2.h> #include <scsi/fc/fc_fc2.h> @@ -303,10 +304,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, fr_eof(fp) = FC_EOF_N; } - /* - * Initialize remainig fh fields - * from fc_fill_fc_hdr - */ + /* Initialize remaining fh fields from fc_fill_fc_hdr */ fh->fh_ox_id = htons(ep->oxid); fh->fh_rx_id = htons(ep->rxid); fh->fh_seq_id = ep->seq.id; @@ -362,9 +360,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec); - if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, - msecs_to_jiffies(timer_msec))) - fc_exch_hold(ep); /* hold for timer */ + fc_exch_hold(ep); /* hold for timer */ + if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, + msecs_to_jiffies(timer_msec))) + fc_exch_release(ep); } /** @@ -382,6 +381,8 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) /** * fc_exch_done_locked() - Complete an exchange with the exchange lock held * @ep: The exchange that is complete + * + * Note: May sleep if invoked from outside a response handler. */ static int fc_exch_done_locked(struct fc_exch *ep) { @@ -393,7 +394,6 @@ static int fc_exch_done_locked(struct fc_exch *ep) * ep, and in that case we only clear the resp and set it as * complete, so it can be reused by the timer to send the rrq. */ - ep->resp = NULL; if (ep->state & FC_EX_DONE) return rc; ep->esb_stat |= ESB_ST_COMPLETE; @@ -464,15 +464,21 @@ static void fc_exch_delete(struct fc_exch *ep) } static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, - struct fc_frame *fp) + struct fc_frame *fp) { struct fc_exch *ep; struct fc_frame_header *fh = fc_frame_header_get(fp); - int error; + int error = -ENXIO; u32 f_ctl; u8 fh_type = fh->fh_type; ep = fc_seq_exch(sp); + + if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) { + fc_frame_free(fp); + goto out; + } + WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT)); f_ctl = ntoh24(fh->fh_f_ctl); @@ -515,6 +521,9 @@ out: * @lport: The local port that the exchange will be sent on * @sp: The sequence to be sent * @fp: The frame to be sent on the exchange + * + * Note: The frame will be freed either by a direct call to fc_frame_free(fp) + * or indirectly by calling libfc_function_template.frame_send(). */ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) @@ -581,6 +590,8 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) /* * Set the response handler for the exchange associated with a sequence. + * + * Note: May sleep if invoked from outside a response handler. 
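The fc_exch_timer_set_locked() change above inverts the hold order: take the reference before queue_delayed_work() and drop it again only if the work was already pending, so the timer can never fire against an exchange whose last reference was dropped in the race window. The same pattern on a generic object; struct my_obj and its fields are illustrative, not libfc types:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct kref ref;
	struct workqueue_struct *wq;
	struct delayed_work timeout_work;
};

static void my_obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_obj, ref));
}

static void my_obj_timer_arm(struct my_obj *obj, unsigned int msec)
{
	kref_get(&obj->ref);			/* hold for the timer */
	if (!queue_delayed_work(obj->wq, &obj->timeout_work,
				msecs_to_jiffies(msec)))
		kref_put(&obj->ref, my_obj_release);	/* already queued */
}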
*/ static void fc_seq_set_resp(struct fc_seq *sp, void (*resp)(struct fc_seq *, struct fc_frame *, @@ -588,8 +599,18 @@ static void fc_seq_set_resp(struct fc_seq *sp, void *arg) { struct fc_exch *ep = fc_seq_exch(sp); + DEFINE_WAIT(wait); spin_lock_bh(&ep->ex_lock); + while (ep->resp_active && ep->resp_task != current) { + prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_bh(&ep->ex_lock); + + schedule(); + + spin_lock_bh(&ep->ex_lock); + } + finish_wait(&ep->resp_wq, &wait); ep->resp = resp; ep->arg = arg; spin_unlock_bh(&ep->ex_lock); @@ -622,27 +643,31 @@ static int fc_exch_abort_locked(struct fc_exch *ep, if (!sp) return -ENOMEM; - ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; if (timer_msec) fc_exch_timer_set_locked(ep, timer_msec); - /* - * If not logged into the fabric, don't send ABTS but leave - * sequence active until next timeout. - */ - if (!ep->sid) - return 0; - - /* - * Send an abort for the sequence that timed out. - */ - fp = fc_frame_alloc(ep->lp, 0); - if (fp) { - fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, - FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - error = fc_seq_send_locked(ep->lp, sp, fp); - } else - error = -ENOBUFS; + if (ep->sid) { + /* + * Send an abort for the sequence that timed out. + */ + fp = fc_frame_alloc(ep->lp, 0); + if (fp) { + ep->esb_stat |= ESB_ST_SEQ_INIT; + fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, + FC_TYPE_BLS, FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + error = fc_seq_send_locked(ep->lp, sp, fp); + } else { + error = -ENOBUFS; + } + } else { + /* + * If not logged into the fabric, don't send ABTS but leave + * sequence active until next timeout. + */ + error = 0; + } + ep->esb_stat |= ESB_ST_ABNORMAL; return error; } @@ -669,6 +694,61 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp, } /** + * fc_invoke_resp() - invoke ep->resp() + * + * Notes: + * It is assumed that after initialization finished (this means the + * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are + * modified only via fc_seq_set_resp(). This guarantees that none of these + * two variables changes if ep->resp_active > 0. + * + * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when + * this function is invoked, the first spin_lock_bh() call in this function + * will wait until fc_seq_set_resp() has finished modifying these variables. + * + * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that + * ep->resp() won't be invoked after fc_exch_done() has returned. + * + * The response handler itself may invoke fc_exch_done(), which will clear the + * ep->resp pointer. + * + * Return value: + * Returns true if and only if ep->resp has been invoked. + */ +static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, + struct fc_frame *fp) +{ + void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); + void *arg; + bool res = false; + + spin_lock_bh(&ep->ex_lock); + ep->resp_active++; + if (ep->resp_task != current) + ep->resp_task = !ep->resp_task ? 
current : NULL; + resp = ep->resp; + arg = ep->arg; + spin_unlock_bh(&ep->ex_lock); + + if (resp) { + resp(sp, fp, arg); + res = true; + } else if (!IS_ERR(fp)) { + fc_frame_free(fp); + } + + spin_lock_bh(&ep->ex_lock); + if (--ep->resp_active == 0) + ep->resp_task = NULL; + spin_unlock_bh(&ep->ex_lock); + + if (ep->resp_active == 0) + wake_up(&ep->resp_wq); + + return res; +} + +/** * fc_exch_timeout() - Handle exchange timer expiration * @work: The work_struct identifying the exchange that timed out */ @@ -677,8 +757,6 @@ static void fc_exch_timeout(struct work_struct *work) struct fc_exch *ep = container_of(work, struct fc_exch, timeout_work.work); struct fc_seq *sp = &ep->seq; - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); - void *arg; u32 e_stat; int rc = 1; @@ -696,16 +774,13 @@ static void fc_exch_timeout(struct work_struct *work) fc_exch_rrq(ep); goto done; } else { - resp = ep->resp; - arg = ep->arg; - ep->resp = NULL; if (e_stat & ESB_ST_ABNORMAL) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) fc_exch_delete(ep); - if (resp) - resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT)); + fc_seq_set_resp(sp, NULL, ep->arg); fc_seq_exch_abort(sp, 2 * ep->r_a_tov); goto done; } @@ -792,6 +867,8 @@ hit: ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ ep->rxid = FC_XID_UNKNOWN; ep->class = mp->class; + ep->resp_active = 0; + init_waitqueue_head(&ep->resp_wq); INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout); out: return ep; @@ -838,8 +915,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); spin_lock_bh(&pool->lock); ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); - if (ep && ep->xid == xid) + if (ep) { + WARN_ON(ep->xid != xid); fc_exch_hold(ep); + } spin_unlock_bh(&pool->lock); } return ep; @@ -850,6 +929,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and * the memory allocated for the related objects may be freed. * @sp: The sequence that has completed + * + * Note: May sleep if invoked from outside a response handler. */ static void fc_exch_done(struct fc_seq *sp) { @@ -859,6 +940,8 @@ static void fc_exch_done(struct fc_seq *sp) spin_lock_bh(&ep->ex_lock); rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); + + fc_seq_set_resp(sp, NULL, ep->arg); if (!rc) fc_exch_delete(ep); } @@ -987,6 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, } } + spin_lock_bh(&ep->ex_lock); /* * At this point, we have the exchange held. * Find or create the sequence. @@ -1014,11 +1098,11 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, * sending RSP, hence write request on other * end never finishes. 
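fc_seq_set_resp() and fc_invoke_resp() above implement a small quiesce protocol: resp_active counts in-flight handler invocations, resp_task records which task is running one, and the setter sleeps on resp_wq until no foreign invocation is active. That is what makes the "May sleep" notes and the fc_exch_done() guarantee hold. A stripped-down sketch of the setter side; cb_slot and its members are illustrative names, not the libfc API:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct cb_slot {
	spinlock_t lock;
	void (*cb)(void *arg);
	void *arg;
	int active;			/* in-flight invocations */
	struct task_struct *runner;	/* task running the callback */
	wait_queue_head_t wq;
};

static void cb_slot_set(struct cb_slot *s, void (*cb)(void *), void *arg)
{
	DEFINE_WAIT(wait);

	spin_lock_bh(&s->lock);
	while (s->active && s->runner != current) {
		prepare_to_wait(&s->wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&s->lock);
		schedule();
		spin_lock_bh(&s->lock);
	}
	finish_wait(&s->wq, &wait);
	s->cb = cb;		/* safe: no foreign invocation in flight */
	s->arg = arg;
	spin_unlock_bh(&s->lock);
}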
*/ - spin_lock_bh(&ep->ex_lock); sp->ssb_stat |= SSB_ST_RESP; sp->id = fh->fh_seq_id; - spin_unlock_bh(&ep->ex_lock); } else { + spin_unlock_bh(&ep->ex_lock); + /* sequence/exch should exist */ reject = FC_RJT_SEQ_ID; goto rel; @@ -1029,6 +1113,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, if (f_ctl & FC_FC_SEQ_INIT) ep->esb_stat |= ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); fr_seq(fp) = sp; out: @@ -1291,21 +1376,23 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) if (!ep) goto reject; + + fp = fc_frame_alloc(ep->lp, sizeof(*ap)); + if (!fp) + goto free; + spin_lock_bh(&ep->ex_lock); if (ep->esb_stat & ESB_ST_COMPLETE) { spin_unlock_bh(&ep->ex_lock); + + fc_frame_free(fp); goto reject; } - if (!(ep->esb_stat & ESB_ST_REC_QUAL)) + if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { + ep->esb_stat |= ESB_ST_REC_QUAL; fc_exch_hold(ep); /* hold for REC_QUAL */ - ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL; - fc_exch_timer_set_locked(ep, ep->r_a_tov); - - fp = fc_frame_alloc(ep->lp, sizeof(*ap)); - if (!fp) { - spin_unlock_bh(&ep->ex_lock); - goto free; } + fc_exch_timer_set_locked(ep, ep->r_a_tov); fh = fc_frame_header_get(fp); ap = fc_frame_payload_get(fp, sizeof(*ap)); memset(ap, 0, sizeof(*ap)); @@ -1319,14 +1406,16 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) } sp = fc_seq_start_next_locked(sp); fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); + ep->esb_stat |= ESB_ST_ABNORMAL; spin_unlock_bh(&ep->ex_lock); + +free: fc_frame_free(rx_fp); return; reject: fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID); -free: - fc_frame_free(rx_fp); + goto free; } /** @@ -1416,9 +1505,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, * If new exch resp handler is valid then call that * first. */ - if (ep->resp) - ep->resp(sp, fp, ep->arg); - else + if (!fc_invoke_resp(ep, sp, fp)) lport->tt.lport_recv(lport, fp); fc_exch_release(ep); /* release from lookup */ } else { @@ -1442,8 +1529,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) struct fc_exch *ep; enum fc_sof sof; u32 f_ctl; - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); - void *ex_resp_arg; int rc; ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); @@ -1478,19 +1563,19 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) f_ctl = ntoh24(fh->fh_f_ctl); fr_seq(fp) = sp; + + spin_lock_bh(&ep->ex_lock); if (f_ctl & FC_FC_SEQ_INIT) ep->esb_stat |= ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); if (fc_sof_needs_ack(sof)) fc_seq_send_ack(sp, fp); - resp = ep->resp; - ex_resp_arg = ep->arg; if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T && (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { spin_lock_bh(&ep->ex_lock); - resp = ep->resp; rc = fc_exch_done_locked(ep); WARN_ON(fc_seq_exch(sp) != ep); spin_unlock_bh(&ep->ex_lock); @@ -1511,10 +1596,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) * If new exch resp handler is valid then call that * first. 
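The fc_exch_recv_abts() rework above moves the reply-frame allocation in front of the lock acquisition, so no path has to drop ex_lock just to allocate, and folds every exit into a single free label. The control-flow skeleton, reduced to its shape (the BA_ACC construction and the reject path are elided here for brevity):

#include <scsi/fc/fc_fc2.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>

static void recv_abts_skeleton(struct fc_exch *ep, struct fc_frame *rx_fp)
{
	struct fc_frame *fp;

	fp = fc_frame_alloc(ep->lp, sizeof(struct fc_ba_acc));
	if (!fp)
		goto free;		/* nothing allocated, just drop rx_fp */

	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		spin_unlock_bh(&ep->ex_lock);
		fc_frame_free(fp);	/* exchange done, reply not needed */
		goto free;
	}
	/* ... fill in and send the BA_ACC, set ESB_ST_ABNORMAL ... */
	spin_unlock_bh(&ep->ex_lock);
free:
	fc_frame_free(rx_fp);
}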
*/ - if (resp) - resp(sp, fp, ex_resp_arg); - else - fc_frame_free(fp); + fc_invoke_resp(ep, sp, fp); + fc_exch_release(ep); return; rel: @@ -1553,8 +1636,6 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) */ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) { - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); - void *ex_resp_arg; struct fc_frame_header *fh; struct fc_ba_acc *ap; struct fc_seq *sp; @@ -1599,9 +1680,6 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) break; } - resp = ep->resp; - ex_resp_arg = ep->arg; - /* do we need to do some other checks here. Can we reuse more of * fc_exch_recv_seq_resp */ @@ -1613,17 +1691,14 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); + + fc_exch_hold(ep); if (!rc) fc_exch_delete(ep); - - if (resp) - resp(sp, fp, ex_resp_arg); - else - fc_frame_free(fp); - + fc_invoke_resp(ep, sp, fp); if (has_rec) fc_exch_timer_set(ep, ep->r_a_tov); - + fc_exch_release(ep); } /** @@ -1662,7 +1737,7 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) break; default: if (ep) - FC_EXCH_DBG(ep, "BLS rctl %x - %s received", + FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n", fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); break; @@ -1745,32 +1820,33 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason, /** * fc_exch_reset() - Reset an exchange * @ep: The exchange to be reset + * + * Note: May sleep if invoked from outside a response handler. */ static void fc_exch_reset(struct fc_exch *ep) { struct fc_seq *sp; - void (*resp)(struct fc_seq *, struct fc_frame *, void *); - void *arg; int rc = 1; spin_lock_bh(&ep->ex_lock); fc_exch_abort_locked(ep, 0); ep->state |= FC_EX_RST_CLEANUP; fc_exch_timer_cancel(ep); - resp = ep->resp; - ep->resp = NULL; if (ep->esb_stat & ESB_ST_REC_QUAL) atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */ ep->esb_stat &= ~ESB_ST_REC_QUAL; - arg = ep->arg; sp = &ep->seq; rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); + + fc_exch_hold(ep); + if (!rc) fc_exch_delete(ep); - if (resp) - resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); + fc_seq_set_resp(sp, NULL, ep->arg); + fc_exch_release(ep); } /** @@ -1956,13 +2032,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) switch (op) { case ELS_LS_RJT: - FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ"); + FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n"); /* fall through */ case ELS_LS_ACC: goto cleanup; default: - FC_EXCH_DBG(aborted_ep, "unexpected response op %x " - "for RRQ", op); + FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n", + op); return; } @@ -2533,13 +2609,8 @@ int fc_setup_exch_mgr(void) * cpu on which exchange originated by simple bitwise * AND operation between fc_cpu_mask and exchange id. 
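The fc_setup_exch_mgr() comment above describes deriving the owning CPU from the exchange ID by masking; the replacement that follows collapses the hand-rolled shift loop into ilog2(roundup_pow_of_two(nr_cpu_ids)). The closed form as a standalone helper; the function name is illustrative:

#include <linux/log2.h>
#include <linux/types.h>

/*
 * Smallest power-of-two mask covering ncpus. For ncpus == 6:
 * roundup_pow_of_two(6) == 8, ilog2(8) == 3, mask == 0x7.
 */
static u16 cpu_xid_mask(unsigned int ncpus)
{
	unsigned int order = ilog2(roundup_pow_of_two(ncpus));

	return (1U << order) - 1;
}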
*/ - fc_cpu_mask = 1; - fc_cpu_order = 0; - while (fc_cpu_mask < nr_cpu_ids) { - fc_cpu_mask <<= 1; - fc_cpu_order++; - } - fc_cpu_mask--; + fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); + fc_cpu_mask = (1 << fc_cpu_order) - 1; fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); if (!fc_exch_workqueue) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 5fd0f1fbe586..1d7e76e8b447 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -902,7 +902,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) /* * Check for missing or extra data frames. */ - if (unlikely(fsp->xfer_len != expected_len)) { + if (unlikely(fsp->cdb_status == SAM_STAT_GOOD && + fsp->xfer_len != expected_len)) { if (fsp->xfer_len < expected_len) { /* * Some data may be queued locally, @@ -955,12 +956,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) * Test for transport underrun, independent of response * underrun status. */ - if (fsp->xfer_len < fsp->data_len && !fsp->io_status && + if (fsp->cdb_status == SAM_STAT_GOOD && + fsp->xfer_len < fsp->data_len && !fsp->io_status && (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || - fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { + fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) fsp->status_code = FC_DATA_UNDRUN; - fsp->io_status = 0; - } } seq = fsp->seq_ptr; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f04d15c67df3..e01a29863c38 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -516,7 +516,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport, * @lport: The local port receiving the LOGO * @fp: The LOGO request frame * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) @@ -1088,7 +1088,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) { unsigned long delay = 0; FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", - PTR_ERR(fp), fc_lport_state(lport), + IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport), lport->retry_count); if (PTR_ERR(fp) == -FC_EX_CLOSED) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index c710d908fda6..589ff9aedd31 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1705,7 +1705,7 @@ reject: * @rdata: The remote port that sent the PRLI request * @rx_fp: The PRLI request frame * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling * this function. */ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, @@ -1824,7 +1824,7 @@ drop: * @rdata: The remote port that sent the PRLO request * @rx_fp: The PRLO request frame * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling * this function. */ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, @@ -1895,7 +1895,7 @@ drop: * @lport: The local port that received the LOGO request * @fp: The LOGO request frame * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling * this function. 
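A detail worth noting in the fc_lport_error() fix above: libfc overloads the frame pointer to carry error codes, so a callback may receive either a real fc_frame or ERR_PTR(-FC_EX_TIMEOUT) and friends, and the debug print must test with IS_ERR() before calling PTR_ERR(). A minimal illustration of the guarded extraction; frame_error is a hypothetical helper, not a libfc function:

#include <linux/err.h>

struct fc_frame;	/* opaque here */

/* Returns the positive error number carried by fp, or 0 if fp is real. */
static long frame_error(const struct fc_frame *fp)
{
	return IS_ERR(fp) ? -PTR_ERR(fp) : 0;
}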
*/ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 161c98efade9..d2895836f9fa 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) qc->tf.nsect = 0; } - ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis); + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis); task->uldd_task = qc; if (ata_is_atapi(qc->tf.protocol)) { memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index df43bfe6d573..4e1b75ca7451 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -708,6 +708,7 @@ struct lpfc_hba { uint32_t cfg_multi_ring_type; uint32_t cfg_poll; uint32_t cfg_poll_tmo; + uint32_t cfg_task_mgmt_tmo; uint32_t cfg_use_msi; uint32_t cfg_fcp_imax; uint32_t cfg_fcp_cpu_map; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 16498e030c70..00656fc92b93 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1865,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ if (val >= minval && val <= maxval) {\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ - "3053 lpfc_" #attr " changed from %d to %d\n", \ - vport->cfg_##attr, val); \ + "3053 lpfc_" #attr \ + " changed from %d (x%x) to %d (x%x)\n", \ + vport->cfg_##attr, vport->cfg_##attr, \ + val, val); \ vport->cfg_##attr = val;\ return 0;\ }\ @@ -4011,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); # For [0], FCP commands are issued to Work Queues ina round robin fashion. # For [1], FCP commands are issued to a Work Queue associated with the # current CPU. +# It would be set to 1 by the driver if it's able to set up cpu affinity +# for FCP I/Os through Work Queue associated with the current CPU. Otherwise, +# roundrobin scheduling of FCP I/Os through WQs will be used. */ -LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for " +LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for " "issuing commands [0] - Round Robin, [1] - Current CPU"); /* @@ -4110,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, "Milliseconds driver will wait between polling FCP ring"); /* +# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands +# to complete in seconds. Value range is [5,180], default value is 60. 
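The new lpfc_task_mgmt_tmo attribute above follows the driver's LPFC_ATTR_RW convention: a default, a checked [min,max] range, and a writable knob. Roughly what that amounts to, sketched without the lpfc macro machinery (the parameter and setter names here are illustrative, and lpfc exposes the knob via sysfs attributes rather than a bare module_param):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

static unsigned int task_mgmt_tmo = 60;	/* default: 60 seconds */
module_param(task_mgmt_tmo, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(task_mgmt_tmo,
	"Maximum time to wait for task management commands to complete (5-180)");

/* Range check, as the generated lpfc_<attr>_set routines perform it. */
static int task_mgmt_tmo_set(unsigned int val)
{
	if (val < 5 || val > 180)
		return -EINVAL;
	task_mgmt_tmo = val;
	return 0;
}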
+*/ +LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, + "Maximum time to wait for task management commands to complete"); +/* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature # 0 = MSI disabled @@ -4295,6 +4306,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_issue_reset, &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, + &dev_attr_lpfc_task_mgmt_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_cpu_map, @@ -5274,6 +5286,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_topology_init(phba, lpfc_topology); lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); + lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 79c13c3263f1..82134d20e2d8 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); + iocb = &dd_data->context_un.iocb; ndlp = iocb->ndlp; rmp = iocb->rmp; @@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) int request_nseg; int reply_nseg; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; int rc = 0; int iocb_stat; @@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) } iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - if (iocb_stat == IOCB_SUCCESS) + + if (iocb_stat == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed yet */ + if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ - else if (iocb_stat == IOCB_BUSY) + } else if (iocb_stat == IOCB_BUSY) { rc = -EAGAIN; - else + } else { rc = -EIO; + } /* iocb failed so cleanup */ + job->dd_data = NULL; free_rmp: lpfc_free_bsg_buffers(phba, rmp); @@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); + rsp = &rspiocbq->iocb; pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; prsp = (struct lpfc_dmabuf *)pcmd->list.next; @@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) struct lpfc_iocbq *cmdiocbq; uint16_t rpi = 0; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; int rc = 0; @@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - if (rc == IOCB_SUCCESS) + if (rc == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed/released */ + if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ - else if (rc == IOCB_BUSY) + } 
else if (rc == IOCB_BUSY) { rc = -EAGAIN; - else + } else { rc = -EIO; + } -linkdown_err: + /* iocb failed so cleanup */ + job->dd_data = NULL; +linkdown_err: cmdiocbq->context1 = ndlp; lpfc_els_free_iocb(phba, cmdiocbq); @@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) struct lpfc_hba *phba = vport->phba; struct get_ct_event *event_req; struct get_ct_event_reply *event_reply; - struct lpfc_bsg_event *evt; + struct lpfc_bsg_event *evt, *evt_next; struct event_data *evt_dat = NULL; unsigned long flags; uint32_t rc = 0; @@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) event_reply = (struct get_ct_event_reply *) job->reply->reply_data.vendor_reply.vendor_rsp; spin_lock_irqsave(&phba->ct_ev_lock, flags); - list_for_each_entry(evt, &phba->ct_ev_waiters, node) { + list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { if (evt->reg_id == event_req->ev_reg_id) { if (list_empty(&evt->events_to_get)) break; @@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, } spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); + ndlp = dd_data->context_un.iocb.ndlp; cmp = cmdiocbq->context2; bmp = cmdiocbq->context3; @@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, int rc = 0; struct lpfc_nodelist *ndlp = NULL; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; /* allocate our bsg tracking structure */ @@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); - if (rc == IOCB_SUCCESS) + if (rc == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed/released */ + if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ + } + + /* iocb failed so cleanup */ + job->dd_data = NULL; issue_ct_rsp_exit: lpfc_sli_release_iocbq(phba, ctiocb); @@ -2580,7 +2629,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); - if (iocb_stat) { + if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) { ret_val = -EIO; goto err_get_xri_exit; } @@ -3155,8 +3204,9 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); - if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) && - (rsp->ulpStatus != IOCB_SUCCESS))) { + if ((iocb_stat != IOCB_SUCCESS) || + ((phba->sli_rev < LPFC_SLI_REV4) && + (rsp->ulpStatus != IOSTAT_SUCCESS))) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "3126 Failed loopback test issue iocb: " "iocb_stat:x%x\n", iocb_stat); @@ -5284,9 +5334,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) * remove it from the txq queue and call cancel iocbs. 
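The LPFC_IO_CMD_OUTSTANDING dance in the bsg paths above is an abort-window protocol: the submitter opens the window under hbalock only after lpfc_sli_issue_iocb() succeeded, each completion handler closes it, and the timeout handler may abort only while it is open, otherwise it returns -EAGAIN and lets the completion win the race. The shape of the timeout side, on a hypothetical context structure (io_ctx and its fields are not lpfc names):

#include <linux/errno.h>
#include <linux/spinlock.h>

#define IO_CMD_OUTSTANDING	0x1

struct io_ctx {
	spinlock_t lock;
	unsigned int state;
};

static int timeout_abort(struct io_ctx *io)
{
	unsigned long flags;

	spin_lock_irqsave(&io->lock, flags);
	/* window still open? the completion handler clears the flag */
	if (!(io->state & IO_CMD_OUTSTANDING)) {
		spin_unlock_irqrestore(&io->lock, flags);
		return -EAGAIN;		/* completion won the race */
	}
	/* ... abort the command while the window is held open ... */
	spin_unlock_irqrestore(&io->lock, flags);
	return 0;
}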
* Otherwise, call abort iotag */ - cmdiocb = dd_data->context_un.iocb.cmdiocbq; - spin_lock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O abort window is still open */ + if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return -EAGAIN; + } list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, list) { if (check_iocb == cmdiocb) { @@ -5296,8 +5352,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) } if (list_empty(&completions)) lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); - spin_unlock_irq(&phba->hbalock); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + spin_unlock_irqrestore(&phba->hbalock, flags); if (!list_empty(&completions)) { lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, @@ -5321,9 +5376,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) * remove it from the txq queue and call cancel iocbs. * Otherwise, call abort iotag. */ - cmdiocb = dd_data->context_un.menlo.cmdiocbq; - spin_lock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + spin_lock_irqsave(&phba->hbalock, flags); list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, list) { if (check_iocb == cmdiocb) { @@ -5333,8 +5389,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) } if (list_empty(&completions)) lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); - spin_unlock_irq(&phba->hbalock); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + spin_unlock_irqrestore(&phba->hbalock, flags); if (!list_empty(&completions)) { lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 02e8cd923d0a..da61d8dc0449 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -280,7 +280,7 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb) buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3; lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); - ctiocb->context1 = NULL; + ctiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, ctiocb); return 0; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 60d6ca2f68c2..883ea2d9f237 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4171,8 +4171,6 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NLP_INT_NODE_ACT(ndlp); atomic_set(&ndlp->cmd_pending, 0); ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; - if (vport->phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); } struct lpfc_nodelist * @@ -4217,6 +4215,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_initialize_node(vport, ndlp, did); spin_unlock_irqrestore(&phba->ndlp_lock, flags); + if (vport->phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); + if (state != NLP_STE_UNUSED_NODE) lpfc_nlp_set_state(vport, ndlp, state); @@ -4437,6 +4438,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (!ndlp) return; lpfc_issue_els_logo(vport, ndlp, 0); + mempool_free(pmb, phba->mbox_mem_pool); } /* @@ -4456,7 +4458,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc; uint16_t rpi; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (ndlp->nlp_flag & NLP_RPI_REGISTERED || + ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { + if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + lpfc_printf_vlog(vport, KERN_INFO, 
LOG_SLI, + "3366 RPI x%x needs to be " + "unregistered nlp_flag x%x " + "did x%x\n", + ndlp->nlp_rpi, ndlp->nlp_flag, + ndlp->nlp_DID); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { /* SLI4 ports require the physical rpi value. */ @@ -5608,6 +5618,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_initialize_node(vport, ndlp, did); INIT_LIST_HEAD(&ndlp->nlp_listp); + if (vport->phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node init: did:x%x", diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 086c3f28caa6..5464b116d328 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -3439,7 +3439,8 @@ struct els_request64_wqe { #define els_req64_hopcnt_SHIFT 24 #define els_req64_hopcnt_MASK 0x000000ff #define els_req64_hopcnt_WORD word13 - uint32_t reserved[2]; + uint32_t word14; + uint32_t max_response_payload_len; }; struct xmit_els_rsp64_wqe { @@ -3554,7 +3555,8 @@ struct gen_req64_wqe { uint32_t relative_offset; struct wqe_rctl_dfctl wge_ctl; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; + uint32_t rsvd_12_14[3]; + uint32_t max_response_payload_len; }; struct create_xri_wqe { @@ -3584,7 +3586,13 @@ struct abort_cmd_wqe { struct fcp_iwrite64_wqe { struct ulp_bde64 bde; - uint32_t payload_offset_len; + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t total_xfer_len; uint32_t initial_xfer_len; struct wqe_common wqe_com; /* words 6-11 */ @@ -3594,7 +3602,13 @@ struct fcp_iwrite64_wqe { struct fcp_iread64_wqe { struct ulp_bde64 bde; - uint32_t payload_offset_len; /* word 3 */ + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t total_xfer_len; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ @@ -3604,7 +3618,13 @@ struct fcp_iread64_wqe { struct fcp_icmnd64_wqe { struct ulp_bde64 bde; /* words 0-2 */ - uint32_t rsrvd3; /* word 3 */ + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t rsrvd4; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index c1fca8df6355..68c94cc85c35 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) phba->sli4_hba.scsi_xri_max); spin_lock_irq(&phba->scsi_buf_list_get_lock); - spin_lock_irq(&phba->scsi_buf_list_put_lock); + spin_lock(&phba->scsi_buf_list_put_lock); list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); - spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock(&phba->scsi_buf_list_put_lock); spin_unlock_irq(&phba->scsi_buf_list_get_lock); if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { @@ -3070,10 +3070,10 @@ 
lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } spin_lock_irq(&phba->scsi_buf_list_get_lock); - spin_lock_irq(&phba->scsi_buf_list_put_lock); + spin_lock(&phba->scsi_buf_list_put_lock); list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock(&phba->scsi_buf_list_put_lock); spin_unlock_irq(&phba->scsi_buf_list_get_lock); return 0; @@ -4545,7 +4545,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba) pci_save_state(pdev); /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ - if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) + if (pci_is_pcie(pdev)) pdev->needs_freset = 1; return 0; @@ -4857,6 +4857,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) struct lpfc_mqe *mqe; int longs; + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); + /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); if (rc) @@ -4900,15 +4903,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_mbox_ext_buf_ctx)); INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); - /* - * We need to do a READ_CONFIG mailbox command here before - * calling lpfc_get_cfgparam. For VFs this will report the - * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. - * All of the resources allocated - * for this Port are tied to these values. - */ - /* Get all the module params for configuring this host */ - lpfc_get_cfgparam(phba); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after the read_config mbox */ @@ -7139,19 +7133,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) phba->sli4_hba.fcp_wq = NULL; } - if (phba->pci_bar0_memmap_p) { - iounmap(phba->pci_bar0_memmap_p); - phba->pci_bar0_memmap_p = NULL; - } - if (phba->pci_bar2_memmap_p) { - iounmap(phba->pci_bar2_memmap_p); - phba->pci_bar2_memmap_p = NULL; - } - if (phba->pci_bar4_memmap_p) { - iounmap(phba->pci_bar4_memmap_p); - phba->pci_bar4_memmap_p = NULL; - } - /* Release FCP CQ mapping array */ if (phba->sli4_hba.fcp_cq_map != NULL) { kfree(phba->sli4_hba.fcp_cq_map); @@ -7940,9 +7921,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) * particular PCI BARs regions is dependent on the type of * SLI4 device. */ - if (pci_resource_start(pdev, 0)) { - phba->pci_bar0_map = pci_resource_start(pdev, 0); - bar0map_len = pci_resource_len(pdev, 0); + if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { + phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); + bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); /* * Map SLI4 PCI Config Space Register base to a kernel virtual @@ -7956,6 +7937,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "registers.\n"); goto out; } + phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; /* Set up BAR0 PCI config space register memory map */ lpfc_sli4_bar0_register_memmap(phba, if_type); } else { @@ -7978,13 +7960,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) } if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, 2))) { + (pci_resource_start(pdev, PCI_64BIT_BAR2))) { /* * Map SLI4 if type 0 HBA Control Register base to a kernel * virtual address and setup the registers. 
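The scsi_buf list-splice hunks above downgrade the inner lock to a plain spin_lock(): the outer spin_lock_irq() on the get-list lock has already disabled interrupts, and an inner spin_unlock_irq() would have re-enabled them while the outer lock was still held. A minimal sketch of the corrected nesting, with hypothetical lock and list names:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(get_lock);	/* hypothetical outer lock */
static DEFINE_SPINLOCK(put_lock);	/* hypothetical inner lock */
static LIST_HEAD(get_list);
static LIST_HEAD(put_list);

static void splice_put_into_get(void)
{
	spin_lock_irq(&get_lock);	/* disables interrupts once */
	spin_lock(&put_lock);		/* must not touch IRQ state */
	list_splice_init(&put_list, &get_list);
	spin_unlock(&put_lock);		/* leaves interrupts off */
	spin_unlock_irq(&get_lock);	/* re-enables them exactly once */
}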
*/ - phba->pci_bar1_map = pci_resource_start(pdev, 2); - bar1map_len = pci_resource_len(pdev, 2); + phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); phba->sli4_hba.ctrl_regs_memmap_p = ioremap(phba->pci_bar1_map, bar1map_len); if (!phba->sli4_hba.ctrl_regs_memmap_p) { @@ -7992,17 +7974,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "ioremap failed for SLI4 HBA control registers.\n"); goto out_iounmap_conf; } + phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; lpfc_sli4_bar1_register_memmap(phba); } if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, 4))) { + (pci_resource_start(pdev, PCI_64BIT_BAR4))) { /* * Map SLI4 if type 0 HBA Doorbell Register base to a kernel * virtual address and setup the registers. */ - phba->pci_bar2_map = pci_resource_start(pdev, 4); - bar2map_len = pci_resource_len(pdev, 4); + phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); phba->sli4_hba.drbl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->sli4_hba.drbl_regs_memmap_p) { @@ -8010,6 +7993,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "ioremap failed for SLI4 HBA doorbell registers.\n"); goto out_iounmap_ctrl; } + phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); if (error) goto out_iounmap_all; @@ -8403,7 +8387,8 @@ static int lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) { int i, idx, saved_chann, used_chann, cpu, phys_id; - int max_phys_id, num_io_channel, first_cpu; + int max_phys_id, min_phys_id; + int num_io_channel, first_cpu, chan; struct lpfc_vector_map_info *cpup; #ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo; @@ -8421,6 +8406,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) phba->sli4_hba.num_present_cpu)); max_phys_id = 0; + min_phys_id = 0xff; phys_id = 0; num_io_channel = 0; first_cpu = LPFC_VECTOR_MAP_EMPTY; @@ -8444,9 +8430,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) if (cpup->phys_id > max_phys_id) max_phys_id = cpup->phys_id; + if (cpup->phys_id < min_phys_id) + min_phys_id = cpup->phys_id; cpup++; } + phys_id = min_phys_id; /* Now associate the HBA vectors with specific CPUs */ for (idx = 0; idx < vectors; idx++) { cpup = phba->sli4_hba.cpu_map; @@ -8457,13 +8446,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) for (i = 1; i < max_phys_id; i++) { phys_id++; if (phys_id > max_phys_id) - phys_id = 0; + phys_id = min_phys_id; cpu = lpfc_find_next_cpu(phba, phys_id); if (cpu == LPFC_VECTOR_MAP_EMPTY) continue; goto found; } + /* Use round robin for scheduling */ + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; + chan = 0; + cpup = phba->sli4_hba.cpu_map; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = chan; + cpup++; + chan++; + if (chan >= phba->cfg_fcp_io_channel) + chan = 0; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3329 Cannot set affinity:" "Error mapping vector %d (%d)\n", @@ -8501,7 +8502,7 @@ found: /* Spread vector mapping across multple physical CPU nodes */ phys_id++; if (phys_id > max_phys_id) - phys_id = 0; + phys_id = min_phys_id; } /* @@ -8511,7 +8512,7 @@ found: * Base the remaining IO channel assigned, to IO channels already * assigned to other CPUs on the same phys_id. 
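The affinity hunks above stop assuming that physical package ids start at zero: the scan now wraps around to min_phys_id rather than 0, since a platform may expose a sparse set of package ids. The wraparound step, reduced to a sketch:

/* Hypothetical walk over package ids in [min_phys_id, max_phys_id]. */
static int next_phys_id(int cur, int min_phys_id, int max_phys_id)
{
	if (++cur > max_phys_id)
		cur = min_phys_id;	/* wrap to the lowest real id, not 0 */
	return cur;
}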
*/ - for (i = 0; i <= max_phys_id; i++) { + for (i = min_phys_id; i <= max_phys_id; i++) { /* * If there are no io channels already mapped to * this phys_id, just round robin thru the io_channels. @@ -8593,10 +8594,11 @@ out: if (num_io_channel != phba->sli4_hba.num_present_cpu) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3333 Set affinity mismatch:" - "%d chann != %d cpus: %d vactors\n", + "%d chann != %d cpus: %d vectors\n", num_io_channel, phba->sli4_hba.num_present_cpu, vectors); + /* Enable using cpu affinity for scheduling */ phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; return 1; } @@ -8687,9 +8689,12 @@ enable_msix_vectors: cfg_fail_out: /* free the irq already requested */ - for (--index; index >= 0; index--) + for (--index; index >= 0; index--) { + irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. + vector, NULL); free_irq(phba->sli4_hba.msix_entries[index].vector, &phba->sli4_hba.fcp_eq_hdl[index]); + } msi_fail_out: /* Unconfigure MSI-X capability structure */ @@ -8710,9 +8715,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba) int index; /* Free up MSI-X multi-message vectors */ - for (index = 0; index < phba->cfg_fcp_io_channel; index++) + for (index = 0; index < phba->cfg_fcp_io_channel; index++) { + irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. + vector, NULL); free_irq(phba->sli4_hba.msix_entries[index].vector, &phba->sli4_hba.fcp_eq_hdl[index]); + } /* Disable MSI-X */ pci_disable_msix(phba->pcidev); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1242b6c4308b..b2ede05a5f0a 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) /* get all SCSI buffers need to repost to a local list */ spin_lock_irq(&phba->scsi_buf_list_get_lock); - spin_lock_irq(&phba->scsi_buf_list_put_lock); + spin_lock(&phba->scsi_buf_list_put_lock); list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); - spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock(&phba->scsi_buf_list_put_lock); spin_unlock_irq(&phba->scsi_buf_list_get_lock); /* post the list of scsi buffer sgls to port if available */ @@ -1000,29 +1000,37 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) } memset(psb->data, 0, phba->cfg_sg_dma_buf_size); - /* Page alignment is CRITICAL, double check to be sure */ - if (((unsigned long)(psb->data) & - (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { + /* + * 4K Page alignment is CRITICAL to BlockGuard, double check + * to be sure. + */ + if (phba->cfg_enable_bg && (((unsigned long)(psb->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data, psb->dma_handle); kfree(psb); break; } - /* Allocate iotag for psb->cur_iocbq. */ - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); - if (iotag == 0) { + + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + psb->data, psb->dma_handle); kfree(psb); break; } - lxri = lpfc_sli4_next_xritag(phba); - if (lxri == NO_XRI) { + /* Allocate iotag for psb->cur_iocbq. 
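Both MSI-X teardown loops above now clear the affinity hint before calling free_irq(); freeing a vector that still carries a hint leaves a stale pointer behind. The pairing, sketched with hypothetical handler data:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical teardown mirroring the pattern in the hunks above. */
static void teardown_vectors(struct msix_entry *entries, void **data, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		irq_set_affinity_hint(entries[i].vector, NULL);	/* drop hint */
		free_irq(entries[i].vector, data[i]);		/* then free */
	}
}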
*/ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + psb->data, psb->dma_handle); kfree(psb); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "3368 Failed to allocated IOTAG for" + " XRI:0x%x\n", lxri); + lpfc_sli4_free_xri(phba, lxri); break; } psb->cur_iocbq.sli4_lxritag = lxri; @@ -1134,22 +1142,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf * lpfc_cmd = NULL; struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; - unsigned long gflag = 0; - unsigned long pflag = 0; + unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, list); if (!lpfc_cmd) { - spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + spin_lock(&phba->scsi_buf_list_put_lock); list_splice(&phba->lpfc_scsi_buf_list_put, &phba->lpfc_scsi_buf_list_get); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, list); - spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); + spin_unlock(&phba->scsi_buf_list_put_lock); } - spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); return lpfc_cmd; } /** @@ -1167,11 +1174,10 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; - unsigned long gflag = 0; - unsigned long pflag = 0; + unsigned long iflag = 0; int found = 0; - spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, &phba->lpfc_scsi_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, @@ -1182,11 +1188,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) break; } if (!found) { - spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + spin_lock(&phba->scsi_buf_list_put_lock); list_splice(&phba->lpfc_scsi_buf_list_put, &phba->lpfc_scsi_buf_list_get); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); + spin_unlock(&phba->scsi_buf_list_put_lock); list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, &phba->lpfc_scsi_buf_list_get, list) { if (lpfc_test_rrq_active( @@ -1197,7 +1203,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) break; } } - spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); if (!found) return NULL; return lpfc_cmd; @@ -3966,11 +3972,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, /* * Check SLI validation that all the transfer was actually done - * (fcpi_parm should be zero). + * (fcpi_parm should be zero). Apply check only to reads. 
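The fcpi_parm hunk above narrows the residual check to host-bound transfers, since the check is only meaningful for reads, as the in-tree comment now notes. The guard, roughly:

#include <linux/dma-direction.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

/* Only treat a nonzero residual hint as suspect for reads. */
static bool residual_check_applies(struct scsi_cmnd *cmnd, u32 fcpi_parm)
{
	return fcpi_parm && cmnd->sc_data_direction == DMA_FROM_DEVICE;
}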
*/ - } else if (fcpi_parm) { + } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, - "9029 FCP Data Transfer Check Error: " + "9029 FCP Read Check Error Data: " "x%x x%x x%x x%x x%x\n", be32_to_cpu(fcpcmd->fcpDl), be32_to_cpu(fcprsp->rspResId), @@ -4342,6 +4348,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, char tag[2]; uint8_t *ptr; bool sli4; + uint32_t fcpdl; if (!pnode || !NLP_CHK_NODE_ACT(pnode)) return; @@ -4389,8 +4396,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, iocb_cmd->ulpPU = PARM_READ_CHECK; if (vport->cfg_first_burst_size && (pnode->nlp_flag & NLP_FIRSTBURST)) { - piocbq->iocb.un.fcpi.fcpi_XRdy = - vport->cfg_first_burst_size; + fcpdl = scsi_bufflen(scsi_cmnd); + if (fcpdl < vport->cfg_first_burst_size) + piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl; + else + piocbq->iocb.un.fcpi.fcpi_XRdy = + vport->cfg_first_burst_size; } fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; @@ -4479,9 +4490,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, piocb->ulpContext = vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; } - if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { - piocb->ulpFCP2Rcvy = 1; - } + piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); /* ulpTimeout is only one byte */ @@ -4878,6 +4887,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) goto out_unlock; } + /* Indicate the IO is being aborted by the driver. */ + iocb->iocb_flag |= LPFC_DRIVER_ABORTED; + /* * The scsi command can not be in txq and it is in flight because the * pCmd is still pointing at the SCSI command we have to abort. There @@ -4972,6 +4984,73 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) } } + +/** + * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * + * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; + uint32_t rsp_info; + uint32_t rsp_len; + uint8_t rsp_info_code; + int ret = FAILED; + + + if (fcprsp == NULL) + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0703 fcp_rsp is missing\n"); + else { + rsp_info = fcprsp->rspStatus2; + rsp_len = be32_to_cpu(fcprsp->rspRspLen); + rsp_info_code = fcprsp->rspInfo3; + + + lpfc_printf_vlog(vport, KERN_INFO, + LOG_FCP, + "0706 fcp_rsp valid 0x%x," + " rsp len=%d code 0x%x\n", + rsp_info, + rsp_len, rsp_info_code); + + if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) { + switch (rsp_info_code) { + case RSP_NO_FAILURE: + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0715 Task Mgmt No Failure\n"); + ret = SUCCESS; + break; + case RSP_TM_NOT_SUPPORTED: /* TM rejected */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0716 Task Mgmt Target " + "reject\n"); + break; + case RSP_TM_NOT_COMPLETED: /* TM failed */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0717 Task Mgmt Target " + "failed TM\n"); + break; + case RSP_TM_INVALID_LU: /* TM to invalid LU!
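lpfc_check_fcp_rsp(), taking shape in the hunk above, only trusts the response-info byte after verifying RSP_LEN_VALID and an 8-byte FCP_RSP_INFO length, which is the format FCP mandates for task-management responses. A compact decoder over the same codes; the values mirror the lpfc_scsi.h definitions, with RSP_NO_FAILURE taken to be 0x00 as an assumption, since that define is not shown here:

#include <linux/types.h>
#include <scsi/scsi.h>

/* Map an FCP_RSP_INFO task-management code to an eh status. */
static int tm_rsp_to_status(u8 rsp_info_code)
{
	switch (rsp_info_code) {
	case 0x00:		/* RSP_NO_FAILURE: TM completed */
		return SUCCESS;
	case 0x04:		/* RSP_TM_NOT_SUPPORTED */
	case 0x05:		/* RSP_TM_NOT_COMPLETED */
	case 0x09:		/* RSP_TM_INVALID_LU */
	default:
		return FAILED;
	}
}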
*/ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0718 Task Mgmt to invalid " + "LUN\n"); + break; + } + } + } + return ret; +} + + /** * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler * @vport: The virtual port for which this call is being executed. @@ -5006,7 +5085,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); if (lpfc_cmd == NULL) return FAILED; - lpfc_cmd->timeout = 60; + lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; lpfc_cmd->rdata = rdata; status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, @@ -5033,12 +5112,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, iocbq, iocbqrsp, lpfc_cmd->timeout); - if (status != IOCB_SUCCESS) { - if (status == IOCB_TIMEDOUT) { - ret = TIMEOUT_ERROR; - } else - ret = FAILED; - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + if ((status != IOCB_SUCCESS) || + (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0727 TMF %s to TGT %d LUN %d failed (%d, %d) " "iocb_flag x%x\n", @@ -5046,9 +5121,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, iocbqrsp->iocb.un.ulpWord[4], iocbq->iocb_flag); - } else if (status == IOCB_BUSY) - ret = FAILED; - else + /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ + if (status == IOCB_SUCCESS) { + if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) + /* Something in the FCP_RSP was invalid. + * Check conditions */ + ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); + else + ret = FAILED; + } else if (status == IOCB_TIMEDOUT) { + ret = TIMEOUT_ERROR; + } else { + ret = FAILED; + } + lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + } else ret = SUCCESS; lpfc_sli_release_iocbq(phba, iocbqrsp); @@ -5172,7 +5259,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; - int status, ret = SUCCESS; + int status; if (!rdata) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, @@ -5213,9 +5300,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) * So, continue on. * We will report success if all the i/o aborts successfully. */ - ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + if (status == SUCCESS) + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_LUN); - return ret; + + return status; } /** @@ -5239,7 +5328,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; - int status, ret = SUCCESS; + int status; if (!rdata) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, @@ -5280,9 +5369,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) * So, continue on. * We will report success if all the i/o aborts successfully. 
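The reset-handler hunk above (and its target-reset twin just below) chains the result instead of unconditionally returning the flush status: the io-context flush only runs when the TMF itself succeeded, so a failed TMF is no longer masked. The control flow in isolation, as a hypothetical helper:

#include <scsi/scsi.h>

/* Flush outstanding I/O only after a good TMF. */
static int reset_then_flush(int (*send_tmf)(void), int (*flush_io)(void))
{
	int status = send_tmf();

	if (status == SUCCESS)		/* don't hide a failed TMF */
		status = flush_io();
	return status;
}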
*/ - ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + if (status == SUCCESS) + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_TGT); - return ret; + return status; } /** diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index b1d9f7fcb911..852ff7def493 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -73,6 +73,7 @@ struct fcp_rsp { #define RSP_RO_MISMATCH_ERR 0x03 #define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */ #define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */ +#define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */ uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 0392e114531c..8f580fda443f 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, int); static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, uint32_t); +static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); +static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); static IOCB_t * lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) @@ -6566,6 +6568,108 @@ lpfc_mbox_timeout(unsigned long ptr) return; } +/** + * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions + * are pending + * @phba: Pointer to HBA context object. + * + * This function checks if any mailbox completions are present on the mailbox + * completion queue. + **/ +bool +lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) +{ + + uint32_t idx; + struct lpfc_queue *mcq; + struct lpfc_mcqe *mcqe; + bool pending_completions = false; + + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) + return false; + + /* Check for completions on mailbox completion queue */ + + mcq = phba->sli4_hba.mbx_cq; + idx = mcq->hba_index; + while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { + mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; + if (bf_get_le32(lpfc_trailer_completed, mcqe) && + (!bf_get_le32(lpfc_trailer_async, mcqe))) { + pending_completions = true; + break; + } + idx = (idx + 1) % mcq->entry_count; + if (mcq->hba_index == idx) + break; + } + return pending_completions; + +} + +/** + * lpfc_sli4_process_missed_mbox_completions - process mbox completions + * that were missed. + * @phba: Pointer to HBA context object. + * + * For sli4, it is possible to miss an interrupt. As such, mbox completions + * may be missed, causing erroneous mailbox timeouts to occur. This function + * checks to see if mbox completions are on the mailbox completion queue + * and will process all the completions associated with the eq for the + * mailbox completion queue.
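lpfc_sli4_mbox_completions_pending() above is a read-only lap over the completion ring: start at hba_index, stop at the first invalid entry, report a hit on a completed non-async MCQE, and bail out after one full wrap so a fully valid ring cannot spin forever. The same scan shape on a generic ring, as a sketch:

#include <linux/types.h>

struct ring { u32 head; u32 nentries; };	/* hypothetical ring */

static bool scan_ring(struct ring *r, bool (*valid)(u32), bool (*hit)(u32))
{
	u32 idx = r->head;

	while (valid(idx)) {
		if (hit(idx))
			return true;
		idx = (idx + 1) % r->nentries;
		if (idx == r->head)	/* one full lap, give up */
			break;
	}
	return false;
}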
+ **/ +bool +lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) +{ + + uint32_t eqidx; + struct lpfc_queue *fpeq = NULL; + struct lpfc_eqe *eqe; + bool mbox_pending; + + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) + return false; + + /* Find the eq associated with the mcq */ + + if (phba->sli4_hba.hba_eq) + for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) + if (phba->sli4_hba.hba_eq[eqidx]->queue_id == + phba->sli4_hba.mbx_cq->assoc_qid) { + fpeq = phba->sli4_hba.hba_eq[eqidx]; + break; + } + if (!fpeq) + return false; + + /* Turn off interrupts from this EQ */ + + lpfc_sli4_eq_clr_intr(fpeq); + + /* Check to see if a mbox completion is pending */ + + mbox_pending = lpfc_sli4_mbox_completions_pending(phba); + + /* + * If a mbox completion is pending, process all the events on EQ + * associated with the mbox completion queue (this could include + * mailbox commands, async events, els commands, receive queue data + * and fcp commands) + */ + + if (mbox_pending) + while ((eqe = lpfc_sli4_eq_get(fpeq))) { + lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); + fpeq->EQ_processed++; + } + + /* Always clear and re-arm the EQ */ + + lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); + + return mbox_pending; + +} /** * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout @@ -6583,6 +6687,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; + /* If the mailbox completed, process the completion and return */ + if (lpfc_sli4_process_missed_mbox_completions(phba)) + return; + /* Check the pmbox pointer first. There is a race condition * between the mbox timeout handler getting executed in the * worklist and the mailbox actually completing. When this @@ -7077,6 +7185,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 1000) + jiffies; spin_unlock_irq(&phba->hbalock); + /* Make sure the mailbox is really active */ + if (timeout) + lpfc_sli4_process_missed_mbox_completions(phba); + /* Wait for the outstanding mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ @@ -8076,6 +8188,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); + wqe->els_req.max_response_payload_len = total_len - xmit_len; break; case CMD_XMIT_SEQUENCE64_CX: bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, @@ -8120,8 +8233,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, command_type = FCP_COMMAND_DATA_OUT; /* word3 iocb=iotag wqe=payload_offset_len */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - wqe->fcp_iwrite.payload_offset_len = - xmit_len + sizeof(struct fcp_rsp); + bf_set(payload_offset_len, &wqe->fcp_iwrite, + xmit_len + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_iwrite, + 0); /* word4 iocb=parameter wqe=total_xfer_length memcpy */ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, @@ -8139,8 +8254,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, case CMD_FCP_IREAD64_CR: /* word3 iocb=iotag wqe=payload_offset_len */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - wqe->fcp_iread.payload_offset_len = - xmit_len + sizeof(struct fcp_rsp); +
bf_set(cmd_buff_len, &wqe->fcp_iread, + 0); /* word4 iocb=parameter wqe=total_xfer_length memcpy */ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, @@ -8156,8 +8273,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); break; case CMD_FCP_ICMND64_CR: + /* word3 iocb=iotag wqe=payload_offset_len */ + /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ + bf_set(payload_offset_len, &wqe->fcp_icmd, + xmit_len + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_icmd, + 0); /* word3 iocb=IO_TAG wqe=reserved */ - wqe->fcp_icmd.rsrvd3 = 0; bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); /* Always open the exchange */ bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); @@ -8203,6 +8325,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); + wqe->gen_req.max_response_payload_len = total_len - xmit_len; command_type = OTHER_COMMAND; break; case CMD_XMIT_ELS_RSP64_CX: @@ -9831,6 +9954,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abort_cmd) != 0) continue; + /* + * If the iocbq is already being aborted, don't take a second + * action, but do count it. + */ + if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) + continue; + /* issue ABTS for this IOCB based on iotag */ abtsiocb = lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { @@ -9838,6 +9968,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, continue; } + /* indicate the IO is being aborted by the driver. */ + iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; + cmd = &iocbq->iocb; abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; @@ -9847,7 +9980,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpClass = cmd->ulpClass; - abtsiocb->vport = phba->pport; + abtsiocb->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; @@ -10063,6 +10196,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, if (iocb_completed) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0331 IOCB wake signaled\n"); + /* Note: we are not indicating if the IOCB has a success + * status or not - that's for the caller to check. + * IOCB_SUCCESS means just that the command was sent and + * completed. Not that it completed successfully. 
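The abort-loop hunks above turn LPFC_DRIVER_ABORTED into a one-shot gate: an iocb already carrying the flag is skipped rather than aborted a second time, and the flag is set before the ABTS is built so a racing path sees it immediately. Reduced to its essentials; the real code does this under hbalock, which is what makes the test-and-set safe:

#include <linux/types.h>

#define DRV_ABORTED	(1U << 30)	/* hypothetical bit, not the real flag value */

/* Caller holds the relevant lock; returns true if we own the abort. */
static bool try_claim_abort(u32 *iocb_flag)
{
	if (*iocb_flag & DRV_ABORTED)
		return false;		/* someone is already aborting it */
	*iocb_flag |= DRV_ABORTED;	/* claim it, then issue the ABTS */
	return true;
}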
+ * */ } else if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0338 IOCB wait timeout error - no " @@ -11064,8 +11202,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbOut, struct lpfc_wcqe_complete *wcqe) { + int numBdes, i; unsigned long iflags; - uint32_t status; + uint32_t status, max_response; + struct lpfc_dmabuf *dmabuf; + struct ulp_bde64 *bpl, bde; size_t offset = offsetof(struct lpfc_iocbq, iocb); memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, @@ -11082,7 +11223,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; else { pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; - pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; + switch (pIocbOut->iocb.ulpCommand) { + case CMD_ELS_REQUEST64_CR: + dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; + bpl = (struct ulp_bde64 *)dmabuf->virt; + bde.tus.w = le32_to_cpu(bpl[1].tus.w); + max_response = bde.tus.f.bdeSize; + break; + case CMD_GEN_REQUEST64_CR: + max_response = 0; + if (!pIocbOut->context3) + break; + numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ + sizeof(struct ulp_bde64); + dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; + bpl = (struct ulp_bde64 *)dmabuf->virt; + for (i = 0; i < numBdes; i++) { + bde.tus.w = le32_to_cpu(bpl[i].tus.w); + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) + max_response += bde.tus.f.bdeSize; + } + break; + default: + max_response = wcqe->total_data_placed; + break; + } + if (max_response < wcqe->total_data_placed) + pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; + else + pIocbIn->iocb.un.genreq64.bdl.bdeSize = + wcqe->total_data_placed; } /* Convert BG errors for completion status */ @@ -12233,7 +12403,6 @@ static void __iomem * lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) { struct pci_dev *pdev; - unsigned long bar_map, bar_map_len; if (!phba->pcidev) return NULL; @@ -12242,25 +12411,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) switch (pci_barset) { case WQ_PCI_BAR_0_AND_1: - if (!phba->pci_bar0_memmap_p) { - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0); - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); - phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len); - } return phba->pci_bar0_memmap_p; case WQ_PCI_BAR_2_AND_3: - if (!phba->pci_bar2_memmap_p) { - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2); - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); - phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len); - } return phba->pci_bar2_memmap_p; case WQ_PCI_BAR_4_AND_5: - if (!phba->pci_bar4_memmap_p) { - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4); - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); - phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len); - } return phba->pci_bar4_memmap_p; default: break; @@ -15104,6 +15258,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) uint16_t max_rpi, rpi_limit; uint16_t rpi_remaining, lrpi = 0; struct lpfc_rpi_hdr *rpi_hdr; + unsigned long iflag; max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; rpi_limit = phba->sli4_hba.next_rpi; @@ -15112,7 +15267,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * Fetch the next logical rpi. Because this index is logical, * the driver starts at 0 each time. 
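lpfc_sli4_alloc_rpi() above is a bitmap allocator, and the hunks switch it to spin_lock_irqsave() because it can be reached from contexts where interrupts are already disabled and must stay that way on unlock. The skeleton of such an allocator, with hypothetical names:

#include <linux/bitops.h>
#include <linux/spinlock.h>

static int alloc_id(unsigned long *bmask, unsigned long limit, spinlock_t *lock)
{
	unsigned long flags;
	unsigned long id;

	spin_lock_irqsave(lock, flags);		/* safe in any context */
	id = find_next_zero_bit(bmask, limit, 0);
	if (id < limit)
		set_bit(id, bmask);
	spin_unlock_irqrestore(lock, flags);

	return id < limit ? (int)id : -1;	/* -1 stands in for the error id */
}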
*/ - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflag); rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); if (rpi >= rpi_limit) rpi = LPFC_RPI_ALLOC_ERROR; @@ -15128,7 +15283,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) */ if ((rpi == LPFC_RPI_ALLOC_ERROR) && (phba->sli4_hba.rpi_count >= max_rpi)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return rpi; } @@ -15137,7 +15292,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * extents. */ if (!phba->sli4_hba.rpi_hdrs_in_use) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return rpi; } @@ -15148,7 +15303,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * how many are supported max by the device. */ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); if (!rpi_hdr) { @@ -15808,7 +15963,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) { - struct lpfc_fcf_pri *fcf_pri; + struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2762 FCF (x%x) reached driver's book " @@ -15818,7 +15973,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) } /* Clear the eligible FCF record index bmask */ spin_lock_irq(&phba->hbalock); - list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { + list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, + list) { if (fcf_pri->fcf_rec.fcf_index == fcf_index) { list_del_init(&fcf_pri->list); break; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 97617996206d..6b0f2478706e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -58,7 +58,7 @@ struct lpfc_iocbq { IOCB_t iocb; /* IOCB cmd */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ - uint16_t iocb_flag; + uint32_t iocb_flag; #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ #define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ #define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ @@ -73,11 +73,11 @@ struct lpfc_iocbq { #define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ #define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ #define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ +#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */ #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ #define LPFC_FIP_ELS_ID_SHIFT 14 - uint8_t rsvd2; uint32_t drvrTimeout; /* driver timeout in seconds */ uint32_t fcp_wqidx; /* index to FCP work queue */ struct lpfc_vport *vport;/* virtual port pointer */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 5bcc38223ac9..298c8cd1a89d 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -523,7 +523,7 @@ struct lpfc_sli4_hba { struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ - uint8_t fw_func_mode; /* FW function protocol mode */ + uint32_t fw_func_mode; /* FW function protocol mode */ uint32_t ulp0_mode; /* ULP0 protocol mode */ uint32_t ulp1_mode; /* ULP1 protocol mode */ @@ -673,6 +673,7 @@ void lpfc_sli4_queue_unset(struct lpfc_hba *); 
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); +void lpfc_sli4_free_xri(struct lpfc_hba *, int); int lpfc_sli4_post_async_mbox(struct lpfc_hba *); int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 21859d2006ce..e3094c4e143b 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.41" +#define LPFC_DRIVER_VERSION "8.3.43" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 90c95a3385d1..816db12ef5d5 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4244,6 +4244,7 @@ static struct scsi_host_template megaraid_template = { .eh_device_reset_handler = megaraid_reset, .eh_bus_reset_handler = megaraid_reset, .eh_host_reset_handler = megaraid_reset, + .no_write_same = 1, }; static int diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index d1a4b82836ea..e2237a97cb9d 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -367,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = { .eh_host_reset_handler = megaraid_reset_handler, .change_queue_depth = megaraid_change_queue_depth, .use_clustering = ENABLE_CLUSTERING, + .no_write_same = 1, .sdev_attrs = megaraid_sdev_attrs, .shost_attrs = megaraid_shost_attrs, }; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 04a42a505852..e9e543c58485 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.600.18.00-rc1" -#define MEGASAS_RELDATE "May. 15, 2013" -#define MEGASAS_EXT_VERSION "Wed. May. 15 17:00:00 PDT 2013" +#define MEGASAS_VERSION "06.700.06.00-rc1" +#define MEGASAS_RELDATE "Aug. 31, 2013" +#define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013" /* * Device IDs @@ -170,6 +170,7 @@ #define MR_DCMD_CTRL_GET_INFO 0x01010000 #define MR_DCMD_LD_GET_LIST 0x03010000 +#define MR_DCMD_LD_LIST_QUERY 0x03010100 #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 #define MR_FLUSH_CTRL_CACHE 0x01 @@ -345,6 +346,15 @@ enum MR_PD_QUERY_TYPE { MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, }; +enum MR_LD_QUERY_TYPE { + MR_LD_QUERY_TYPE_ALL = 0, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1, + MR_LD_QUERY_TYPE_USED_TGT_IDS = 2, + MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3, + MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4, +}; + + #define MR_EVT_CFG_CLEARED 0x0004 #define MR_EVT_LD_STATE_CHANGE 0x0051 #define MR_EVT_PD_INSERTED 0x005b @@ -435,6 +445,14 @@ struct MR_LD_LIST { } ldList[MAX_LOGICAL_DRIVES]; } __packed; +struct MR_LD_TARGETID_LIST { + u32 size; + u32 count; + u8 pad[3]; + u8 targetId[MAX_LOGICAL_DRIVES]; +}; + + /* * SAS controller properties */ @@ -474,21 +492,39 @@ struct megasas_ctrl_prop { * a bit in the following structure. 
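The firmware lays these property bits out in little-endian bit order, while C bitfield packing follows the host convention; the structure that follows therefore mirrors its field list under __BIG_ENDIAN_BITFIELD so each flag keeps its wire position on either kind of host. The pattern in miniature:

#include <asm/byteorder.h>
#include <linux/types.h>

/* Toy mirror of the megaraid pattern: two flags plus padding in one u32. */
struct toy_props {
#if defined(__BIG_ENDIAN_BITFIELD)
	u32 reserved:30;
	u32 flag_b:1;
	u32 flag_a:1;	/* wire bit 0, so declared last here */
#else
	u32 flag_a:1;	/* wire bit 0, declared first on little endian */
	u32 flag_b:1;
	u32 reserved:30;
#endif
};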
*/ struct { - u32 copyBackDisabled : 1; - u32 SMARTerEnabled : 1; - u32 prCorrectUnconfiguredAreas : 1; - u32 useFdeOnly : 1; - u32 disableNCQ : 1; - u32 SSDSMARTerEnabled : 1; - u32 SSDPatrolReadEnabled : 1; - u32 enableSpinDownUnconfigured : 1; - u32 autoEnhancedImport : 1; - u32 enableSecretKeyControl : 1; - u32 disableOnlineCtrlReset : 1; - u32 allowBootWithPinnedCache : 1; - u32 disableSpinDownHS : 1; - u32 enableJBOD : 1; - u32 reserved :18; +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:18; + u32 enableJBOD:1; + u32 disableSpinDownHS:1; + u32 allowBootWithPinnedCache:1; + u32 disableOnlineCtrlReset:1; + u32 enableSecretKeyControl:1; + u32 autoEnhancedImport:1; + u32 enableSpinDownUnconfigured:1; + u32 SSDPatrolReadEnabled:1; + u32 SSDSMARTerEnabled:1; + u32 disableNCQ:1; + u32 useFdeOnly:1; + u32 prCorrectUnconfiguredAreas:1; + u32 SMARTerEnabled:1; + u32 copyBackDisabled:1; +#else + u32 copyBackDisabled:1; + u32 SMARTerEnabled:1; + u32 prCorrectUnconfiguredAreas:1; + u32 useFdeOnly:1; + u32 disableNCQ:1; + u32 SSDSMARTerEnabled:1; + u32 SSDPatrolReadEnabled:1; + u32 enableSpinDownUnconfigured:1; + u32 autoEnhancedImport:1; + u32 enableSecretKeyControl:1; + u32 disableOnlineCtrlReset:1; + u32 allowBootWithPinnedCache:1; + u32 disableSpinDownHS:1; + u32 enableJBOD:1; + u32 reserved:18; +#endif } OnOffProperties; u8 autoSnapVDSpace; u8 viewSpace; @@ -802,6 +838,30 @@ struct megasas_ctrl_info { u16 cacheMemorySize; /*7A2h */ struct { /*7A4h */ +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:11; + u32 supportUnevenSpans:1; + u32 dedicatedHotSparesLimited:1; + u32 headlessMode:1; + u32 supportEmulatedDrives:1; + u32 supportResetNow:1; + u32 realTimeScheduler:1; + u32 supportSSDPatrolRead:1; + u32 supportPerfTuning:1; + u32 disableOnlinePFKChange:1; + u32 supportJBOD:1; + u32 supportBootTimePFKChange:1; + u32 supportSetLinkSpeed:1; + u32 supportEmergencySpares:1; + u32 supportSuspendResumeBGops:1; + u32 blockSSDWriteCacheChange:1; + u32 supportShieldState:1; + u32 supportLdBBMInfo:1; + u32 supportLdPIType3:1; + u32 supportLdPIType2:1; + u32 supportLdPIType1:1; + u32 supportPIcontroller:1; +#else u32 supportPIcontroller:1; u32 supportLdPIType1:1; u32 supportLdPIType2:1; @@ -827,6 +887,7 @@ struct megasas_ctrl_info { u32 supportUnevenSpans:1; u32 reserved:11; +#endif } adapterOperations2; u8 driverVersion[32]; /*7A8h */ @@ -863,7 +924,7 @@ struct megasas_ctrl_info { * =============================== */ #define MEGASAS_MAX_PD_CHANNELS 2 -#define MEGASAS_MAX_LD_CHANNELS 2 +#define MEGASAS_MAX_LD_CHANNELS 1 #define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ MEGASAS_MAX_LD_CHANNELS) #define MEGASAS_MAX_DEV_PER_CHANNEL 128 @@ -1051,9 +1112,15 @@ union megasas_sgl_frame { typedef union _MFI_CAPABILITIES { struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:30; + u32 support_additional_msix:1; + u32 support_fp_remote_lun:1; +#else u32 support_fp_remote_lun:1; u32 support_additional_msix:1; u32 reserved:30; +#endif } mfi_capabilities; u32 reg; } MFI_CAPABILITIES; @@ -1464,6 +1531,7 @@ struct megasas_instance { struct megasas_register_set __iomem *reg_set; u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; + struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; u8 ld_ids[MEGASAS_MAX_LD_IDS]; s8 init_id; @@ -1656,4 +1724,16 @@ struct megasas_mgmt_info { int max_index; }; +u8 +MR_BuildRaidContext(struct megasas_instance *instance, + struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, + struct 
MR_FW_RAID_MAP_ALL *map, u8 **raidLUN); +u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); +struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); +u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map); +u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); +u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map); +u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); + #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index fdd13ef8124f..c99812bf2a73 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,7 +18,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : 06.600.18.00-rc1 + * Version : 06.700.06.00-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote @@ -92,6 +92,8 @@ MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); static int megasas_get_pd_list(struct megasas_instance *instance); +static int megasas_ld_list_query(struct megasas_instance *instance, + u8 query_type); static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); @@ -374,13 +376,11 @@ static int megasas_check_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { - u32 consumer; - consumer = *instance->consumer; if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && - (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) { + (le32_to_cpu(*instance->consumer) == + MEGASAS_ADPRESET_INPROG_SIGN)) return 1; - } return 0; } @@ -629,9 +629,10 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance, { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); - writel(0, &(regs)->inbound_high_queue_port); - writel((frame_phys_addr | (frame_count<<1))|1, - &(regs)->inbound_low_queue_port); + writel(upper_32_bits(frame_phys_addr), + &(regs)->inbound_high_queue_port); + writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, + &(regs)->inbound_low_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } @@ -879,8 +880,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) struct megasas_header *frame_hdr = &cmd->frame->hdr; - frame_hdr->cmd_status = 0xFF; - frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE; + frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); /* * Issue the frame using inbound queue port @@ -944,10 +945,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = 0xFF; - abort_fr->flags = 0; - abort_fr->abort_context = cmd_to_abort->index; - abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; - abort_fr->abort_mfi_phys_addr_hi = 0; + abort_fr->flags = cpu_to_le16(0); + abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); + abort_fr->abort_mfi_phys_addr_lo = + cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); + abort_fr->abort_mfi_phys_addr_hi = + cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); cmd->sync_cmd = 1; cmd->cmd_status = 0xFF; @@ -986,8 +989,8 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, if (sge_count) { 
scsi_for_each_sg(scp, os_sgl, sge_count, i) { - mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); - mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); + mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl)); + mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl)); } } return sge_count; @@ -1015,8 +1018,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { - mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); - mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); + mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl)); + mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl)); } } return sge_count; @@ -1043,10 +1046,11 @@ megasas_make_sgl_skinny(struct megasas_instance *instance, if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { - mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); + mfi_sgl->sge_skinny[i].length = + cpu_to_le32(sg_dma_len(os_sgl)); mfi_sgl->sge_skinny[i].phys_addr = - sg_dma_address(os_sgl); - mfi_sgl->sge_skinny[i].flag = 0; + cpu_to_le64(sg_dma_address(os_sgl)); + mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0); } } return sge_count; @@ -1155,8 +1159,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, pthru->cdb_len = scp->cmd_len; pthru->timeout = 0; pthru->pad_0 = 0; - pthru->flags = flags; - pthru->data_xfer_len = scsi_bufflen(scp); + pthru->flags = cpu_to_le16(flags); + pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); @@ -1168,18 +1172,18 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, if ((scp->request->timeout / HZ) > 0xFFFF) pthru->timeout = 0xFFFF; else - pthru->timeout = scp->request->timeout / HZ; + pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); } /* * Construct SGL */ if (instance->flag_ieee == 1) { - pthru->flags |= MFI_FRAME_SGL64; + pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); pthru->sge_count = megasas_make_sgl_skinny(instance, scp, &pthru->sgl); } else if (IS_DMA64) { - pthru->flags |= MFI_FRAME_SGL64; + pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); pthru->sge_count = megasas_make_sgl64(instance, scp, &pthru->sgl); } else @@ -1196,8 +1200,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, * Sense info specific */ pthru->sense_len = SCSI_SENSE_BUFFERSIZE; - pthru->sense_buf_phys_addr_hi = 0; - pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + pthru->sense_buf_phys_addr_hi = + cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); + pthru->sense_buf_phys_addr_lo = + cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); /* * Compute the total number of frames this command consumes. FW uses @@ -1248,7 +1254,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, ldio->timeout = 0; ldio->reserved_0 = 0; ldio->pad_0 = 0; - ldio->flags = flags; + ldio->flags = cpu_to_le16(flags); ldio->start_lba_hi = 0; ldio->access_byte = (scp->cmd_len != 6) ? 
scp->cmnd[1] : 0; @@ -1256,52 +1262,59 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, * 6-byte READ(0x08) or WRITE(0x0A) cdb */ if (scp->cmd_len == 6) { - ldio->lba_count = (u32) scp->cmnd[4]; - ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) | - ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; + ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | + ((u32) scp->cmnd[2] << 8) | + (u32) scp->cmnd[3]); - ldio->start_lba_lo &= 0x1FFFFF; + ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); } /* * 10-byte READ(0x28) or WRITE(0x2A) cdb */ else if (scp->cmd_len == 10) { - ldio->lba_count = (u32) scp->cmnd[8] | - ((u32) scp->cmnd[7] << 8); - ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | - ((u32) scp->cmnd[3] << 16) | - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | + ((u32) scp->cmnd[7] << 8)); + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); } /* * 12-byte READ(0xA8) or WRITE(0xAA) cdb */ else if (scp->cmd_len == 12) { - ldio->lba_count = ((u32) scp->cmnd[6] << 24) | - ((u32) scp->cmnd[7] << 16) | - ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | + (u32) scp->cmnd[9]); - ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | - ((u32) scp->cmnd[3] << 16) | - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); } /* * 16-byte READ(0x88) or WRITE(0x8A) cdb */ else if (scp->cmd_len == 16) { - ldio->lba_count = ((u32) scp->cmnd[10] << 24) | - ((u32) scp->cmnd[11] << 16) | - ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; + ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | + ((u32) scp->cmnd[11] << 16) | + ((u32) scp->cmnd[12] << 8) | + (u32) scp->cmnd[13]); - ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | - ((u32) scp->cmnd[7] << 16) | - ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | + (u32) scp->cmnd[9]); - ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | - ((u32) scp->cmnd[3] << 16) | - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); } @@ -1309,11 +1322,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, * Construct SGL */ if (instance->flag_ieee) { - ldio->flags |= MFI_FRAME_SGL64; + ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); ldio->sge_count = megasas_make_sgl_skinny(instance, scp, &ldio->sgl); } else if (IS_DMA64) { - ldio->flags |= MFI_FRAME_SGL64; + ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); } else ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); @@ -1329,7 +1342,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, */ ldio->sense_len = SCSI_SENSE_BUFFERSIZE; ldio->sense_buf_phys_addr_hi = 0; - ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); /* * Compute the total number of frames this command consumes. 
FW uses @@ -1400,20 +1413,32 @@ megasas_dump_pending_frames(struct megasas_instance *instance) ldio = (struct megasas_io_frame *)cmd->frame; mfi_sgl = &ldio->sgl; sgcount = ldio->sge_count; - printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount); + printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," + " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", + instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, + le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), + le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); } else { pthru = (struct megasas_pthru_frame *) cmd->frame; mfi_sgl = &pthru->sgl; sgcount = pthru->sge_count; - printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount); + printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " + "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", + instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, + pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), + le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); } if(megasas_dbg_lvl & MEGASAS_DBG_LVL){ for (n = 0; n < sgcount; n++){ if (IS_DMA64) - printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ; + printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ", + le32_to_cpu(mfi_sgl->sge64[n].length), + le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); else - printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ; + printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ", + le32_to_cpu(mfi_sgl->sge32[n].length), + le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); } } printk(KERN_ERR "\n"); @@ -1674,11 +1699,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) spin_lock_irqsave(&instance->completion_lock, flags); - producer = *instance->producer; - consumer = *instance->consumer; + producer = le32_to_cpu(*instance->producer); + consumer = le32_to_cpu(*instance->consumer); while (consumer != producer) { - context = instance->reply_queue[consumer]; + context = le32_to_cpu(instance->reply_queue[consumer]); if (context >= instance->max_fw_cmds) { printk(KERN_ERR "Unexpected context value %x\n", context); @@ -1695,7 +1720,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) } } - *instance->consumer = producer; + *instance->consumer = cpu_to_le32(producer); spin_unlock_irqrestore(&instance->completion_lock, flags); @@ -1716,7 +1741,7 @@ void megasas_do_ocr(struct megasas_instance *instance) if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { - *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN; + *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); } instance->instancet->disable_intr(instance); instance->adprecovery = 
MEGASAS_ADPRESET_SM_INFAULT; @@ -2123,6 +2148,7 @@ static struct scsi_host_template megasas_template = { .bios_param = megasas_bios_param, .use_clustering = ENABLE_CLUSTERING, .change_queue_depth = megasas_change_queue_depth, + .no_write_same = 1, }; /** @@ -2186,6 +2212,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, struct megasas_header *hdr = &cmd->frame->hdr; unsigned long flags; struct fusion_context *fusion = instance->ctrl_context; + u32 opcode; /* flag for the retry reset */ cmd->retry_for_fw_reset = 0; @@ -2287,9 +2314,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, case MFI_CMD_SMP: case MFI_CMD_STP: case MFI_CMD_DCMD: + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); /* Check for LD map update */ - if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && - (cmd->frame->dcmd.mbox.b[1] == 1)) { + if ((opcode == MR_DCMD_LD_MAP_GET_INFO) + && (cmd->frame->dcmd.mbox.b[1] == 1)) { fusion->fast_path_io = 0; spin_lock_irqsave(instance->host->host_lock, flags); if (cmd->frame->hdr.cmd_status != 0) { @@ -2323,8 +2351,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, flags); break; } - if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || - cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { + if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || + opcode == MR_DCMD_CTRL_EVENT_GET) { spin_lock_irqsave(&poll_aen_lock, flags); megasas_poll_wait_aen = 0; spin_unlock_irqrestore(&poll_aen_lock, flags); @@ -2333,7 +2361,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, /* * See if got an event notification */ - if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) + if (opcode == MR_DCMD_CTRL_EVENT_WAIT) megasas_service_aen(instance, cmd); else megasas_complete_int_cmd(instance, cmd); @@ -2606,7 +2634,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, PCI_DEVICE_ID_LSI_VERDE_ZCR)) { *instance->consumer = - MEGASAS_ADPRESET_INPROG_SIGN; + cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); } @@ -2983,7 +3011,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) } memset(cmd->frame, 0, total_sz); - cmd->frame->io.context = cmd->index; + cmd->frame->io.context = cpu_to_le32(cmd->index); cmd->frame->io.pad_0 = 0; if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && @@ -3143,13 +3171,13 @@ megasas_get_pd_list(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); - dcmd->opcode = MR_DCMD_PD_LIST_QUERY; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); + dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); + dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); if (!megasas_issue_polled(instance, cmd)) { ret = 0; @@ -3164,22 +3192,24 @@ megasas_get_pd_list(struct megasas_instance *instance) pd_addr = ci->addr; if ( ret == 0 && - (ci->count < + (le32_to_cpu(ci->count) < (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { - memset(instance->pd_list, 0, + memset(instance->local_pd_list, 0, 
MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); - for (pd_index = 0; pd_index < ci->count; pd_index++) { + for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { - instance->pd_list[pd_addr->deviceId].tid = - pd_addr->deviceId; - instance->pd_list[pd_addr->deviceId].driveType = + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = + le16_to_cpu(pd_addr->deviceId); + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = pd_addr->scsiDevType; - instance->pd_list[pd_addr->deviceId].driveState = + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = MR_PD_STATE_SYSTEM; pd_addr++; } + memcpy(instance->pd_list, instance->local_pd_list, + sizeof(instance->pd_list)); } pci_free_consistent(instance->pdev, @@ -3207,6 +3237,7 @@ megasas_get_ld_list(struct megasas_instance *instance) struct megasas_dcmd_frame *dcmd; struct MR_LD_LIST *ci; dma_addr_t ci_h = 0; + u32 ld_count; cmd = megasas_get_cmd(instance); @@ -3233,12 +3264,12 @@ megasas_get_ld_list(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; - dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); - dcmd->opcode = MR_DCMD_LD_GET_LIST; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); dcmd->pad_0 = 0; if (!megasas_issue_polled(instance, cmd)) { @@ -3247,12 +3278,14 @@ megasas_get_ld_list(struct megasas_instance *instance) ret = -1; } + ld_count = le32_to_cpu(ci->ldCount); + /* the following function will get the instance PD LIST */ - if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) { + if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) { memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); - for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { + for (ld_index = 0; ld_index < ld_count; ld_index++) { if (ci->ldList[ld_index].state != 0) { ids = ci->ldList[ld_index].ref.targetId; instance->ld_ids[ids] = @@ -3271,6 +3304,87 @@ megasas_get_ld_list(struct megasas_instance *instance) } /** + * megasas_ld_list_query - Returns FW's ld_list structure + * @instance: Adapter soft state + * @query_type: ld_list query type + * + * Issues an internal command (DCMD) to get the FW's controller LD + * list structure. This information is mainly used to find out which + * LDs the FW exposes to the host.
+ */ +static int +megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) +{ + int ret = 0, ld_index = 0, ids = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_LD_TARGETID_LIST *ci; + dma_addr_t ci_h = 0; + u32 tgtid_count; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + printk(KERN_WARNING + "megasas:(megasas_ld_list_query): Failed to get cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + ci = pci_alloc_consistent(instance->pdev, + sizeof(struct MR_LD_TARGETID_LIST), &ci_h); + + if (!ci) { + printk(KERN_WARNING + "megasas: Failed to alloc mem for ld_list_query\n"); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.b[0] = query_type; + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); + dcmd->timeout = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); + dcmd->pad_0 = 0; + + if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) { + ret = 0; + } else { + /* On failure, call older LD list DCMD */ + ret = 1; + } + + tgtid_count = le32_to_cpu(ci->count); + + if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) { + memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); + for (ld_index = 0; ld_index < tgtid_count; ld_index++) { + ids = ci->targetId[ld_index]; + instance->ld_ids[ids] = ci->targetId[ld_index]; + } + + } + + pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), + ci, ci_h); + + megasas_return_cmd(instance, cmd); + + return ret; +} + +/** * megasas_get_controller_info - Returns FW's controller structure * @instance: Adapter soft state * @ctrl_info: Controller information structure @@ -3313,13 +3427,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance, dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); - dcmd->opcode = MR_DCMD_CTRL_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info); + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); if (!megasas_issue_polled(instance, cmd)) { ret = 0; @@ -3375,17 +3489,20 @@ megasas_issue_init_mfi(struct megasas_instance *instance) memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); init_frame->context = context; - initq_info->reply_queue_entries = instance->max_fw_cmds + 1; - initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; + initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); + initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); - initq_info->producer_index_phys_addr_lo = instance->producer_h; - initq_info->consumer_index_phys_addr_lo = instance->consumer_h; + initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); + initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); init_frame->cmd = 
MFI_CMD_INIT; init_frame->cmd_status = 0xFF; - init_frame->queue_info_new_phys_addr_lo = initq_info_h; + init_frame->queue_info_new_phys_addr_lo = + cpu_to_le32(lower_32_bits(initq_info_h)); + init_frame->queue_info_new_phys_addr_hi = + cpu_to_le32(upper_32_bits(initq_info_h)); - init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); + init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); /* * disable the intr before firing the init frame to FW @@ -3648,7 +3765,9 @@ static int megasas_init_fw(struct megasas_instance *instance) megasas_get_pd_list(instance); memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); - megasas_get_ld_list(instance); + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + megasas_get_ld_list(instance); ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); @@ -3665,8 +3784,8 @@ static int megasas_init_fw(struct megasas_instance *instance) if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * - ctrl_info->max_strips_per_io; - max_sectors_2 = ctrl_info->max_request_size; + le16_to_cpu(ctrl_info->max_strips_per_io); + max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); @@ -3675,14 +3794,18 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->is_imr = 0; dev_info(&instance->pdev->dev, "Controller type: MR," "Memory size is: %dMB\n", - ctrl_info->memory_size); + le16_to_cpu(ctrl_info->memory_size)); } else { instance->is_imr = 1; dev_info(&instance->pdev->dev, "Controller type: iMR\n"); } + /* OnOffProperties are converted into CPU arch*/ + le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); instance->disableOnlineCtrlReset = ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; + /* adapterOperations2 are converted into CPU arch*/ + le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); instance->UnevenSpanSupport = ctrl_info->adapterOperations2.supportUnevenSpans; if (instance->UnevenSpanSupport) { @@ -3696,7 +3819,6 @@ static int megasas_init_fw(struct megasas_instance *instance) } } - instance->max_sectors_per_req = instance->max_num_sge * PAGE_SIZE / 512; if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) @@ -3802,20 +3924,24 @@ megasas_get_seq_num(struct megasas_instance *instance, dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); - dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = el_info_h; - dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); megasas_issue_blocked_cmd(instance, cmd); /* * Copy the data back into callers buffer */ - memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); + eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); + eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); + eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); + eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); + eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); 
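/*
 * A note on the pattern at work in these megaraid_sas hunks (editorial
 * sketch, not part of the patch): frames and buffers shared with the
 * controller stay little-endian in memory, and the driver converts on
 * every CPU access. That is why the whole-struct memcpy() above becomes
 * per-field le32_to_cpu() copies -- a raw memcpy() would hand a
 * big-endian host byte-swapped sequence numbers. The type and function
 * names below are hypothetical, chosen only to show the idiom.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_evt_log_info {		/* DMA-visible: firmware byte order */
	__le32 newest_seq_num;
	__le32 oldest_seq_num;
};

struct demo_evt_log_host {		/* driver-private: CPU byte order */
	u32 newest_seq_num;
	u32 oldest_seq_num;
};

/* CPU -> wire when building a frame; compiles to nothing on LE hosts */
static void demo_fill(struct demo_evt_log_info *fw, u32 newest, u32 oldest)
{
	fw->newest_seq_num = cpu_to_le32(newest);
	fw->oldest_seq_num = cpu_to_le32(oldest);
}

/* wire -> CPU when reading results back, one field at a time */
static void demo_copy_back(struct demo_evt_log_host *dst,
			   const struct demo_evt_log_info *src)
{
	dst->newest_seq_num = le32_to_cpu(src->newest_seq_num);
	dst->oldest_seq_num = le32_to_cpu(src->oldest_seq_num);
}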
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), el_info, el_info_h); @@ -3862,6 +3988,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, if (instance->aen_cmd) { prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; + prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale); /* * A class whose enum value is smaller is inclusive of all @@ -3917,16 +4044,16 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); + dcmd->mbox.w[0] = cpu_to_le32(seq_num); instance->last_seq_num = seq_num; - dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); - dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; - dcmd->mbox.w[0] = seq_num; - dcmd->mbox.w[1] = curr_aen.word; - dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; - dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); + dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); if (instance->aen_cmd != NULL) { megasas_return_cmd(instance, cmd); @@ -3972,8 +4099,9 @@ static int megasas_start_aen(struct megasas_instance *instance) class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; - return megasas_register_aen(instance, eli.newest_seq_num + 1, - class_locale.word); + return megasas_register_aen(instance, + eli.newest_seq_num + 1, + class_locale.word); } /** @@ -4068,6 +4196,7 @@ megasas_set_dma_mask(struct pci_dev *pdev) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) goto fail_set_dma_mask; } + return 0; fail_set_dma_mask: @@ -4385,11 +4514,11 @@ static void megasas_flush_cache(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; - dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; - dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; megasas_issue_blocked_cmd(instance, cmd); @@ -4430,11 +4559,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; - dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; - dcmd->opcode = opcode; + dcmd->opcode = cpu_to_le32(opcode); megasas_issue_blocked_cmd(instance, cmd); @@ -4845,10 +4974,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * alone separately */ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); - cmd->frame->hdr.context = cmd->index; + cmd->frame->hdr.context = cpu_to_le32(cmd->index); cmd->frame->hdr.pad_0 = 0; - cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | - MFI_FRAME_SENSE64); + cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | + MFI_FRAME_SGL64 | + MFI_FRAME_SENSE64)); /* * The management interface between applications and the fw uses @@ -4882,8 +5012,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * We don't change the 
dma_coherent_mask, so * pci_alloc_consistent only returns 32bit addresses */ - kern_sge32[i].phys_addr = (u32) buf_handle; - kern_sge32[i].length = ioc->sgl[i].iov_len; + kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); + kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); /* * We created a kernel buffer corresponding to the @@ -4906,7 +5036,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, sense_ptr = (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); - *sense_ptr = sense_handle; + *sense_ptr = cpu_to_le32(sense_handle); } /* @@ -4966,9 +5096,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, for (i = 0; i < ioc->sge_count; i++) { if (kbuff_arr[i]) dma_free_coherent(&instance->pdev->dev, - kern_sge32[i].length, + le32_to_cpu(kern_sge32[i].length), kbuff_arr[i], - kern_sge32[i].phys_addr); + le32_to_cpu(kern_sge32[i].phys_addr)); } megasas_return_cmd(instance, cmd); @@ -5322,7 +5452,7 @@ megasas_aen_polling(struct work_struct *work) host = instance->host; if (instance->evt_detail) { - switch (instance->evt_detail->code) { + switch (le32_to_cpu(instance->evt_detail->code)) { case MR_EVT_PD_INSERTED: if (megasas_get_pd_list(instance) == 0) { for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { @@ -5384,7 +5514,9 @@ megasas_aen_polling(struct work_struct *work) case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: - megasas_get_ld_list(instance); + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; @@ -5394,7 +5526,7 @@ megasas_aen_polling(struct work_struct *work) (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, - i + MEGASAS_MAX_LD_CHANNELS, + MEGASAS_MAX_PD_CHANNELS + i, j, 0); @@ -5413,7 +5545,9 @@ megasas_aen_polling(struct work_struct *work) doscan = 0; break; case MR_EVT_LD_CREATED: - megasas_get_ld_list(instance); + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; @@ -5422,14 +5556,14 @@ megasas_aen_polling(struct work_struct *work) (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, - i+MEGASAS_MAX_LD_CHANNELS, + MEGASAS_MAX_PD_CHANNELS + i, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (!sdev1) { scsi_add_device(host, - i + 2, + MEGASAS_MAX_PD_CHANNELS + i, j, 0); } } @@ -5478,18 +5612,20 @@ megasas_aen_polling(struct work_struct *work) } } - megasas_get_ld_list(instance); + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, - i+MEGASAS_MAX_LD_CHANNELS, j, 0); + MEGASAS_MAX_PD_CHANNELS + i, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (!sdev1) { scsi_add_device(host, - i+2, + MEGASAS_MAX_PD_CHANNELS + i, j, 0); } else { scsi_device_put(sdev1); @@ -5509,7 +5645,7 @@ megasas_aen_polling(struct work_struct *work) return ; } - seq_num = instance->evt_detail->seq_num + 1; + seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 4f401f753f8e..e24b6eb645b5 
100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -126,17 +126,17 @@ static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map) return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; } -static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) { - return map->raidMap.arMapInfo[ar].pd[arm]; + return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); } -static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) { - return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; + return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); } -static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) { return map->raidMap.devHndlInfo[pd].curDevHdl; } @@ -148,7 +148,7 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map) u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) { - return map->raidMap.ldTgtIdToLd[ldTgtId]; + return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]); } static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, @@ -167,18 +167,22 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; + struct MR_LD_RAID *raid; + int ldCount, num_lds; + u16 ld; + - if (pFwRaidMap->totalSize != + if (le32_to_cpu(pFwRaidMap->totalSize) != (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) { + (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) { printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP)) + (sizeof(struct MR_LD_SPAN_MAP) * - pFwRaidMap->ldCount))); + le32_to_cpu(pFwRaidMap->ldCount)))); printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), - pFwRaidMap->totalSize); + le32_to_cpu(pFwRaidMap->totalSize)); return 0; } @@ -187,6 +191,15 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) mr_update_load_balance_params(map, lbInfo); + num_lds = le32_to_cpu(map->raidMap.ldCount); + + /*Convert Raid capability values to CPU arch */ + for (ldCount = 0; ldCount < num_lds; ldCount++) { + ld = MR_TargetIdToLdGet(ldCount, map); + raid = MR_LdRaidGet(ld, map); + le32_to_cpus((u32 *)&raid->capability); + } + return 1; } @@ -200,23 +213,20 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { - for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { + for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) { quad = &pSpanBlock->block_span_info.quad[j]; - if (quad->diff == 0) + if (le32_to_cpu(quad->diff) == 0) return SPAN_INVALID; - if (quad->logStart <= row && row <= quad->logEnd && - (mega_mod64(row-quad->logStart, quad->diff)) == 0) { + if (le64_to_cpu(quad->logStart) <= row && row <= + le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart), + le32_to_cpu(quad->diff))) == 0) { if (span_blk != NULL) { u64 blk, debugBlk; - blk = - mega_div64_32( - (row-quad->logStart), - quad->diff); + blk = 
mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); debugBlk = blk; - blk = (blk + quad->offsetInSpan) << - raid->stripeShift; + blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; *span_blk = blk; } return span; @@ -257,8 +267,8 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) for (span = 0; span < raid->spanDepth; span++) dev_dbg(&instance->pdev->dev, "Span=%x," " number of quads=%x\n", span, - map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements); + le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements)); for (element = 0; element < MAX_QUAD_DEPTH; element++) { span_set = &(ldSpanInfo[ld].span_set[element]); if (span_set->span_row_data_width == 0) @@ -286,22 +296,22 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) (long unsigned int)span_set->data_strip_end); for (span = 0; span < raid->spanDepth; span++) { - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= element + 1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info. quad[element]; dev_dbg(&instance->pdev->dev, "Span=%x," "Quad=%x, diff=%x\n", span, - element, quad->diff); + element, le32_to_cpu(quad->diff)); dev_dbg(&instance->pdev->dev, "offset_in_span=0x%08lx\n", - (long unsigned int)quad->offsetInSpan); + (long unsigned int)le64_to_cpu(quad->offsetInSpan)); dev_dbg(&instance->pdev->dev, "logical start=0x%08lx, end=0x%08lx\n", - (long unsigned int)quad->logStart, - (long unsigned int)quad->logEnd); + (long unsigned int)le64_to_cpu(quad->logStart), + (long unsigned int)le64_to_cpu(quad->logEnd)); } } } @@ -348,23 +358,23 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance, continue; for (span = 0; span < raid->spanDepth; span++) - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= info+1) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span]. block_span_info.quad[info]; - if (quad->diff == 0) + if (le32_to_cpu(quad->diff) == 0) return SPAN_INVALID; - if (quad->logStart <= row && - row <= quad->logEnd && - (mega_mod64(row - quad->logStart, - quad->diff)) == 0) { + if (le64_to_cpu(quad->logStart) <= row && + row <= le64_to_cpu(quad->logEnd) && + (mega_mod64(row - le64_to_cpu(quad->logStart), + le32_to_cpu(quad->diff))) == 0) { if (span_blk != NULL) { u64 blk; blk = mega_div64_32 - ((row - quad->logStart), - quad->diff); - blk = (blk + quad->offsetInSpan) + ((row - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)); + blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; *span_blk = blk; } @@ -415,8 +425,8 @@ static u64 get_row_from_strip(struct megasas_instance *instance, span_set_Row = mega_div64_32(span_set_Strip, span_set->span_row_data_width) * span_set->diff; for (span = 0, span_offset = 0; span < raid->spanDepth; span++) - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= info+1) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
+ block_span_info.noElements) >= info+1) { if (strip_offset >= span_set->strip_offset[span]) span_offset++; @@ -480,18 +490,18 @@ static u64 get_strip_from_row(struct megasas_instance *instance, continue; for (span = 0; span < raid->spanDepth; span++) - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= info+1) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span].block_span_info.quad[info]; - if (quad->logStart <= row && - row <= quad->logEnd && - mega_mod64((row - quad->logStart), - quad->diff) == 0) { + if (le64_to_cpu(quad->logStart) <= row && + row <= le64_to_cpu(quad->logEnd) && + mega_mod64((row - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)) == 0) { strip = mega_div64_32 (((row - span_set->data_row_start) - - quad->logStart), - quad->diff); + - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)); strip *= span_set->span_row_data_width; strip += span_set->data_strip_start; strip += span_set->strip_offset[span]; @@ -543,8 +553,8 @@ static u32 get_arm_from_strip(struct megasas_instance *instance, span_set->span_row_data_width); for (span = 0, span_offset = 0; span < raid->spanDepth; span++) - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements >= info+1) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { if (strip_offset >= span_set->strip_offset[span]) span_offset = @@ -669,7 +679,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, } } - *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; + *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; return retval; @@ -765,7 +775,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, } } - *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; + *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; return retval; @@ -784,7 +794,7 @@ u8 MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map) + struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN) { struct MR_LD_RAID *raid; u32 ld, stripSize, stripe_mask; @@ -965,7 +975,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, regSize += stripSize; } - pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; + pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec); if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) pRAID_Context->regLockFlags = (isRead) ? @@ -974,9 +984,12 @@ MR_BuildRaidContext(struct megasas_instance *instance, pRAID_Context->regLockFlags = (isRead) ? 
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; pRAID_Context->VirtualDiskTgtId = raid->targetId; - pRAID_Context->regLockRowLBA = regStart; - pRAID_Context->regLockLength = regSize; + pRAID_Context->regLockRowLBA = cpu_to_le64(regStart); + pRAID_Context->regLockLength = cpu_to_le32(regSize); pRAID_Context->configSeqNum = raid->seqNum; + /* save pointer to raid->LUN array */ + *raidLUN = raid->LUN; + /*Get Phy Params only if FP capable, or else leave it to MR firmware to do the calculation.*/ @@ -1047,8 +1060,8 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, raid = MR_LdRaidGet(ld, map); for (element = 0; element < MAX_QUAD_DEPTH; element++) { for (span = 0; span < raid->spanDepth; span++) { - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements < + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) < element + 1) continue; span_set = &(ldSpanInfo[ld].span_set[element]); @@ -1056,14 +1069,14 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, spanBlock[span].block_span_info. quad[element]; - span_set->diff = quad->diff; + span_set->diff = le32_to_cpu(quad->diff); for (count = 0, span_row_width = 0; count < raid->spanDepth; count++) { - if (map->raidMap.ldSpanMap[ld]. + if (le32_to_cpu(map->raidMap.ldSpanMap[ld]. spanBlock[count]. block_span_info. - noElements >= element + 1) { + noElements) >= element + 1) { span_set->strip_offset[count] = span_row_width; span_row_width += @@ -1077,9 +1090,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, } span_set->span_row_data_width = span_row_width; - span_row = mega_div64_32(((quad->logEnd - - quad->logStart) + quad->diff), - quad->diff); + span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) - + le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)), + le32_to_cpu(quad->diff)); if (element == 0) { span_set->log_start_lba = 0; @@ -1096,7 +1109,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, span_set->data_row_start = 0; span_set->data_row_end = - (span_row * quad->diff) - 1; + (span_row * le32_to_cpu(quad->diff)) - 1; } else { span_set_prev = &(ldSpanInfo[ld]. 
span_set[element - 1]); @@ -1122,7 +1135,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, span_set_prev->data_row_end + 1; span_set->data_row_end = span_set->data_row_start + - (span_row * quad->diff) - 1; + (span_row * le32_to_cpu(quad->diff)) - 1; } break; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 417d5f167aa2..f6555921fd7a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -72,17 +72,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs); int megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd); - -u8 -MR_BuildRaidContext(struct megasas_instance *instance, - struct IO_REQUEST_INFO *io_info, - struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map); -u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); -struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); - -u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); - void megasas_check_and_restore_queue_depth(struct megasas_instance *instance); @@ -626,23 +615,20 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; - IOCInitMessage->MsgVersion = MPI2_VERSION; - IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION; - IOCInitMessage->SystemRequestFrameSize = - MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; - - IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth; - IOCInitMessage->ReplyDescriptorPostQueueAddress = - fusion->reply_frames_desc_phys; - IOCInitMessage->SystemRequestFrameBaseAddress = - fusion->io_request_frames_phys; + IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); + IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); + IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); + + IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); + IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys); + IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); IOCInitMessage->HostMSIxVectors = instance->msix_vectors; init_frame = (struct megasas_init_frame *)cmd->frame; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); frame_hdr = &cmd->frame->hdr; frame_hdr->cmd_status = 0xFF; - frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; @@ -652,17 +638,24 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) init_frame->driver_operations. 
mfi_capabilities.support_additional_msix = 1; + /* driver supports HA / Remote LUN over Fast Path interface */ + init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun + = 1; + /* Convert capability to LE32 */ + cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); - init_frame->queue_info_new_phys_addr_lo = ioc_init_handle; - init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST); + init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle); + init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; - req_desc->Words = cmd->frame_phys_addr; + req_desc->Words = 0; req_desc->MFAIo.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + cpu_to_le32s((u32 *)&req_desc->MFAIo); + req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr); /* * disable the intr before firing the init frame @@ -753,13 +746,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = size_map_info; - dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = size_map_info; + dcmd->data_xfer_len = cpu_to_le32(size_map_info); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); if (!megasas_issue_polled(instance, cmd)) ret = 0; @@ -828,7 +821,7 @@ megasas_sync_map_info(struct megasas_instance *instance) map = fusion->ld_map[instance->map_id & 1]; - num_lds = map->raidMap.ldCount; + num_lds = le32_to_cpu(map->raidMap.ldCount); dcmd = &cmd->frame->dcmd; @@ -856,15 +849,15 @@ megasas_sync_map_info(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_WRITE; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = size_map_info; + dcmd->data_xfer_len = cpu_to_le32(size_map_info); dcmd->mbox.b[0] = num_lds; dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; - dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = size_map_info; + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); instance->map_update_cmd = cmd; @@ -1067,9 +1060,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, spin_lock_irqsave(&instance->hba_lock, flags); - writel(req_desc_lo, - &(regs)->inbound_low_queue_port); - writel(req_desc_hi, &(regs)->inbound_high_queue_port); + writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port); + writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } @@ -1157,8 +1149,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, return sge_count; scsi_for_each_sg(scp, os_sgl, sge_count, i) { - sgl_ptr->Length = sg_dma_len(os_sgl); - sgl_ptr->Address = sg_dma_address(os_sgl); + sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); + sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); sgl_ptr->Flags = 0; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 
(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { @@ -1177,9 +1169,9 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, PCI_DEVICE_ID_LSI_INVADER) || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { - if ((cmd->io_request->IoFlags & - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) + if ((le16_to_cpu(cmd->io_request->IoFlags) & + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) cmd->io_request->ChainOffset = fusion-> chain_offset_io_request; @@ -1201,9 +1193,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); - sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) - *(sge_count - sg_processed)); - sg_chain->Address = cmd->sg_frame_phys_addr; + sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); + sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); sgl_ptr = (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; @@ -1261,7 +1252,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, io_request->CDB.EEDP32.PrimaryReferenceTag = cpu_to_be32(ref_tag); io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; - io_request->IoFlags = 32; /* Specify 32-byte cdb */ + io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ /* Transfer length */ cdb[28] = (u8)((num_blocks >> 24) & 0xff); @@ -1271,19 +1262,19 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, /* set SCSI IO EEDPFlags */ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { - io_request->EEDPFlags = + io_request->EEDPFlags = cpu_to_le16( MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); } else { - io_request->EEDPFlags = + io_request->EEDPFlags = cpu_to_le16( MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); } - io_request->Control |= (0x4 << 26); - io_request->EEDPBlockSize = scp->device->sector_size; + io_request->Control |= cpu_to_le32((0x4 << 26)); + io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); } else { /* Some drives don't support 16/12 byte CDB's, convert to 10 */ if (((cdb_len == 12) || (cdb_len == 16)) && @@ -1311,7 +1302,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, cdb[8] = (u8)(num_blocks & 0xff); cdb[7] = (u8)((num_blocks >> 8) & 0xff); - io_request->IoFlags = 10; /* Specify 10-byte cdb */ + io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ cdb_len = 10; } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { /* Convert to 16 byte CDB for large LBA's */ @@ -1349,7 +1340,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, cdb[11] = (u8)((num_blocks >> 16) & 0xff); cdb[10] = (u8)((num_blocks >> 24) & 0xff); - io_request->IoFlags = 16; /* Specify 16-byte cdb */ + io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */ cdb_len = 16; } @@ -1410,13 +1401,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, struct IO_REQUEST_INFO io_info; struct fusion_context *fusion; struct MR_FW_RAID_MAP_ALL *local_map_ptr; + u8 *raidLUN; device_id = MEGASAS_DEV_INDEX(instance, scp); fusion = instance->ctrl_context; io_request = cmd->io_request; - 
io_request->RaidContext.VirtualDiskTgtId = device_id; + io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); io_request->RaidContext.status = 0; io_request->RaidContext.exStatus = 0; @@ -1480,7 +1472,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; io_info.numBlocks = datalength; io_info.ldTgtId = device_id; - io_request->DataLength = scsi_bufflen(scp); + io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) io_info.isRead = 1; @@ -1494,7 +1486,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, } else { if (MR_BuildRaidContext(instance, &io_info, &io_request->RaidContext, - local_map_ptr)) + local_map_ptr, &raidLUN)) fp_possible = io_info.fpOkForIo; } @@ -1520,8 +1512,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); io_request->RaidContext.Type = MPI2_TYPE_CUDA; io_request->RaidContext.nseg = 0x1; - io_request->IoFlags |= - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; + io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); @@ -1537,9 +1528,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; io_request->DevHandle = io_info.devHandle; + /* populate the LUN field */ + memcpy(io_request->LUN, raidLUN, 8); } else { io_request->RaidContext.timeoutValue = - local_map_ptr->raidMap.fpPdIoTimeoutSec; + cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); @@ -1557,7 +1550,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_request->RaidContext.nseg = 0x1; } io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; - io_request->DevHandle = device_id; + io_request->DevHandle = cpu_to_le16(device_id); } /* Not FP */ } @@ -1579,6 +1572,11 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, u16 pd_index = 0; struct MR_FW_RAID_MAP_ALL *local_map_ptr; struct fusion_context *fusion = instance->ctrl_context; + u8 span, physArm; + u16 devHandle; + u32 ld, arRef, pd; + struct MR_LD_RAID *raid; + struct RAID_CONTEXT *pRAID_Context; io_request = cmd->io_request; device_id = MEGASAS_DEV_INDEX(instance, scmd); @@ -1586,6 +1584,9 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, +scmd->device->id; local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; + io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + + /* Check if this is a system PD I/O */ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { @@ -1623,15 +1624,62 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, scmd->request->timeout / HZ; } } else { + if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) + goto NonFastPath; + + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) + goto NonFastPath; + + raid = MR_LdRaidGet(ld, local_map_ptr); + + /* check if this LD is FP capable */ + if (!(raid->capability.fpNonRWCapable)) + /* not FP capable, send as non-FP */ + goto NonFastPath; + + /* get RAID_Context pointer */ + pRAID_Context = &io_request->RaidContext; + + /* set RAID context values */ 
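/*
 * Editorial sketch of the step below (assumes the MR_*Get() accessors
 * from the megaraid_sas_fp.c hunks above; this is not code from the
 * patch): once an LD advertises fpNonRWCapable, a non-read/write
 * command can skip the LD-level MFI path. The RAID context is seeded
 * with defaults, and because such an LD is effectively a single-disk
 * RAID0, span 0 / arm 0 resolve through the RAID map straight to the
 * physical device handle the MPI2 SCSI I/O is addressed to:
 */
static u16 demo_nonrw_dev_handle(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
{
	u32 span = 0, arm = 0;				/* single span, single arm */
	u16 arRef = MR_LdSpanArrayGet(ld, span, map);	/* span -> array reference */
	u16 pd = MR_ArPdGet(arRef, arm, map);		/* array/arm -> PD index */

	return MR_PdDevHandleGet(pd, map);		/* PD -> firmware dev handle */
}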
+ pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; + pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd; + pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); + pRAID_Context->regLockRowLBA = 0; + pRAID_Context->regLockLength = 0; + pRAID_Context->configSeqNum = raid->seqNum; + + /* get the DevHandle for the PD (since this is + fpNonRWCapable, this is a single disk RAID0) */ + span = physArm = 0; + arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); + pd = MR_ArPdGet(arRef, physArm, local_map_ptr); + devHandle = MR_PdDevHandleGet(pd, local_map_ptr); + + /* build request descriptor */ + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + cmd->request_desc->SCSIIO.DevHandle = devHandle; + + /* populate the LUN field */ + memcpy(io_request->LUN, raid->LUN, 8); + + /* build the raidScsiIO structure */ + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + io_request->DevHandle = devHandle; + + return; + +NonFastPath: io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; - io_request->DevHandle = device_id; + io_request->DevHandle = cpu_to_le16(device_id); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } - io_request->RaidContext.VirtualDiskTgtId = device_id; + io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); io_request->LUN[1] = scmd->device->lun; - io_request->DataLength = scsi_bufflen(scmd); } /** @@ -1670,7 +1718,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, * Just the CDB length,rest of the Flags are zero * This will be modified for FP in build_ldio_fusion */ - io_request->IoFlags = scp->cmd_len; + io_request->IoFlags = cpu_to_le16(scp->cmd_len); if (megasas_is_ldio(scp)) megasas_build_ldio_fusion(instance, scp, cmd); @@ -1695,17 +1743,17 @@ megasas_build_io_fusion(struct megasas_instance *instance, io_request->RaidContext.numSGE = sge_count; - io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; + io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); if (scp->sc_data_direction == PCI_DMA_TODEVICE) - io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; + io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) - io_request->Control |= MPI2_SCSIIO_CONTROL_READ; + io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); io_request->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; - io_request->SenseBufferLowAddress = cmd->sense_phys_addr; + io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr); io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; cmd->scmd = scp; @@ -1770,7 +1818,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, } req_desc = cmd->request_desc; - req_desc->SCSIIO.SMID = index; + req_desc->SCSIIO.SMID = cpu_to_le16(index); if (cmd->io_request->ChainOffset != 0 && cmd->io_request->ChainOffset != 0xF) @@ -1832,7 +1880,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) num_completed = 0; while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { - smid = reply_desc->SMID; + smid = le16_to_cpu(reply_desc->SMID); cmd_fusion = fusion->cmd_list[smid - 1]; @@ -2050,12 +2098,12 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, SGL) / 4; io_req->ChainOffset = fusion->chain_offset_mfi_pthru; - mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; + mpi25_ieee_chain->Address = 
cpu_to_le64(mfi_cmd->frame_phys_addr); mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; - mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME; + mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME); return 0; } @@ -2088,7 +2136,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - req_desc->SCSIIO.SMID = index; + req_desc->SCSIIO.SMID = cpu_to_le16(index); return req_desc; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 4eb84011cb07..35a51397b364 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -93,8 +93,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { */ struct RAID_CONTEXT { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 nseg:4; + u8 Type:4; +#else u8 Type:4; u8 nseg:4; +#endif u8 resvd0; u16 timeoutValue; u8 regLockFlags; @@ -298,8 +303,13 @@ struct MPI2_RAID_SCSI_IO_REQUEST { * MPT RAID MFA IO Descriptor. */ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 MessageAddress1:24; /* bits 31:8*/ + u32 RequestFlags:8; +#else u32 RequestFlags:8; u32 MessageAddress1:24; /* bits 31:8*/ +#endif u32 MessageAddress2; /* bits 61:32 */ }; @@ -518,6 +528,19 @@ struct MR_SPAN_BLOCK_INFO { struct MR_LD_RAID { struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved4:7; + u32 fpNonRWCapable:1; + u32 fpReadAcrossStripe:1; + u32 fpWriteAcrossStripe:1; + u32 fpReadCapable:1; + u32 fpWriteCapable:1; + u32 encryptionType:8; + u32 pdPiMode:4; + u32 ldPiMode:4; + u32 reserved5:3; + u32 fpCapable:1; +#else u32 fpCapable:1; u32 reserved5:3; u32 ldPiMode:4; @@ -527,7 +550,9 @@ struct MR_LD_RAID { u32 fpReadCapable:1; u32 fpWriteAcrossStripe:1; u32 fpReadAcrossStripe:1; - u32 reserved4:8; + u32 fpNonRWCapable:1; + u32 reserved4:7; +#endif } capability; u32 reserved6; u64 size; @@ -551,7 +576,9 @@ struct MR_LD_RAID { u32 reserved:31; } flags; - u8 reserved3[0x5C]; + u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ + u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/ + u8 reserved3[0x80-0x2D]; /* 0x2D */ }; struct MR_LD_SPAN_MAP { diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile index 4c1d2e7a1176..efb0c4c2e310 100644 --- a/drivers/scsi/mpt3sas/Makefile +++ b/drivers/scsi/mpt3sas/Makefile @@ -1,5 +1,5 @@ # mpt3sas makefile -obj-m += mpt3sas.o +obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o mpt3sas-y += mpt3sas_base.o \ mpt3sas_config.o \ mpt3sas_scsih.o \ diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index d99f41c2ca13..a04b4ff8c7f6 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -309,6 +309,117 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev, } static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); /** + * pm8001_ctl_ib_queue_log_show - In bound Queue log + * @cdev:pointer to embedded class device + * @buf: the buffer returned + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int offset; + char *str = buf; + int start = 0; +#define IB_MEMMAP(c) \ + (*(u32 *)((u8 *)pm8001_ha-> \ + memoryMap.region[IB].virt_ptr + \ + pm8001_ha->evtlog_ib_offset + (c))) + + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { + if (pm8001_ha->chip_id != chip_8001) + str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); + else + str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); + start = start + 4; + } + pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET; + if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) + && (pm8001_ha->chip_id != chip_8001)) + pm8001_ha->evtlog_ib_offset = 0; + if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0) + && (pm8001_ha->chip_id == chip_8001)) + pm8001_ha->evtlog_ib_offset = 0; + + return str - buf; +} + +static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); +/** + * pm8001_ctl_ob_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @buf: the buffer returned + * A sysfs 'read-only' shost attribute. + */ + +static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int offset; + char *str = buf; + int start = 0; +#define OB_MEMMAP(c) \ + (*(u32 *)((u8 *)pm8001_ha-> \ + memoryMap.region[OB].virt_ptr + \ + pm8001_ha->evtlog_ob_offset + (c))) + + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { + if (pm8001_ha->chip_id != chip_8001) + str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); + else + str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); + start = start + 4; + } + pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET; + if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) + && (pm8001_ha->chip_id != chip_8001)) + pm8001_ha->evtlog_ob_offset = 0; + if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0) + && (pm8001_ha->chip_id == chip_8001)) + pm8001_ha->evtlog_ob_offset = 0; + + return str - buf; +} +static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); +/** + * pm8001_ctl_bios_version_show - Bios version Display + * @cdev:pointer to embedded class device + * @buf:the buffer returned + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *str = buf; + void *virt_addr; + int bios_index; + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + + pm8001_ha->nvmd_completion = &completion; + payload.minor_function = 7; + payload.offset = 0; + payload.length = 4096; + payload.func_specific = kzalloc(4096, GFP_KERNEL); + PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); + wait_for_completion(&completion); + virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; + for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; + bios_index++) + str += sprintf(str, "%c", + *((u8 *)((u8 *)virt_addr+bios_index))); + return str - buf; +} +static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); +/** * pm8001_ctl_aap_log_show - IOP event log * @cdev: pointer to embedded class device * @buf: the buffer returned @@ -344,6 +455,43 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, } static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); +/** + ** pm8001_ctl_fatal_log_show - fatal error logging + ** @cdev:pointer to embedded class device + ** @buf: the buffer returned + ** + ** A sysfs 'read-only' shost attribute. + **/ + +static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u32 count; + + count = pm80xx_get_fatal_dump(cdev, attr, buf); + return count; +} + +static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); + + +/** + ** pm8001_ctl_gsm_log_show - gsm dump collection + ** @cdev:pointer to embedded class device + ** @buf: the buffer returned + **A sysfs 'read-only' shost attribute. 
+ **/ +static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u32 count; + + count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf); + return count; +} + +static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); + #define FLASH_CMD_NONE 0x00 #define FLASH_CMD_UPDATE 0x01 #define FLASH_CMD_SET_NVMD 0x02 @@ -603,12 +751,17 @@ struct device_attribute *pm8001_host_attrs[] = { &dev_attr_update_fw, &dev_attr_aap_log, &dev_attr_iop_log, + &dev_attr_fatal_log, + &dev_attr_gsm_log, &dev_attr_max_out_io, &dev_attr_max_devices, &dev_attr_max_sg_list, &dev_attr_sas_spec_support, &dev_attr_logging_level, &dev_attr_host_sas_address, + &dev_attr_bios_version, + &dev_attr_ib_log, + &dev_attr_ob_log, NULL, }; diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h index 63ad4aa0c422..d0d43a250b9e 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.h +++ b/drivers/scsi/pm8001/pm8001_ctl.h @@ -45,6 +45,8 @@ #define HEADER_LEN 28 #define SIZE_OFFSET 16 +#define BIOSOFFSET 56 +#define BIOS_OFFSET_LIMIT 61 #define FLASH_OK 0x000000 #define FAIL_OPEN_BIOS_FILE 0x000100 @@ -53,5 +55,9 @@ #define FAIL_OUT_MEMORY 0x000c00 #define FLASH_IN_PROGRESS 0x001000 +#define IB_OB_READ_TIMES 256 +#define SYSFS_OFFSET 1024 +#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024) +#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024) #endif /* PM8001_CTL_H_INCLUDED */ diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index 479c5a7a863a..74a4bb9af07b 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -46,7 +46,10 @@ enum chip_flavors { chip_8008, chip_8009, chip_8018, - chip_8019 + chip_8019, + chip_8074, + chip_8076, + chip_8077 }; enum phy_speed { @@ -99,7 +102,8 @@ enum memory_region_num { NVMD, /* NVM device */ DEV_MEM, /* memory for devices */ CCB_MEM, /* memory for command control block */ - FW_FLASH /* memory for fw flash update */ + FW_FLASH, /* memory for fw flash update */ + FORENSIC_MEM /* memory for fw forensic data */ }; #define PM8001_EVENT_LOG_SIZE (128 * 1024) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 4a2195752198..0a1296a87d66 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1868,6 +1868,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%016llx", SAS_ADDR(t->dev->sas_addr))); + switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" @@ -2276,6 +2283,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 param; u32 status; u32 tag; + int i, j; + u8 sata_addr_low[4]; + u32 temp_sata_addr_low; + u8 sata_addr_hi[4]; + u32 temp_sata_addr_hi; struct sata_completion_resp *psataPayload; struct task_status_struct *ts; struct ata_task_resp *resp ; @@ -2325,7 +2337,46 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("ts null\n")); return; } - + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) { + if (!((t->dev->parent) && + (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) { + for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++) + 
sata_addr_low[i] = pm8001_ha->sas_addr[j]; + for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++) + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; + memcpy(&temp_sata_addr_low, sata_addr_low, + sizeof(sata_addr_low)); + memcpy(&temp_sata_addr_hi, sata_addr_hi, + sizeof(sata_addr_hi)); + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) + |((temp_sata_addr_hi << 8) & + 0xff0000) | + ((temp_sata_addr_hi >> 8) + & 0xff00) | + ((temp_sata_addr_hi << 24) & + 0xff000000)); + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) + & 0xff) | + ((temp_sata_addr_low << 8) + & 0xff0000) | + ((temp_sata_addr_low >> 8) + & 0xff00) | + ((temp_sata_addr_low << 24) + & 0xff000000)) + + pm8001_dev->attached_phy + + 0x10); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%08x%08x", temp_sata_addr_hi, + temp_sata_addr_low)); + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%016llx", SAS_ADDR(t->dev->sas_addr))); + } + } switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); @@ -3087,8 +3138,8 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev = ccb->device; u32 status = le32_to_cpu(pPayload->status); u32 device_id = le32_to_cpu(pPayload->device_id); - u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS; - u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS; + u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS; + u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS; PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state " "from 0x%x to 0x%x status = 0x%x!\n", device_id, pds, nds, status)); @@ -3352,6 +3403,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) unsigned long flags; u8 deviceType = pPayload->sas_identify.dev_type; port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n", port_id, phy_id)); @@ -3432,6 +3484,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d," " phy id = %d\n", port_id, phy_id)); port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; port->port_attached = 1; pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; @@ -4700,6 +4753,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, sspTMCmd.tmf = cpu_to_le32(tmf->tmf); memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); + if (pm8001_ha->chip_id != chip_8001) + sspTMCmd.ds_ads_m = 0x08; circularQ = &pm8001_ha->inbnd_q_tbl[0]; ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); return ret; @@ -4778,6 +4833,16 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; } + case IOP_RDUMP: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } default: break; } @@ -4938,6 +5003,89 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, return rc; } +ssize_t +pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf) +{ + u32 value, rem, offset = 0, bar = 0; + u32 index, 
work_offset, dw_length; + u32 shift_value, gsm_base, gsm_dump_offset; + char *direct_data; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + direct_data = buf; + gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset; + + /* check max is 1 Mbytes */ + if ((length > 0x100000) || (gsm_dump_offset & 3) || + ((gsm_dump_offset + length) > 0x1000000)) + return 1; + + if (pm8001_ha->chip_id == chip_8001) + bar = 2; + else + bar = 1; + + work_offset = gsm_dump_offset & 0xFFFF0000; + offset = gsm_dump_offset & 0x0000FFFF; + gsm_dump_offset = work_offset; + /* adjust length to dword boundary */ + rem = length & 3; + dw_length = length >> 2; + + for (index = 0; index < dw_length; index++) { + if ((work_offset + offset) & 0xFFFF0000) { + if (pm8001_ha->chip_id == chip_8001) + shift_value = ((gsm_dump_offset + offset) & + SHIFT_REG_64K_MASK); + else + shift_value = (((gsm_dump_offset + offset) & + SHIFT_REG_64K_MASK) >> + SHIFT_REG_BIT_SHIFT); + + if (pm8001_ha->chip_id == chip_8001) { + gsm_base = GSM_BASE; + if (-1 == pm8001_bar4_shift(pm8001_ha, + (gsm_base + shift_value))) + return 1; + } else { + gsm_base = 0; + if (-1 == pm80xx_bar4_shift(pm8001_ha, + (gsm_base + shift_value))) + return 1; + } + gsm_dump_offset = (gsm_dump_offset + offset) & + 0xFFFF0000; + work_offset = 0; + offset = offset & 0x0000FFFF; + } + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & + 0x0000FFFF); + direct_data += sprintf(direct_data, "%08x ", value); + offset += 4; + } + if (rem != 0) { + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & + 0x0000FFFF); + /* xfr for non_dw */ + direct_data += sprintf(direct_data, "%08x ", value); + } + /* Shift back to BAR4 original address */ + if (pm8001_ha->chip_id == chip_8001) { + if (-1 == pm8001_bar4_shift(pm8001_ha, 0)) + return 1; + } else { + if (-1 == pm80xx_bar4_shift(pm8001_ha, 0)) + return 1; + } + pm8001_ha->fatal_forensic_shift_offset += 1024; + + if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000) + pm8001_ha->fatal_forensic_shift_offset = 0; + return direct_data - buf; +} + int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 state) diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h index d7c1e2034226..e4867e690c84 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.h +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -131,6 +131,10 @@ #define LINKRATE_30 (0x02 << 8) #define LINKRATE_60 (0x04 << 8) +/* for phy state */ + +#define PHY_STATE_LINK_UP_SPC 0x1 + /* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ #define GSM_SM_BASE 0x4F0000 struct mpi_msg_hdr{ @@ -1027,5 +1031,8 @@ struct set_dev_state_resp { #define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 +#define GSM_BASE 0x4F0000 +#define SHIFT_REG_64K_MASK 0xffff0000 +#define SHIFT_REG_BIT_SHIFT 8 #endif diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 0dba7c7856ab..73a120d81b4d 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -54,6 +54,9 @@ static const struct pm8001_chip_info pm8001_chips[] = { [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, + [chip_8074] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8076] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8077] = {0, 16, 
&pm8001_80xx_dispatch,}, }; static int pm8001_id; @@ -172,20 +175,16 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) static void pm8001_tasklet(unsigned long opaque) { struct pm8001_hba_info *pm8001_ha; - u32 vec; - pm8001_ha = (struct pm8001_hba_info *)opaque; + struct isr_param *irq_vector; + + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; if (unlikely(!pm8001_ha)) BUG_ON(1); - vec = pm8001_ha->int_vector; - PM8001_CHIP_DISP->isr(pm8001_ha, vec); + PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); } #endif -static struct pm8001_hba_info *outq_to_hba(u8 *outq) -{ - return container_of((outq - *outq), struct pm8001_hba_info, outq[0]); -} - /** * pm8001_interrupt_handler_msix - main MSIX interrupt handler. * It obtains the vector number and calls the equivalent bottom @@ -195,18 +194,20 @@ static struct pm8001_hba_info *outq_to_hba(u8 *outq) */ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) { - struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque); - u8 outq = *(u8 *)opaque; + struct isr_param *irq_vector; + struct pm8001_hba_info *pm8001_ha; irqreturn_t ret = IRQ_HANDLED; + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; + if (unlikely(!pm8001_ha)) return IRQ_NONE; if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; - pm8001_ha->int_vector = outq; #ifdef PM8001_USE_TASKLET - tasklet_schedule(&pm8001_ha->tasklet); + tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]); #else - ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq); + ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); #endif return ret; } @@ -227,9 +228,8 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; - pm8001_ha->int_vector = 0; #ifdef PM8001_USE_TASKLET - tasklet_schedule(&pm8001_ha->tasklet); + tasklet_schedule(&pm8001_ha->tasklet[0]); #else ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); #endif @@ -344,6 +344,10 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, /* Memory region for fw flash */ pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; + pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1; + pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000; + pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000; + pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000; for (i = 0; i < USI_MAX_MEMCNT; i++) { if (pm8001_mem_alloc(pm8001_ha->pdev, &pm8001_ha->memoryMap.region[i].virt_ptr, @@ -450,7 +454,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, { struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - + int j; pm8001_ha = sha->lldd_ha; if (!pm8001_ha) @@ -473,12 +477,14 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->iomb_size = IOMB_SIZE_SPC; #ifdef PM8001_USE_TASKLET - /** - * default tasklet for non msi-x interrupt handler/first msi-x - * interrupt handler - **/ - tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); #endif pm8001_ioremap(pm8001_ha); if (!pm8001_alloc(pm8001_ha, 
ent)) @@ -664,6 +670,31 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) #endif } +/* + * pm8001_get_phy_settings_info - read phy setting values. + * @pm8001_ha : our hba. + */ +void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) +{ + +#ifdef PM8001_READ_VPD + /* OPTION ROM FLASH read for the SPC cards */ + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + + pm8001_ha->nvmd_completion = &completion; + /* phy setting values read from flash / EEPROM */ + payload.minor_function = 6; + payload.offset = 0; + payload.length = 4096; + payload.func_specific = kzalloc(4096, GFP_KERNEL); + /* Read phy setting values from flash */ + PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); + wait_for_completion(&completion); + pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); +#endif +} + #ifdef PM8001_USE_MSIX /** * pm8001_setup_msix - enable MSI-X interrupt @@ -701,19 +732,20 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) "pci_enable_msix request ret:%d no of intr %d\n", rc, pm8001_ha->number_of_intr)); - for (i = 0; i < number_of_intr; i++) - pm8001_ha->outq[i] = i; for (i = 0; i < number_of_intr; i++) { snprintf(intr_drvname[i], sizeof(intr_drvname[0]), DRV_NAME"%d", i); + pm8001_ha->irq_vector[i].irq_id = i; + pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; + if (request_irq(pm8001_ha->msix_entries[i].vector, pm8001_interrupt_handler_msix, flag, - intr_drvname[i], &pm8001_ha->outq[i])) { + intr_drvname[i], &(pm8001_ha->irq_vector[i]))) { for (j = 0; j < i; j++) free_irq( pm8001_ha->msix_entries[j].vector, - &pm8001_ha->outq[j]); + &(pm8001_ha->irq_vector[j])); pci_disable_msix(pm8001_ha->pdev); break; } @@ -844,6 +876,10 @@ static int pm8001_pci_probe(struct pci_dev *pdev, } pm8001_init_sas_add(pm8001_ha); + /* phy setting support for motherboard controller */ + if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 && + pdev->subsystem_vendor != 0) + pm8001_get_phy_settings_info(pm8001_ha); pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); if (rc) @@ -871,7 +907,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; - int i; + int i, j; pm8001_ha = sha->lldd_ha; sas_unregister_ha(sha); sas_remove_host(pm8001_ha->shost); @@ -885,13 +921,18 @@ static void pm8001_pci_remove(struct pci_dev *pdev) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) free_irq(pm8001_ha->msix_entries[i].vector, - &pm8001_ha->outq[i]); + &(pm8001_ha->irq_vector[i])); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); #endif #ifdef PM8001_USE_TASKLET - tasklet_kill(&pm8001_ha->tasklet); + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); #endif pm8001_free(pm8001_ha); kfree(sha->sas_phy); @@ -912,7 +953,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; - int i; + int i, j; u32 device_state; pm8001_ha = sha->lldd_ha; flush_workqueue(pm8001_wq); @@ -928,13 +969,18 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) 
free_irq(pm8001_ha->msix_entries[i].vector, - &pm8001_ha->outq[i]); + &(pm8001_ha->irq_vector[i])); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); #endif #ifdef PM8001_USE_TASKLET - tasklet_kill(&pm8001_ha->tasklet); + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); #endif device_state = pci_choose_state(pdev, state); pm8001_printk("pdev=0x%p, slot=%s, entering " @@ -957,7 +1003,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int rc; - u8 i = 0; + u8 i = 0, j; u32 device_state; pm8001_ha = sha->lldd_ha; device_state = pdev->current_state; @@ -997,10 +1043,14 @@ static int pm8001_pci_resume(struct pci_dev *pdev) if (rc) goto err_out_disable; #ifdef PM8001_USE_TASKLET - /* default tasklet for non msi-x interrupt handler/first msi-x - * interrupt handler */ - tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); #endif PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); if (pm8001_ha->chip_id != chip_8001) { @@ -1036,6 +1086,12 @@ static struct pci_device_id pm8001_pci_table[] = { { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, + { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 }, + { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 }, + { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 }, + { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 }, + { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 }, + { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 }, { PCI_VENDOR_ID_ADAPTEC2, 0x8081, PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, { PCI_VENDOR_ID_ADAPTEC2, 0x8081, @@ -1056,6 +1112,24 @@ static struct pci_device_id pm8001_pci_table[] = { PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, { PCI_VENDOR_ID_ADAPTEC2, 0x8089, PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 }, {} /* terminate list */ }; @@ -1107,8 +1181,12 @@ module_init(pm8001_init); module_exit(pm8001_exit); MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); +MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>"); +MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>"); 
+MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>"); MODULE_DESCRIPTION( - "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver"); + "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 " + "SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pm8001_pci_table); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index a85d73de7c80..f50ac44b950e 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -447,7 +447,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num, break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: rc = pm8001_task_prep_ata(pm8001_ha, ccb); break; default: @@ -704,6 +703,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, int res, retry; struct sas_task *task = NULL; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + struct pm8001_device *pm8001_dev = dev->lldd_dev; + DECLARE_COMPLETION_ONSTACK(completion_setstate); for (retry = 0; retry < 3; retry++) { task = sas_alloc_slow_task(GFP_KERNEL); @@ -729,6 +730,12 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, goto ex_err; } wait_for_completion(&task->slow_task->completion); + if (pm8001_ha->chip_id != chip_8001) { + pm8001_dev->setds_completion = &completion_setstate; + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, 0x01); + wait_for_completion(&completion_setstate); + } res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { @@ -1091,15 +1098,17 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) struct pm8001_tmf_task tmf_task; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + DECLARE_COMPLETION_ONSTACK(completion_setstate); if (dev_is_sata(dev)) { struct sas_phy *phy = sas_get_local_phy(dev); rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); rc = sas_phy_reset(phy, 1); sas_put_local_phy(phy); + pm8001_dev->setds_completion = &completion_setstate; rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, pm8001_dev, 0x01); - msleep(2000); + wait_for_completion(&completion_setstate); } else { tmf_task.tmf = TMF_LU_RESET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 570819464d90..6c5fd5ee22d3 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -104,6 +104,9 @@ do { \ #define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) +#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \ + || (dev->device == 0X8076) \ + || (dev->device == 0X8077)) #define PM8001_NAME_LENGTH 32/* generic length of strings */ extern struct list_head hba_list; @@ -129,6 +132,61 @@ struct pm8001_ioctl_payload { u8 *func_specific; }; +#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF +#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24) +#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */ +#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */ +#define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */ +#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */ +#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */ +#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */ +#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1 
+#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3 +#define TYPE_GSM_SPACE 1 +#define TYPE_QUEUE 2 +#define TYPE_FATAL 3 +#define TYPE_NON_FATAL 4 +#define TYPE_INBOUND 1 +#define TYPE_OUTBOUND 2 +struct forensic_data { + u32 data_type; + union { + struct { + u32 direct_len; + u32 direct_offset; + void *direct_data; + } gsm_buf; + struct { + u16 queue_type; + u16 queue_index; + u32 direct_len; + void *direct_data; + } queue_buf; + struct { + u32 direct_len; + u32 direct_offset; + u32 read_len; + void *direct_data; + } data_buf; + }; +}; + +/* bit31-26 - mask bar */ +#define SCRATCH_PAD0_BAR_MASK 0xFC000000 +/* bit25-0 - offset mask */ +#define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF +/* if AAP error state */ +#define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF +/* Inbound doorbell bit7 */ +#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80 +/* Inbound doorbell bit7 SPCV */ +#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80 +#define MAIN_MERRDCTO_MERRDCES 0xA0 /* DWORD 0x28 */ + struct pm8001_dispatch { char *name; int (*chip_init)(struct pm8001_hba_info *pm8001_ha); @@ -343,6 +401,7 @@ union main_cfg_table { u32 phy_attr_table_offset; u32 port_recovery_timer; u32 interrupt_reassertion_delay; + u32 fatal_n_non_fatal_dump; /* 0x28 */ } pm80xx_tbl; }; @@ -407,6 +466,10 @@ struct pm8001_hba_memspace { u64 membase; u32 memsize; }; +struct isr_param { + struct pm8001_hba_info *drv_inst; + u32 irq_id; +}; struct pm8001_hba_info { char name[PM8001_NAME_LENGTH]; struct list_head list; @@ -417,6 +480,13 @@ struct pm8001_hba_info { struct pm8001_hba_memspace io_mem[6]; struct mpi_mem_req memoryMap; struct encrypt encrypt_info; /* support encryption */ + struct forensic_data forensic_info; + u32 fatal_bar_loc; + u32 forensic_last_offset; + u32 fatal_forensic_shift_offset; + u32 forensic_fatal_step; + u32 evtlog_ib_offset; + u32 evtlog_ob_offset; void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ @@ -425,6 +495,7 @@ struct pm8001_hba_info { void __iomem *pspa_q_tbl_addr; /*MPI SAS PHY attributes Queue Config Table Addr*/ void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ + void __iomem *fatal_tbl_addr; /*MPI Fatal Error Table Addr */ union main_cfg_table main_cfg_tbl; union general_status_table gs_tbl; struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; @@ -452,14 +523,13 @@ struct pm8001_hba_info { int number_of_intr;/*will be used in remove()*/ #endif #ifdef PM8001_USE_TASKLET - struct tasklet_struct tasklet; + struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC]; #endif u32 logging_level; u32 fw_status; u32 smp_exp_mode; - u32 int_vector; const struct firmware *fw_image; - u8 outq[PM8001_MAX_MSIX_VEC]; + struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; }; struct pm8001_work { @@ -629,7 +699,12 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); - +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, + u32 length, u8 *buf); +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +ssize_t pm80xx_get_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf); +ssize_t 
pm8001_get_gsm_dump(struct device *cdev, u32, char *buf); /* ctl shared API */ extern struct device_attribute *pm8001_host_attrs[]; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 9f91030211e8..c950dc5c9943 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -45,6 +45,228 @@ #define SMP_DIRECT 1 #define SMP_INDIRECT 2 + + +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) +{ + u32 reg_val; + unsigned long start; + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value); + /* confirm the setting is written */ + start = jiffies + HZ; /* 1 sec */ + do { + reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER); + } while ((reg_val != shift_value) && time_before(jiffies, start)); + if (reg_val != shift_value) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" + " = 0x%x\n", reg_val)); + return -1; + } + return 0; +} + +void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, + const void *destination, + u32 dw_count, u32 bus_base_number) +{ + u32 index, value, offset; + u32 *destination1; + destination1 = (u32 *)destination; + + for (index = 0; index < dw_count; index += 4, destination1++) { + offset = (soffset + index / 4); + if (offset < (64 * 1024)) { + value = pm8001_cr32(pm8001_ha, bus_base_number, offset); + *destination1 = cpu_to_le32(value); + } + } + return; +} + +ssize_t pm80xx_get_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr; + u32 status = 1; + u32 accum_len, reg_val, index, *temp; + unsigned long start; + u8 *direct_data; + char *fatal_error_data = buf; + + pm8001_ha->forensic_info.data_buf.direct_data = buf; + if (pm8001_ha->chip_id == chip_8001) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "Not supported for SPC controller"); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("forensic_info TYPE_NON_FATAL..............\n")); + direct_data = (u8 *)fatal_error_data; + pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL; + pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET; + pm8001_ha->forensic_info.data_buf.direct_offset = 0; + pm8001_ha->forensic_info.data_buf.read_len = 0; + + pm8001_ha->forensic_info.data_buf.direct_data = direct_data; + } + + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { + /* start to get data */ + /* Program the MEMBASE II Shifting Register with 0x00.*/ + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_ha->forensic_last_offset = 0; + pm8001_ha->forensic_fatal_step = 0; + pm8001_ha->fatal_bar_loc = 0; + } + /* Read until accum_len is retrieved */ + accum_len = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n", + accum_len)); + if (accum_len == 0xFFFFFFFF) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Possible PCI issue 0x%x not expected\n", + accum_len)); + return status; + } + if (accum_len == 0 || accum_len >= 0x100000) { + pm8001_ha->forensic_info.data_buf.direct_data += + 
sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr; + if (pm8001_ha->forensic_fatal_step == 0) { +moreData: + if (pm8001_ha->forensic_info.data_buf.direct_data) { + /* Data is in bar, copy to host memory */ + pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc, + pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr, + pm8001_ha->forensic_info.data_buf.direct_len , + 1); + } + pm8001_ha->fatal_bar_loc += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_info.data_buf.direct_offset += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_last_offset += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_info.data_buf.read_len = + pm8001_ha->forensic_info.data_buf.direct_len; + + if (pm8001_ha->forensic_last_offset >= accum_len) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 3); + for (index = 0; index < (SYSFS_OFFSET / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + + pm8001_ha->fatal_bar_loc = 0; + pm8001_ha->forensic_fatal_step = 1; + pm8001_ha->fatal_forensic_shift_offset = 0; + pm8001_ha->forensic_last_offset = 0; + status = 0; + return (char *)pm8001_ha-> + forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->fatal_bar_loc < (64 * 1024)) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", 2); + for (index = 0; index < (SYSFS_OFFSET / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + status = 0; + return (char *)pm8001_ha-> + forensic_info.data_buf.direct_data - + (char *)buf; + } + + /* Increment the MEMBASE II Shifting Register value by 0x100.*/ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 2); + for (index = 0; index < 256; index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + pm8001_ha->fatal_forensic_shift_offset += 0x100; + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_ha->fatal_bar_loc = 0; + status = 0; + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->forensic_fatal_step == 1) { + pm8001_ha->fatal_forensic_shift_offset = 0; + /* Read 64K of the debug data. */ + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, + MPI_FATAL_EDUMP_HANDSHAKE_RDY); + + /* Poll FDDHSHK until clear */ + start = jiffies + (2 * HZ); /* 2 sec */ + + do { + reg_val = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE); + } while ((reg_val) && time_before(jiffies, start)); + + if (reg_val != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" + " = 0x%x\n", reg_val)); + return -1; + } + + /* Read the next 64K of the debug data. 
*/ + pm8001_ha->forensic_fatal_step = 0; + if (pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS) != + MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) { + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0); + goto moreData; + } else { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", 4); + pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF; + pm8001_ha->forensic_info.data_buf.direct_len = 0; + pm8001_ha->forensic_info.data_buf.direct_offset = 0; + pm8001_ha->forensic_info.data_buf.read_len = 0; + status = 0; + } + } + + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; +} + /** * read_main_config_table - read the configure table and save it. * @pm8001_ha: our hba card information @@ -430,7 +652,11 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) table is updated */ pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); /* wait until Inbound DoorBell Clear Register toggled */ - max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */ + if (IS_SPCV_12G(pm8001_ha->pdev)) { + max_wait_count = 4 * 1000 * 1000;/* 4 sec */ + } else { + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ + } do { udelay(1); value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); @@ -579,6 +805,9 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pm8001_ha->pspa_q_tbl_addr = base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & 0xFFFFFF); + pm8001_ha->fatal_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) & + 0xFFFFFF); PM8001_INIT_DBG(pm8001_ha, pm8001_printk("GST OFFSET 0x%x\n", @@ -913,7 +1142,11 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); /* wait until Inbound DoorBell Clear Register toggled */ - max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + if (IS_SPCV_12G(pm8001_ha->pdev)) { + max_wait_count = 4 * 1000 * 1000;/* 4 sec */ + } else { + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ + } do { udelay(1); value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); @@ -959,6 +1192,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) { u32 regval; u32 bootloader_state; + u32 ibutton0, ibutton1; /* Check if MPI is in ready state to reset */ if (mpi_uninit_check(pm8001_ha) != 0) { @@ -1017,7 +1251,27 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) if (-1 == check_fw_ready(pm8001_ha)) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Firmware is not ready!\n")); - return -EBUSY; + /* check iButton feature support for motherboard controller */ + if (pm8001_ha->pdev->subsystem_vendor != + PCI_VENDOR_ID_ADAPTEC2 && + pm8001_ha->pdev->subsystem_vendor != 0) { + ibutton0 = pm8001_cr32(pm8001_ha, 0, + MSGU_HOST_SCRATCH_PAD_6); + ibutton1 = pm8001_cr32(pm8001_ha, 0, + MSGU_HOST_SCRATCH_PAD_7); + if (!ibutton0 && !ibutton1) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("iButton Feature is" + " not Available!!!\n")); + return -EBUSY; + } + if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("CRC Check for iButton" + " Feature Failed!!!\n")); + return -EBUSY; + } + } } PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SPCv soft reset Complete\n")); @@ -1268,6 +1522,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; + /* Print sas address of IO failed device */ + if ((status != 
IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive" + ":%016llx", SAS_ADDR(t->dev->sas_addr))); + switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, @@ -1691,6 +1952,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 param; u32 status; u32 tag; + int i, j; + u8 sata_addr_low[4]; + u32 temp_sata_addr_low, temp_sata_addr_hi; + u8 sata_addr_hi[4]; struct sata_completion_resp *psataPayload; struct task_status_struct *ts; struct ata_task_resp *resp ; @@ -1740,7 +2005,47 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("ts null\n")); return; } + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) { + if (!((t->dev->parent) && + (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) { + for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++) + sata_addr_low[i] = pm8001_ha->sas_addr[j]; + for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++) + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; + memcpy(&temp_sata_addr_low, sata_addr_low, + sizeof(sata_addr_low)); + memcpy(&temp_sata_addr_hi, sata_addr_hi, + sizeof(sata_addr_hi)); + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) + |((temp_sata_addr_hi << 8) & + 0xff0000) | + ((temp_sata_addr_hi >> 8) + & 0xff00) | + ((temp_sata_addr_hi << 24) & + 0xff000000)); + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) + & 0xff) | + ((temp_sata_addr_low << 8) + & 0xff0000) | + ((temp_sata_addr_low >> 8) + & 0xff00) | + ((temp_sata_addr_low << 24) + & 0xff000000)) + + pm8001_dev->attached_phy + + 0x10); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%08x%08x", temp_sata_addr_hi, + temp_sata_addr_low)); + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%016llx", SAS_ADDR(t->dev->sas_addr))); + } + } switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); @@ -2589,6 +2894,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) unsigned long flags; u8 deviceType = pPayload->sas_identify.dev_type; port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; PM8001_MSG_DBG(pm8001_ha, pm8001_printk( "portid:%d; phyid:%d; linkrate:%d; " "portstate:%x; devicetype:%x\n", @@ -2673,6 +2979,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) port_id, phy_id, link_rate, portstate)); port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; port->port_attached = 1; pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; @@ -3103,9 +3410,27 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { - PM8001_MSG_DBG(pm8001_ha, - pm8001_printk(" pm80xx_addition_functionality\n")); + u8 page_code; + struct set_phy_profile_resp *pPayload = + (struct set_phy_profile_resp *)(piomb + 4); + u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid); + u32 status = le32_to_cpu(pPayload->status); + page_code = (u8)((ppc_phyid & 0xFF00) >> 8); + if (status) { + /* status is FAILED */ + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("PhyProfile command failed with status " + "0x%08X \n", status)); + return -1; + } else { + if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Invalid page code 0x%X\n", + page_code)); + return -1; 
+ } + } return 0; } @@ -3484,8 +3809,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, else pm8001_ha->smp_exp_mode = SMP_INDIRECT; - /* DIRECT MODE support only in spcv/ve */ - pm8001_ha->smp_exp_mode = SMP_DIRECT; tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); preq_dma_addr = (char *)phys_to_virt(tmp_addr); @@ -3501,7 +3824,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, /* exclude top 4 bytes for SMP req header */ smp_cmd.long_smp_req.long_req_addr = cpu_to_le64((u64)sg_dma_address - (&task->smp_task.smp_req) - 4); + (&task->smp_task.smp_req) + 4); /* exclude 4 bytes for SMP req header and CRC */ smp_cmd.long_smp_req.long_req_size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); @@ -3604,10 +3927,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, struct ssp_ini_io_start_req ssp_cmd; u32 tag = ccb->ccb_tag; int ret; - u64 phys_addr; + u64 phys_addr, start_addr, end_addr; + u32 end_addr_high, end_addr_low; struct inbound_queue_table *circularQ; - static u32 inb; - static u32 outb; + u32 q_index; u32 opc = OPC_INB_SSPINIIOSTART; memset(&ssp_cmd, 0, sizeof(ssp_cmd)); memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); @@ -3626,7 +3949,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, task->ssp_task.cmd->cmd_len); - circularQ = &pm8001_ha->inbnd_q_tbl[0]; + q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; + circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; /* Check if encryption is set */ if (pm8001_ha->chip->encrypt && @@ -3658,6 +3982,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.enc_esgl = 0; + /* Check 4G Boundary */ + start_addr = cpu_to_le64(dma_addr); + end_addr = (start_addr + ssp_cmd.enc_len) - 1; + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + if (end_addr_high != ssp_cmd.enc_addr_high) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("The sg list address " + "start_addr=0x%016llx data_len=0x%x " + "end_addr_high=0x%08x end_addr_low=" + "0x%08x has crossed 4G boundary\n", + start_addr, ssp_cmd.enc_len, + end_addr_high, end_addr_low)); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, + buf_prd[0]); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } } else if (task->num_scatter == 0) { ssp_cmd.enc_addr_low = 0; ssp_cmd.enc_addr_high = 0; @@ -3674,7 +4022,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, } else { PM8001_IO_DBG(pm8001_ha, pm8001_printk( "Sending Normal SAS command 0x%x inb q %x\n", - task->ssp_task.cmd->cmnd[0], inb)); + task->ssp_task.cmd->cmnd[0], q_index)); /* fill in PRD (scatter/gather) table, if any */ if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, @@ -3693,6 +4041,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; + /* Check 4G Boundary */ + start_addr = cpu_to_le64(dma_addr); + end_addr = (start_addr + ssp_cmd.len) - 1; + end_addr_low = 
cpu_to_le32(lower_32_bits(end_addr)); + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + if (end_addr_high != ssp_cmd.addr_high) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("The sg list address " + "start_addr=0x%016llx data_len=0x%x " + "end_addr_high=0x%08x end_addr_low=" + "0x%08x has crossed 4G boundary\n", + start_addr, ssp_cmd.len, + end_addr_high, end_addr_low)); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, + buf_prd[0]); + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } } else if (task->num_scatter == 0) { ssp_cmd.addr_low = 0; ssp_cmd.addr_high = 0; @@ -3700,11 +4072,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.esgl = 0; } } - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++); - - /* rotate the outb queue */ - outb = outb%PM8001_MAX_SPCV_OUTB_NUM; - + q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, + &ssp_cmd, q_index); return ret; } @@ -3716,18 +4086,19 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; u32 tag = ccb->ccb_tag; int ret; - static u32 inb; - static u32 outb; + u32 q_index; struct sata_start_req sata_cmd; u32 hdr_tag, ncg_tag = 0; - u64 phys_addr; + u64 phys_addr, start_addr, end_addr; + u32 end_addr_high, end_addr_low; u32 ATAP = 0x0; u32 dir; struct inbound_queue_table *circularQ; unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); - circularQ = &pm8001_ha->inbnd_q_tbl[0]; + q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; + circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; if (task->data_dir == PCI_DMA_NONE) { ATAP = 0x04; /* no data*/ @@ -3788,6 +4159,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.enc_addr_high = upper_32_bits(dma_addr); sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); sata_cmd.enc_esgl = 0; + /* Check 4G Boundary */ + start_addr = cpu_to_le64(dma_addr); + end_addr = (start_addr + sata_cmd.enc_len) - 1; + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + if (end_addr_high != sata_cmd.enc_addr_high) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("The sg list address " + "start_addr=0x%016llx data_len=0x%x " + "end_addr_high=0x%08x end_addr_low" + "=0x%08x has crossed 4G boundary\n", + start_addr, sata_cmd.enc_len, + end_addr_high, end_addr_low)); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, + buf_prd[0]); + sata_cmd.enc_addr_low = + lower_32_bits(phys_addr); + sata_cmd.enc_addr_high = + upper_32_bits(phys_addr); + sata_cmd.enc_esgl = + cpu_to_le32(1 << 31); + } } else if (task->num_scatter == 0) { sata_cmd.enc_addr_low = 0; sata_cmd.enc_addr_high = 0; @@ -3808,7 +4204,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, } else { PM8001_IO_DBG(pm8001_ha, pm8001_printk( "Sending Normal SATA command 0x%x inb %x\n", - sata_cmd.sata_fis.command, inb)); + sata_cmd.sata_fis.command, q_index)); /* dad (bit 0-1) is 0 */ sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((ncg_tag & 0xff)<<16) | @@ -3829,6 +4225,30 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, 
sata_cmd.addr_high = upper_32_bits(dma_addr); sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; + /* Check 4G Boundary */ + start_addr = cpu_to_le64(dma_addr); + end_addr = (start_addr + sata_cmd.len) - 1; + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + if (end_addr_high != sata_cmd.addr_high) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("The sg list address " + "start_addr=0x%016llx data_len=0x%x " + "end_addr_high=0x%08x end_addr_low=" + "0x%08x has crossed 4G boundary\n", + start_addr, sata_cmd.len, + end_addr_high, end_addr_low)); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, + buf_prd[0]); + sata_cmd.addr_low = + lower_32_bits(phys_addr); + sata_cmd.addr_high = + upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } } else if (task->num_scatter == 0) { sata_cmd.addr_low = 0; sata_cmd.addr_high = 0; @@ -3905,12 +4325,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, } } } - + q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, - &sata_cmd, outb++); - - /* rotate the outb queue */ - outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + &sata_cmd, q_index); return ret; } @@ -3941,9 +4358,16 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) ** [14] 0b disable spin up hold; 1b enable spin up hold ** [15] ob no change in current PHY analig setup 1b enable using SPAST */ - payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | - LINKMODE_AUTO | LINKRATE_15 | - LINKRATE_30 | LINKRATE_60 | phy_id); + if (!IS_SPCV_12G(pm8001_ha->pdev)) + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | + LINKRATE_30 | LINKRATE_60 | phy_id); + else + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | + LINKRATE_30 | LINKRATE_60 | LINKRATE_120 | + phy_id); + /* SSC Disable and SAS Analog ST configuration */ /** payload.ase_sh_lm_slr_phyid = @@ -4102,6 +4526,45 @@ pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) return IRQ_HANDLED; } +void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, + u32 operation, u32 phyid, u32 length, u32 *buf) +{ + u32 tag, i, j = 0; + int rc; + struct set_phy_profile_req payload; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SET_PHY_PROFILE; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n")); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk(" phy profile command for phy %x, length is %d\n", + payload.ppc_phyid, length)); + for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) { + payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i)); + j++; + } + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); +} + +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, + u32 length, u8 *buf) +{ + u32 page_code, i; + + page_code = SAS_PHY_ANALOG_SETTINGS_PAGE; + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + mpi_set_phy_profile_req(pm8001_ha, + SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf); + length = length + PHY_DWORD_LENGTH; + } + PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n")); +} const struct 
pm8001_dispatch pm8001_80xx_dispatch = { .name = "pmc80xx", .chip_init = pm80xx_chip_init, diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h index 2b760ba75d7b..9970a385795d 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.h +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -168,6 +168,11 @@ #define LINKRATE_15 (0x01 << 8) #define LINKRATE_30 (0x02 << 8) #define LINKRATE_60 (0x06 << 8) +#define LINKRATE_120 (0x08 << 8) + +/* phy_profile */ +#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04 +#define PHY_DWORD_LENGTH 0xC /* Thermal related */ #define THERMAL_ENABLE 0x1 @@ -210,6 +215,8 @@ #define SAS_DOPNRJT_RTRY_TMO 128 #define SAS_COPNRJT_RTRY_TMO 128 +/* for phy state */ +#define PHY_STATE_LINK_UP_SPCV 0x2 /* Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second. Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 @@ -1223,10 +1230,10 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; /* MSGU CONFIGURATION TABLE*/ -#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01 -#define SPCv_MSGU_CFG_TABLE_RESET 0x02 -#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04 -#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08 +#define SPCv_MSGU_CFG_TABLE_UPDATE 0x001 +#define SPCv_MSGU_CFG_TABLE_RESET 0x002 +#define SPCv_MSGU_CFG_TABLE_FREEZE 0x004 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008 #define MSGU_IBDB_SET 0x00 #define MSGU_HOST_INT_STATUS 0x08 #define MSGU_HOST_INT_MASK 0x0C @@ -1520,4 +1527,6 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; #define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + +#define MEMBASE_II_SHIFT_REGISTER 0x1010 #endif diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index e43db7742047..be8ce54f99b2 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1404,11 +1404,22 @@ enum { }; #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) +static struct genl_multicast_group pmcraid_mcgrps[] = { + { .name = "events", /* not really used - see ID discussion below */ }, +}; + static struct genl_family pmcraid_event_family = { - .id = GENL_ID_GENERATE, + /* + * Due to prior multicast group abuse (the code having assumed that + * the family ID can be used as a multicast group ID) we need to + * statically allocate a family (and thus group) ID. + */ + .id = GENL_ID_PMCRAID, .name = "pmcraid", .version = 1, - .maxattr = PMCRAID_AEN_ATTR_MAX + .maxattr = PMCRAID_AEN_ATTR_MAX, + .mcgrps = pmcraid_mcgrps, + .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps), }; /** @@ -1511,8 +1522,8 @@ static int pmcraid_notify_aen( return result; } - result = - genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); + result = genlmsg_multicast(&pmcraid_event_family, skb, + 0, 0, GFP_ATOMIC); /* If there are no listeners, genlmsg_multicast may return non-zero * value. 
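With the mcgrps[] array registered above, the group argument to genlmsg_multicast() becomes an index into pmcraid_mcgrps[] (0 == "events") rather than a global group ID, and the family pointer itself is passed instead of family.id. A minimal sketch of the resulting send path; pmcraid_send_event() is a hypothetical wrapper for illustration, not part of the driver, and error handling is elided:

#include <net/genetlink.h>

/* Hypothetical helper: broadcast an AEN skb to the "events" group. */
static int pmcraid_send_event(struct sk_buff *skb)
{
	/* portid 0 == sent from the kernel; group 0 == pmcraid_mcgrps[0] */
	return genlmsg_multicast(&pmcraid_event_family, skb, 0, 0,
				 GFP_ATOMIC);
}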
@@ -4314,6 +4325,7 @@ static struct scsi_host_template pmcraid_host_template = { .this_id = -1, .sg_tablesize = PMCRAID_MAX_IOADLS, .max_sectors = PMCRAID_IOA_MAX_SECTORS, + .no_write_same = 1, .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = pmcraid_host_attrs, diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 2ef497ebadc0..ee5c1833eb73 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -20,7 +20,7 @@ * | Device Discovery | 0x2095 | 0x2020-0x2022, | * | | | 0x2011-0x2012, | * | | | 0x2016 | - * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b | + * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | * | | | 0x302d,0x3033 | diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index df1b30ba938c..ff9c86b1a0d8 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) que = MSW(sts->handle); req = ha->req_q_map[que]; + /* Check for invalid queue pointer */ + if (req == NULL || + que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { + ql_dbg(ql_dbg_io, vha, 0x3059, + "Invalid status handle (0x%x): Bad req pointer. req=%p, " + "que=%u.\n", sts->handle, req, que); + return; + } + /* Validate handle. */ if (handle < req->num_outstanding_cmds) sp = req->outstanding_cmds[handle]; diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 62ee7131b204..30d20e74e48a 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -507,7 +507,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha) pci_write_config_word(ha->pdev, PCI_COMMAND, w); /* PCIe -- adjust Maximum Read Request Size (2048). 
*/ - if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) + if (pci_is_pcie(ha->pdev)) pcie_set_readrq(ha->pdev, 2048); ha->chip_revision = ha->pdev->revision; @@ -660,10 +660,8 @@ char * qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str) { struct qla_hw_data *ha = vha->hw; - int pcie_reg; - pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); - if (pcie_reg) { + if (pci_is_pcie(ha->pdev)) { strcpy(str, "PCIe iSA"); return str; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1db4819261cd..52be35e0300c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -494,18 +494,14 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) static char *pci_bus_modes[] = { "33", "66", "100", "133", }; struct qla_hw_data *ha = vha->hw; uint32_t pci_bus; - int pcie_reg; - pcie_reg = pci_pcie_cap(ha->pdev); - if (pcie_reg) { + if (pci_is_pcie(ha->pdev)) { char lwstr[6]; - uint16_t pcie_lstat, lspeed, lwidth; + uint32_t lstat, lspeed, lwidth; - pcie_reg += PCI_EXP_LNKCAP; - pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); - lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); - lwidth = (pcie_lstat & - (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; + pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); + lspeed = lstat & PCI_EXP_LNKCAP_SLS; + lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; strcpy(str, "PCIe ("); switch (lspeed) { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ff12d4677cc4..596480022b0a 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -10,7 +10,7 @@ * * Forward port and refactoring to modern qla2xxx and target/configfs * - * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> + * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index a6da313e253b..7eb19be35d46 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -2,12 +2,9 @@ * This file contains tcm implementation using v4 configfs fabric infrastructure * for QLogic target mode HBAs * - * ?? Copyright 2010-2011 RisingTide Systems LLC. + * (c) Copyright 2010-2013 Datera, Inc. * - * Licensed to the Linux Foundation under the General Public License (GPL) - * version 2. - * - * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> + * Author: Nicholas A. Bellinger <nab@daterainc.com> * * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from * the TCM_FC / Open-FCoE.org fabric module. 
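The tcm_qla2xxx hunks below drop the QLA_TPG_ATTRIB() accessor macro in favor of direct tpg->tpg_attrib field reads and add a demo_mode_login_only attribute (initialized to 1 in tcm_qla2xxx_make_tpg()). A sketch of the layout those reads rely on; the real definitions live in tcm_qla2xxx.h, outside this diff, and unrelated fields are elided:

struct tcm_qla2xxx_tpg_attrib {
	int generate_node_acls;
	int cache_dynamic_acls;
	int demo_mode_write_protect;
	int prod_mode_write_protect;
	int demo_mode_login_only;	/* new in this series */
};

struct tcm_qla2xxx_tpg {
	/* ... */
	struct tcm_qla2xxx_tpg_attrib tpg_attrib;	/* read directly below */
	struct se_portal_group se_tpg;
};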
@@ -333,7 +330,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->generate_node_acls; + return tpg->tpg_attrib.generate_node_acls; } static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) @@ -341,7 +338,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls; + return tpg->tpg_attrib.cache_dynamic_acls; } static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) @@ -349,7 +346,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect; + return tpg->tpg_attrib.demo_mode_write_protect; } static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) @@ -357,7 +354,15 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; + return tpg->tpg_attrib.prod_mode_write_protect; +} + +static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return tpg->tpg_attrib.demo_mode_login_only; } static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( @@ -489,38 +494,13 @@ static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) return 0; } -/* - * The LIO target core uses DMA_TO_DEVICE to mean that data is going - * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean - * that data is coming from the target (eg handling a READ). However, - * this is just the opposite of what we have to tell the DMA mapping - * layer -- eg when handling a READ, the HBA will have to DMA the data - * out of memory so it can send it to the initiator, which means we - * need to use DMA_TO_DEVICE when we map the data. 
- */ -static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd) -{ - if (se_cmd->se_cmd_flags & SCF_BIDI) - return DMA_BIDIRECTIONAL; - - switch (se_cmd->data_direction) { - case DMA_TO_DEVICE: - return DMA_FROM_DEVICE; - case DMA_FROM_DEVICE: - return DMA_TO_DEVICE; - case DMA_NONE: - default: - return DMA_NONE; - } -} - static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); cmd->bufflen = se_cmd->data_length; - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; @@ -656,7 +636,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) struct qla_tgt_cmd, se_cmd); cmd->bufflen = se_cmd->data_length; - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); cmd->sg_cnt = se_cmd->t_data_nents; @@ -680,7 +660,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg = NULL; cmd->sg_cnt = 0; cmd->offset = 0; - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); if (se_cmd->data_direction == DMA_FROM_DEVICE) { @@ -867,7 +847,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ struct tcm_qla2xxx_tpg, se_tpg); \ \ - return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \ + return sprintf(page, "%u\n", tpg->tpg_attrib.name); \ } \ \ static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ @@ -939,11 +919,19 @@ DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect); DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); +/* + * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only + */ +DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only); +DEF_QLA_TPG_ATTRIB(demo_mode_login_only); +QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR); + static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr, &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr, &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr, &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr, + &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr, NULL, }; @@ -1039,9 +1027,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic * NodeACLs */ - QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; - QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; - QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -1736,7 +1725,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { tcm_qla2xxx_check_demo_write_protect, .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_prod_write_protect, - .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index 
= tcm_qla2xxx_tpg_get_inst_index, @@ -1784,7 +1773,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, - .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, @@ -1841,16 +1830,16 @@ static int tcm_qla2xxx_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = tcm_qla2xxx_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the fabric for use within TCM */ @@ -1881,15 +1870,15 @@ static int tcm_qla2xxx_register_configfs(void) /* * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl */ - TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the npiv_fabric for use within TCM */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 9ba075fe9781..771f7b816443 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -29,6 +29,7 @@ struct 
tcm_qla2xxx_tpg_attrib { int cache_dynamic_acls; int demo_mode_write_protect; int prod_mode_write_protect; + int demo_mode_login_only; }; struct tcm_qla2xxx_tpg { @@ -44,8 +45,6 @@ struct tcm_qla2xxx_tpg { struct se_portal_group se_tpg; }; -#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib) - struct tcm_qla2xxx_fc_loopid { struct se_node_acl *se_nacl; }; diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 41327d46ecf5..084d1fd59c9e 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -306,6 +306,7 @@ struct ddb_entry { struct qla_ddb_index { struct list_head list; uint16_t fw_ddb_idx; + uint16_t flash_ddb_idx; struct dev_db_entry fw_ddb; uint8_t flash_isid[6]; }; diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 51d1a70f8b45..1243e5942b76 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -539,6 +539,10 @@ struct qla_flt_region { #define ENABLE_INTERNAL_LOOPBACK 0x04 #define ENABLE_EXTERNAL_LOOPBACK 0x08 +/* generic defines to enable/disable params */ +#define QL4_PARAM_DISABLE 0 +#define QL4_PARAM_ENABLE 1 + /*************************************************************************/ /* Host Adapter Initialization Control Block (from host) */ diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index e6f2a2669dbd..5cef2527180a 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -83,6 +83,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts); int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, char *password, int bidi, uint16_t *chap_index); +int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, + uint16_t idx, int bidi); void qla4xxx_queue_iocb(struct scsi_qla_host *ha); void qla4xxx_complete_iocb(struct scsi_qla_host *ha); diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h index 8503ad643bdd..655b7bb644d9 100644 --- a/drivers/scsi/qla4xxx/ql4_inline.h +++ b/drivers/scsi/qla4xxx/ql4_inline.h @@ -82,3 +82,15 @@ qla4xxx_disable_intrs(struct scsi_qla_host *ha) __qla4xxx_disable_intrs(ha); spin_unlock_irqrestore(&ha->hardware_lock, flags); } + +static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry) +{ + int type; + + if (chap_entry->flags & BIT_7) + type = LOCAL_CHAP; + else + type = BIDI_CHAP; + + return type; +} diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 62d4208af21f..22cbd005bdf4 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -1530,13 +1530,26 @@ exit_get_chap: return ret; } -static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, - char *password, uint16_t idx, int bidi) +/** + * qla4xxx_set_chap - Make a chap entry at the given index + * @ha: pointer to adapter structure + * @username: CHAP username to set + * @password: CHAP password to set + * @idx: CHAP index at which to make the entry + * @bidi: type of chap entry (chap_in or chap_out) + * + * Create chap entry at the given index with the information provided. + * + * Note: Caller should acquire the chap lock before getting here. 
+ **/ +int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, + uint16_t idx, int bidi) { int ret = 0; int rval = QLA_ERROR; uint32_t offset = 0; struct ql4_chap_table *chap_table; + uint32_t chap_size = 0; dma_addr_t chap_dma; chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); @@ -1554,7 +1567,20 @@ static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN); strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN); chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); - offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table)); + + if (is_qla40XX(ha)) { + chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table); + offset = FLASH_CHAP_OFFSET; + } else { /* Single region contains CHAP info for both ports which is + * divided into half for each port. + */ + chap_size = ha->hw.flt_chap_size / 2; + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + if (ha->port_num == 1) + offset += chap_size; + } + + offset += (idx * sizeof(struct ql4_chap_table)); rval = qla4xxx_set_flash(ha, chap_dma, offset, sizeof(struct ql4_chap_table), FLASH_OPT_RMW_COMMIT); @@ -1611,7 +1637,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, goto exit_unlock_uni_chap; } - if (!(chap_table->flags & BIT_6)) { + if (!(chap_table->flags & BIT_7)) { ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n"); rval = QLA_ERROR; goto exit_unlock_uni_chap; diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 1be6cefc390b..a28d5e624aab 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -149,6 +149,8 @@ static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, uint32_t *num_entries, char *buf); static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); +static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, + int len); /* * SCSI host template entry points @@ -252,6 +254,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { .send_ping = qla4xxx_send_ping, .get_chap = qla4xxx_get_chap_list, .delete_chap = qla4xxx_delete_chap, + .set_chap = qla4xxx_set_chap_entry, .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, .new_flashnode = qla4xxx_sysfs_ddb_add, @@ -508,6 +511,95 @@ static umode_t qla4_attr_is_visible(int param_type, int param) return 0; } +static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, + int16_t chap_index, + struct ql4_chap_table **chap_entry) +{ + int rval = QLA_ERROR; + int max_chap_entries; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); + rval = QLA_ERROR; + goto exit_get_chap; + } + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (chap_index > max_chap_entries) { + ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); + rval = QLA_ERROR; + goto exit_get_chap; + } + + *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; + if ((*chap_entry)->cookie != + __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + rval = QLA_ERROR; + *chap_entry = NULL; + } else { + rval = QLA_SUCCESS; + } + +exit_get_chap: + return rval; +} + +/** + * qla4xxx_find_free_chap_index - Find the first free chap index + * @ha: pointer to adapter structure + * 
@chap_index: CHAP index to be returned + * + * Find the first free chap index available in the chap table + * + * Note: Caller should acquire the chap lock before getting here. + **/ +static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, + uint16_t *chap_index) +{ + int i, rval; + int free_index = -1; + int max_chap_entries = 0; + struct ql4_chap_table *chap_table; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); + rval = QLA_ERROR; + goto exit_find_chap; + } + + for (i = 0; i < max_chap_entries; i++) { + chap_table = (struct ql4_chap_table *)ha->chap_list + i; + + if ((chap_table->cookie != + __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && + (i > MAX_RESRV_CHAP_IDX)) { + free_index = i; + break; + } + } + + if (free_index != -1) { + *chap_index = free_index; + rval = QLA_SUCCESS; + } else { + rval = QLA_ERROR; + } + +exit_find_chap: + return rval; +} + static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, uint32_t *num_entries, char *buf) { @@ -691,6 +783,111 @@ exit_delete_chap: return ret; } +/** + * qla4xxx_set_chap_entry - Make chap entry with given information + * @shost: pointer to host + * @data: chap info - credentials, index and type to make chap entry + * @len: length of data + * + * Add or update chap entry with the given information + **/ +static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_chap_rec chap_rec; + struct ql4_chap_table *chap_entry = NULL; + struct iscsi_param_info *param_info; + struct nlattr *attr; + int max_chap_entries = 0; + int type; + int rem = len; + int rc = 0; + + memset(&chap_rec, 0, sizeof(chap_rec)); + + nla_for_each_attr(attr, data, len, rem) { + param_info = nla_data(attr); + + switch (param_info->param) { + case ISCSI_CHAP_PARAM_INDEX: + chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; + break; + case ISCSI_CHAP_PARAM_CHAP_TYPE: + chap_rec.chap_type = param_info->value[0]; + break; + case ISCSI_CHAP_PARAM_USERNAME: + memcpy(chap_rec.username, param_info->value, + param_info->len); + break; + case ISCSI_CHAP_PARAM_PASSWORD: + memcpy(chap_rec.password, param_info->value, + param_info->len); + break; + case ISCSI_CHAP_PARAM_PASSWORD_LEN: + chap_rec.password_length = param_info->value[0]; + break; + default: + ql4_printk(KERN_ERR, ha, + "%s: No such sysfs attribute\n", __func__); + rc = -ENOSYS; + goto exit_set_chap; + }; + } + + if (chap_rec.chap_type == CHAP_TYPE_IN) + type = BIDI_CHAP; + else + type = LOCAL_CHAP; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + mutex_lock(&ha->chap_sem); + if (chap_rec.chap_tbl_idx < max_chap_entries) { + rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, + &chap_entry); + if (!rc) { + if (!(type == qla4xxx_get_chap_type(chap_entry))) { + ql4_printk(KERN_INFO, ha, + "Type mismatch for CHAP entry %d\n", + chap_rec.chap_tbl_idx); + rc = -EINVAL; + goto exit_unlock_chap; + } + + /* If chap index is in use then don't modify it */ + rc = qla4xxx_is_chap_active(shost, + chap_rec.chap_tbl_idx); + if (rc) { + ql4_printk(KERN_INFO, ha, + "CHAP entry %d is in use\n", + chap_rec.chap_tbl_idx); + rc = -EBUSY; + goto exit_unlock_chap; + } + } + } else { + rc = 
qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); + if (rc) { + ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); + rc = -EBUSY; + goto exit_unlock_chap; + } + } + + rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, + chap_rec.chap_tbl_idx, type); + +exit_unlock_chap: + mutex_unlock(&ha->chap_sem); + +exit_set_chap: + return rc; +} + static int qla4xxx_get_iface_param(struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf) @@ -1455,9 +1652,12 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, struct iscsi_session *sess = cls_sess->dd_data; struct ddb_entry *ddb_entry = sess->dd_data; struct scsi_qla_host *ha = ddb_entry->ha; + struct iscsi_cls_conn *cls_conn = ddb_entry->conn; + struct ql4_chap_table chap_tbl; int rval, len; uint16_t idx; + memset(&chap_tbl, 0, sizeof(chap_tbl)); switch (param) { case ISCSI_PARAM_CHAP_IN_IDX: rval = qla4xxx_get_chap_index(ha, sess->username_in, @@ -1469,14 +1669,46 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, len = sprintf(buf, "%hu\n", idx); break; case ISCSI_PARAM_CHAP_OUT_IDX: - rval = qla4xxx_get_chap_index(ha, sess->username, - sess->password, LOCAL_CHAP, - &idx); + if (ddb_entry->ddb_type == FLASH_DDB) { + if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { + idx = ddb_entry->chap_tbl_idx; + rval = QLA_SUCCESS; + } else { + rval = QLA_ERROR; + } + } else { + rval = qla4xxx_get_chap_index(ha, sess->username, + sess->password, + LOCAL_CHAP, &idx); + } if (rval) len = sprintf(buf, "\n"); else len = sprintf(buf, "%hu\n", idx); break; + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + /* First, populate session username and password for FLASH DDB, + * if not already done. This happens when session login fails + * for a FLASH DDB. 
+ */ + if (ddb_entry->ddb_type == FLASH_DDB && + ddb_entry->chap_tbl_idx != INVALID_ENTRY && + !sess->username && !sess->password) { + idx = ddb_entry->chap_tbl_idx; + rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, + chap_tbl.secret, + idx); + if (!rval) { + iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, + (char *)chap_tbl.name, + strlen((char *)chap_tbl.name)); + iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, + (char *)chap_tbl.secret, + chap_tbl.secret_len); + } + } + /* allow fall-through */ default: return iscsi_session_get_param(cls_sess, param, buf); } @@ -2373,11 +2605,6 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, COPY_ISID(sess->isid, fw_ddb_entry->isid); ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); - if (ddb_link < MAX_DDB_ENTRIES) - sess->discovery_parent_idx = ddb_link; - else - sess->discovery_parent_idx = DDB_NO_LINK; - if (ddb_link == DDB_ISNS) disc_parent = ISCSI_DISC_PARENT_ISNS; else if (ddb_link == DDB_NO_LINK) @@ -2402,6 +2629,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, int buflen = 0; struct iscsi_session *sess; struct ddb_entry *ddb_entry; + struct ql4_chap_table chap_tbl; struct iscsi_conn *conn; char ip_addr[DDB_IPADDR_LEN]; uint16_t options = 0; @@ -2409,6 +2637,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, sess = cls_sess->dd_data; ddb_entry = sess->dd_data; conn = cls_conn->dd_data; + memset(&chap_tbl, 0, sizeof(chap_tbl)); ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); @@ -2435,6 +2664,19 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, (char *)fw_ddb_entry->iscsi_name, buflen); iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, (char *)ha->name_string, buflen); + + if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { + if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, + chap_tbl.secret, + ddb_entry->chap_tbl_idx)) { + iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, + (char *)chap_tbl.name, + strlen((char *)chap_tbl.name)); + iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, + (char *)chap_tbl.secret, + chap_tbl.secret_len); + } + } } void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, @@ -4937,7 +5179,8 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, } static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, - struct dev_db_entry *fw_ddb_entry) + struct dev_db_entry *fw_ddb_entry, + uint32_t *index) { struct ddb_entry *ddb_entry; struct ql4_tuple_ddb *fw_tddb = NULL; @@ -4971,6 +5214,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { ret = QLA_SUCCESS; /* found */ + if (index != NULL) + *index = idx; goto exit_check; } } @@ -5206,6 +5451,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, ddb_entry->ha = ha; ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; ddb_entry->ddb_change = qla4xxx_flash_ddb_change; + ddb_entry->chap_tbl_idx = INVALID_ENTRY; atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); @@ -5267,6 +5513,87 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) } while (time_after(wtime, jiffies)); } +static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, + struct dev_db_entry *flash_ddb_entry) +{ + uint16_t options = 0; + size_t ip_len = IP_ADDR_LEN; + + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) + ip_len = 
IPv6_ADDR_LEN; + + if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) + return QLA_ERROR; + + if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], + sizeof(fw_ddb_entry->isid))) + return QLA_ERROR; + + if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, + sizeof(fw_ddb_entry->port))) + return QLA_ERROR; + + return QLA_SUCCESS; +} + +static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint32_t fw_idx, uint32_t *flash_index) +{ + struct dev_db_entry *flash_ddb_entry; + dma_addr_t flash_ddb_entry_dma; + uint32_t idx = 0; + int max_ddbs; + int ret = QLA_ERROR, status; + + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &flash_ddb_entry_dma); + if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { + ql4_printk(KERN_ERR, ha, "Out of memory\n"); + goto exit_find_st_idx; + } + + status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, + flash_ddb_entry_dma, fw_idx); + if (status == QLA_SUCCESS) { + status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); + if (status == QLA_SUCCESS) { + *flash_index = fw_idx; + ret = QLA_SUCCESS; + goto exit_find_st_idx; + } + } + + for (idx = 0; idx < max_ddbs; idx++) { + status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, + flash_ddb_entry_dma, idx); + if (status == QLA_ERROR) + continue; + + status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); + if (status == QLA_SUCCESS) { + *flash_index = idx; + ret = QLA_SUCCESS; + goto exit_find_st_idx; + } + } + + if (idx == max_ddbs) + ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", + fw_idx); + +exit_find_st_idx: + if (flash_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, + flash_ddb_entry_dma); + + return ret; +} + static void qla4xxx_build_st_list(struct scsi_qla_host *ha, struct list_head *list_st) { @@ -5278,6 +5605,7 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha, int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; + uint32_t flash_index = -1; uint16_t conn_id = 0; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, @@ -5310,6 +5638,19 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha, if (!st_ddb_idx) break; + ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, + &flash_index); + if (ret == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, + "No flash entry for ST at idx [%d]\n", idx); + st_ddb_idx->flash_ddb_idx = idx; + } else { + ql4_printk(KERN_INFO, ha, + "ST at idx [%d] is stored at flash [%d]\n", + idx, flash_index); + st_ddb_idx->flash_ddb_idx = flash_index; + } + st_ddb_idx->fw_ddb_idx = idx; list_add_tail(&st_ddb_idx->list, list_st); @@ -5354,6 +5695,28 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, } } +static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + struct dev_db_entry *fw_ddb_entry) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_session *sess; + uint32_t max_ddbs = 0; + uint16_t ddb_link = -1; + + max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + cls_sess = ddb_entry->sess; + sess = cls_sess->dd_data; + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link < max_ddbs) + sess->discovery_parent_idx = ddb_link; + else + sess->discovery_parent_idx = DDB_NO_LINK; +} + static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, int is_reset, uint16_t idx) @@ -5418,6 +5781,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, /* Update sess/conn params */ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); + qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); if (is_reset == RESET_ADAPTER) { iscsi_block_session(cls_sess); @@ -5434,17 +5798,43 @@ exit_setup: return ret; } +static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, + struct list_head *list_ddb, + struct dev_db_entry *fw_ddb_entry) +{ + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; + uint16_t ddb_link; + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { + if (ddb_idx->fw_ddb_idx == ddb_link) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Updating NT parent idx from [%d] to [%d]\n", + ddb_link, ddb_idx->flash_ddb_idx)); + fw_ddb_entry->ddb_link = + cpu_to_le16(ddb_idx->flash_ddb_idx); + return; + } + } +} + static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, - struct list_head *list_nt, int is_reset) + struct list_head *list_nt, + struct list_head *list_st, + int is_reset) { struct dev_db_entry *fw_ddb_entry; + struct ddb_entry *ddb_entry = NULL; dma_addr_t fw_ddb_dma; int max_ddbs; int fw_idx_size; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; + uint32_t ddb_idx = -1; uint16_t conn_id = 0; + uint16_t ddb_link = -1; struct qla_ddb_index *nt_ddb_idx; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, @@ -5471,12 +5861,18 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) goto continue_next_nt; + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link < max_ddbs) + qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); + if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || - state == DDB_DS_SESSION_FAILED)) + state == DDB_DS_SESSION_FAILED) && + (is_reset == INIT_ADAPTER)) goto continue_next_nt; DEBUG2(ql4_printk(KERN_INFO, ha, "Adding DDB to session = 0x%x\n", idx)); + if (is_reset == INIT_ADAPTER) { nt_ddb_idx = vmalloc(fw_idx_size); if (!nt_ddb_idx) @@ -5506,9 +5902,17 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, list_add_tail(&nt_ddb_idx->list, list_nt); } else if (is_reset == RESET_ADAPTER) { - if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == - QLA_SUCCESS) + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, + &ddb_idx); + if (ret == QLA_SUCCESS) { + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, + ddb_idx); + if (ddb_entry != NULL) + qla4xxx_update_sess_disc_idx(ha, + ddb_entry, + fw_ddb_entry); goto continue_next_nt; + } } ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); @@ -5526,7 +5930,8 @@ exit_nt_list: } static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, - struct list_head *list_nt) + struct list_head *list_nt, + uint16_t target_id) { struct dev_db_entry *fw_ddb_entry; dma_addr_t fw_ddb_dma; @@ -5571,13 +5976,16 @@ static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, nt_ddb_idx->fw_ddb_idx = idx; - ret = qla4xxx_is_session_exists(ha, fw_ddb_entry); + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, 
NULL); if (ret == QLA_SUCCESS) { /* free nt_ddb_idx and do not add to list_nt */ vfree(nt_ddb_idx); goto continue_next_new_nt; } + if (target_id < max_ddbs) + fw_ddb_entry->ddb_link = cpu_to_le16(target_id); + list_add_tail(&nt_ddb_idx->list, list_nt); ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, @@ -5894,7 +6302,8 @@ exit_ddb_conn_open: } static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, - struct dev_db_entry *fw_ddb_entry) + struct dev_db_entry *fw_ddb_entry, + uint16_t target_id) { struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; struct list_head list_nt; @@ -5919,7 +6328,7 @@ static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, if (ret == QLA_ERROR) goto exit_login_st; - qla4xxx_build_new_nt_list(ha, &list_nt); + qla4xxx_build_new_nt_list(ha, &list_nt, target_id); list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { list_del_init(&ddb_idx->list); @@ -5946,7 +6355,7 @@ static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, { int ret = QLA_ERROR; - ret = qla4xxx_is_session_exists(ha, fw_ddb_entry); + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); if (ret != QLA_SUCCESS) ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, idx); @@ -6001,7 +6410,8 @@ static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, fw_ddb_entry->cookie = DDB_VALID_COOKIE; if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) - ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry); + ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, + fnode_sess->target_id); else ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, fnode_sess->target_id); @@ -6522,10 +6932,13 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); struct iscsi_flashnode_param_info *fnode_param; + struct ql4_chap_table chap_tbl; struct nlattr *attr; + uint16_t chap_out_idx = INVALID_ENTRY; int rc = QLA_ERROR; uint32_t rem = len; + memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); nla_for_each_attr(attr, data, len, rem) { fnode_param = nla_data(attr); @@ -6567,6 +6980,10 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, break; case ISCSI_FLASHNODE_CHAP_AUTH_EN: fnode_sess->chap_auth_en = fnode_param->value[0]; + /* Invalidate chap index if chap auth is disabled */ + if (!fnode_sess->chap_auth_en) + fnode_sess->chap_out_idx = INVALID_ENTRY; + break; case ISCSI_FLASHNODE_SNACK_REQ_EN: fnode_conn->snack_req_en = fnode_param->value[0]; @@ -6705,6 +7122,17 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, fnode_conn->exp_statsn = *(uint32_t *)fnode_param->value; break; + case ISCSI_FLASHNODE_CHAP_OUT_IDX: + chap_out_idx = *(uint16_t *)fnode_param->value; + if (!qla4xxx_get_uni_chap_at_index(ha, + chap_tbl.name, + chap_tbl.secret, + chap_out_idx)) { + fnode_sess->chap_out_idx = chap_out_idx; + /* Enable chap auth if chap index is valid */ + fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; + } + break; default: ql4_printk(KERN_ERR, ha, "%s: No such sysfs attribute\n", __func__); @@ -6926,11 +7354,10 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) schedule_timeout_uninterruptible(HZ / 10); } while (time_after(wtime, jiffies)); - /* Free up the sendtargets list */ - qla4xxx_free_ddb_list(&list_st); - qla4xxx_build_nt_list(ha, &list_nt, is_reset); + qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); + qla4xxx_free_ddb_list(&list_st); qla4xxx_free_ddb_list(&list_nt); qla4xxx_free_ddb_index(ha); diff 
--git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index eaa808e6ba91..fe0bcb18fb26 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -78,11 +78,6 @@ static void scsi_done(struct scsi_cmnd *cmd); * Definitions and constants. */ -#define MIN_RESET_DELAY (2*HZ) - -/* Do not call reset on error if we just did a reset within 15 sec. */ -#define MIN_RESET_PERIOD (15*HZ) - /* * Note - the initial logging level can be set here to log events at boot time. * After the system is up, you may enable logging via the /proc interface. @@ -658,7 +653,6 @@ EXPORT_SYMBOL(scsi_cmd_get_serial); int scsi_dispatch_cmd(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; - unsigned long timeout; int rtn = 0; atomic_inc(&cmd->device->iorequest_cnt); @@ -704,28 +698,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) (cmd->device->lun << 5 & 0xe0); } - /* - * We will wait MIN_RESET_DELAY clock ticks after the last reset so - * we can avoid the drive not being ready. - */ - timeout = host->last_reset + MIN_RESET_DELAY; - - if (host->resetting && time_before(jiffies, timeout)) { - int ticks_remaining = timeout - jiffies; - /* - * NOTE: This may be executed from within an interrupt - * handler! This is bad, but for now, it'll do. The irq - * level of the interrupt handler has been masked out by the - * platform dependent interrupt handling code already, so the - * sti() here will not cause another call to the SCSI host's - * interrupt handler (assuming there is one irq-level per - * host). - */ - while (--ticks_remaining >= 0) - mdelay(1 + 999 / HZ); - host->resetting = 0; - } - scsi_log_send(cmd); /* diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 01c0ffa31276..80b8b10edf41 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -169,7 +169,7 @@ static int scsi_debug_dix = DEF_DIX; static int scsi_debug_dsense = DEF_D_SENSE; static int scsi_debug_every_nth = DEF_EVERY_NTH; static int scsi_debug_fake_rw = DEF_FAKE_RW; -static int scsi_debug_guard = DEF_GUARD; +static unsigned int scsi_debug_guard = DEF_GUARD; static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; static int scsi_debug_max_luns = DEF_MAX_LUNS; static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; @@ -293,6 +293,20 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 0, 0, 0x0, 0x0}; +static void *fake_store(unsigned long long lba) +{ + lba = do_div(lba, sdebug_store_sectors); + + return fake_storep + lba * scsi_debug_sector_size; +} + +static struct sd_dif_tuple *dif_store(sector_t sector) +{ + sector = do_div(sector, sdebug_store_sectors); + + return dif_storep + sector; +} + static int sdebug_add_adapter(void); static void sdebug_remove_adapter(void); @@ -1731,25 +1745,22 @@ static int do_device_access(struct scsi_cmnd *scmd, return ret; } -static u16 dif_compute_csum(const void *buf, int len) +static __be16 dif_compute_csum(const void *buf, int len) { - u16 csum; + __be16 csum; - switch (scsi_debug_guard) { - case 1: - csum = ip_compute_csum(buf, len); - break; - case 0: + if (scsi_debug_guard) + csum = (__force __be16)ip_compute_csum(buf, len); + else csum = cpu_to_be16(crc_t10dif(buf, len)); - break; - } + return csum; } static int dif_verify(struct sd_dif_tuple *sdt, const void *data, sector_t sector, u32 ei_lba) { - u16 csum = dif_compute_csum(data, scsi_debug_sector_size); + __be16 csum = dif_compute_csum(data, scsi_debug_sector_size); if (sdt->guard_tag != csum) { pr_err("%s: GUARD check 
failed on sector %lu rcvd 0x%04x, data 0x%04x\n", @@ -1775,59 +1786,71 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data, return 0; } -static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, - unsigned int sectors, u32 ei_lba) +static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector, + unsigned int sectors, bool read) { unsigned int i, resid; struct scatterlist *psgl; - struct sd_dif_tuple *sdt; - sector_t sector; - sector_t tmp_sec = start_sec; void *paddr; + const void *dif_store_end = dif_storep + sdebug_store_sectors; - start_sec = do_div(tmp_sec, sdebug_store_sectors); + /* Bytes of protection data to copy into sgl */ + resid = sectors * sizeof(*dif_storep); - sdt = dif_storep + start_sec; + scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { + int len = min(psgl->length, resid); + void *start = dif_store(sector); + int rest = 0; - for (i = 0 ; i < sectors ; i++) { - int ret; + if (dif_store_end < start + len) + rest = start + len - dif_store_end; - if (sdt[i].app_tag == 0xffff) - continue; + paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; - sector = start_sec + i; + if (read) + memcpy(paddr, start, len - rest); + else + memcpy(start, paddr, len - rest); - ret = dif_verify(&sdt[i], - fake_storep + sector * scsi_debug_sector_size, - sector, ei_lba); - if (ret) { - dif_errors++; - return ret; + if (rest) { + if (read) + memcpy(paddr + len - rest, dif_storep, rest); + else + memcpy(dif_storep, paddr + len - rest, rest); } - ei_lba++; + sector += len / sizeof(*dif_storep); + resid -= len; + kunmap_atomic(paddr); } +} - /* Bytes of protection data to copy into sgl */ - resid = sectors * sizeof(*dif_storep); - sector = start_sec; +static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, + unsigned int sectors, u32 ei_lba) +{ + unsigned int i; + struct sd_dif_tuple *sdt; + sector_t sector; - scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { - int len = min(psgl->length, resid); + for (i = 0; i < sectors; i++) { + int ret; - paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; - memcpy(paddr, dif_storep + sector, len); + sector = start_sec + i; + sdt = dif_store(sector); - sector += len / sizeof(*dif_storep); - if (sector >= sdebug_store_sectors) { - /* Force wrap */ - tmp_sec = sector; - sector = do_div(tmp_sec, sdebug_store_sectors); + if (sdt->app_tag == cpu_to_be16(0xffff)) + continue; + + ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); + if (ret) { + dif_errors++; + return ret; } - resid -= len; - kunmap_atomic(paddr); + + ei_lba++; } + dif_copy_prot(SCpnt, start_sec, sectors, true); dix_reads++; return 0; @@ -1910,15 +1933,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, { int i, j, ret; struct sd_dif_tuple *sdt; - struct scatterlist *dsgl = scsi_sglist(SCpnt); + struct scatterlist *dsgl; struct scatterlist *psgl = scsi_prot_sglist(SCpnt); void *daddr, *paddr; - sector_t tmp_sec = start_sec; - sector_t sector; + sector_t sector = start_sec; int ppage_offset; - sector = do_div(tmp_sec, sdebug_store_sectors); - BUG_ON(scsi_sg_count(SCpnt) == 0); BUG_ON(scsi_prot_sg_count(SCpnt) == 0); @@ -1946,25 +1966,13 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, sdt = paddr + ppage_offset; - ret = dif_verify(sdt, daddr + j, start_sec, ei_lba); + ret = dif_verify(sdt, daddr + j, sector, ei_lba); if (ret) { dump_sector(daddr + j, scsi_debug_sector_size); goto out; } - /* Would be great to copy this in bigger - * chunks. 
However, for the sake of - * correctness we need to verify each sector - * before writing it to "stable" storage - */ - memcpy(dif_storep + sector, sdt, sizeof(*sdt)); - sector++; - - if (sector == sdebug_store_sectors) - sector = 0; /* Force wrap */ - - start_sec++; ei_lba++; ppage_offset += sizeof(struct sd_dif_tuple); } @@ -1973,6 +1981,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, kunmap_atomic(daddr); } + dif_copy_prot(SCpnt, start_sec, sectors, false); dix_writes++; return 0; @@ -2742,7 +2751,7 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO); module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); -module_param_named(guard, scsi_debug_guard, int, S_IRUGO); +module_param_named(guard, scsi_debug_guard, uint, S_IRUGO); module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); @@ -3172,7 +3181,7 @@ DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL); static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard); + return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard); } DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 83e591b60193..e8bee9f0ad0f 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -87,6 +87,18 @@ void scsi_schedule_eh(struct Scsi_Host *shost) } EXPORT_SYMBOL_GPL(scsi_schedule_eh); +static int scsi_host_eh_past_deadline(struct Scsi_Host *shost) +{ + if (!shost->last_reset || !shost->eh_deadline) + return 0; + + if (time_before(jiffies, + shost->last_reset + shost->eh_deadline)) + return 0; + + return 1; +} + /** * scsi_eh_scmd_add - add scsi cmd to error handling. * @scmd: scmd to run eh on. 
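The scsi_error.c changes below introduce an eh_deadline for the recovery sequence as a whole: last_reset is stamped when error handling begins, and each subsequent stage first consults scsi_host_eh_past_deadline() and bails out so recovery escalates instead of continuing to wait on slower stages. A condensed sketch of the pattern every hunk repeats (the helper name here is hypothetical; the patch open-codes this at each call site):

/* Assumed context: inside drivers/scsi/scsi_error.c, where the static
 * scsi_host_eh_past_deadline() added above is visible. */
static int eh_stage_past_deadline(struct Scsi_Host *shost,
				  struct list_head *partial,
				  struct list_head *work_q)
{
	unsigned long flags;
	int past;

	/* last_reset and eh_deadline are sampled under the host lock. */
	spin_lock_irqsave(shost->host_lock, flags);
	past = scsi_host_eh_past_deadline(shost);
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (past)
		/* Push partially processed commands back onto work_q so the
		 * caller can skip this stage and fall through toward a full
		 * host reset. */
		list_splice_init(partial, work_q);

	return past;
}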
@@ -109,6 +121,9 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag) if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) goto out_unlock; + if (shost->eh_deadline && !shost->last_reset) + shost->last_reset = jiffies; + ret = 1; scmd->eh_eflags |= eh_flag; list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); @@ -138,6 +153,9 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) trace_scsi_dispatch_cmd_timeout(scmd); scsi_log_completion(scmd, TIMEOUT_ERROR); + if (host->eh_deadline && !host->last_reset) + host->last_reset = jiffies; + if (host->transportt->eh_timed_out) rtn = host->transportt->eh_timed_out(scmd); else if (host->hostt->eh_timed_out) @@ -990,13 +1008,26 @@ int scsi_eh_get_sense(struct list_head *work_q, struct list_head *done_q) { struct scsi_cmnd *scmd, *next; + struct Scsi_Host *shost; int rtn; + unsigned long flags; list_for_each_entry_safe(scmd, next, work_q, eh_entry) { if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) || SCSI_SENSE_VALID(scmd)) continue; + shost = scmd->device->host; + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_eh_past_deadline(shost)) { + spin_unlock_irqrestore(shost->host_lock, flags); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + break; + } + spin_unlock_irqrestore(shost->host_lock, flags); SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd, "%s: requesting sense\n", current->comm)); @@ -1082,11 +1113,28 @@ static int scsi_eh_test_devices(struct list_head *cmd_list, struct scsi_cmnd *scmd, *next; struct scsi_device *sdev; int finish_cmds; + unsigned long flags; while (!list_empty(cmd_list)) { scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry); sdev = scmd->device; + if (!try_stu) { + spin_lock_irqsave(sdev->host->host_lock, flags); + if (scsi_host_eh_past_deadline(sdev->host)) { + /* Push items back onto work_q */ + list_splice_init(cmd_list, work_q); + spin_unlock_irqrestore(sdev->host->host_lock, + flags); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, sdev->host, + "skip %s, past eh deadline", + __func__)); + break; + } + spin_unlock_irqrestore(sdev->host->host_lock, flags); + } + finish_cmds = !scsi_device_online(scmd->device) || (try_stu && !scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) || @@ -1122,26 +1170,42 @@ static int scsi_eh_abort_cmds(struct list_head *work_q, struct scsi_cmnd *scmd, *next; LIST_HEAD(check_list); int rtn; + struct Scsi_Host *shost; + unsigned long flags; list_for_each_entry_safe(scmd, next, work_q, eh_entry) { if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD)) continue; + shost = scmd->device->host; + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_eh_past_deadline(shost)) { + spin_unlock_irqrestore(shost->host_lock, flags); + list_splice_init(&check_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + return list_empty(work_q); + } + spin_unlock_irqrestore(shost->host_lock, flags); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:" "0x%p\n", current->comm, scmd)); - rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd); - if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { - scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; - if (rtn == FAST_IO_FAIL) - scsi_eh_finish_cmd(scmd, done_q); - else - list_move_tail(&scmd->eh_entry, &check_list); - } else + rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); + if (rtn == FAILED) { SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" " cmd failed:" "0x%p\n", current->comm, scmd)); + 
list_splice_init(&check_list, work_q); + return list_empty(work_q); + } + scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; + if (rtn == FAST_IO_FAIL) + scsi_eh_finish_cmd(scmd, done_q); + else + list_move_tail(&scmd->eh_entry, &check_list); } return scsi_eh_test_devices(&check_list, work_q, done_q, 0); @@ -1187,8 +1251,19 @@ static int scsi_eh_stu(struct Scsi_Host *shost, { struct scsi_cmnd *scmd, *stu_scmd, *next; struct scsi_device *sdev; + unsigned long flags; shost_for_each_device(sdev, shost) { + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_eh_past_deadline(shost)) { + spin_unlock_irqrestore(shost->host_lock, flags); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + break; + } + spin_unlock_irqrestore(shost->host_lock, flags); stu_scmd = NULL; list_for_each_entry(scmd, work_q, eh_entry) if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) && @@ -1241,9 +1316,20 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, { struct scsi_cmnd *scmd, *bdr_scmd, *next; struct scsi_device *sdev; + unsigned long flags; int rtn; shost_for_each_device(sdev, shost) { + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_eh_past_deadline(shost)) { + spin_unlock_irqrestore(shost->host_lock, flags); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + break; + } + spin_unlock_irqrestore(shost->host_lock, flags); bdr_scmd = NULL; list_for_each_entry(scmd, work_q, eh_entry) if (scmd->device == sdev) { @@ -1303,6 +1389,21 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, struct scsi_cmnd *next, *scmd; int rtn; unsigned int id; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_eh_past_deadline(shost)) { + spin_unlock_irqrestore(shost->host_lock, flags); + /* push back on work queue for further processing */ + list_splice_init(&check_list, work_q); + list_splice_init(&tmp_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + return list_empty(work_q); + } + spin_unlock_irqrestore(shost->host_lock, flags); scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry); id = scmd_id(scmd); @@ -1347,6 +1448,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, LIST_HEAD(check_list); unsigned int channel; int rtn; + unsigned long flags; /* * we really want to loop over the various channels, and do this on @@ -1356,6 +1458,18 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, */ for (channel = 0; channel <= shost->max_channel; channel++) { + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_eh_past_deadline(shost)) { + spin_unlock_irqrestore(shost->host_lock, flags); + list_splice_init(&check_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + return list_empty(work_q); + } + spin_unlock_irqrestore(shost->host_lock, flags); + chan_scmd = NULL; list_for_each_entry(scmd, work_q, eh_entry) { if (channel == scmd_channel(scmd)) { @@ -1755,8 +1869,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost) * will be requests for character device operations, and also for * ioctls to queued block devices. 
*/ - SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", - __func__)); + SCSI_LOG_ERROR_RECOVERY(3, + printk("scsi_eh_%d waking up host to restart\n", + shost->host_no)); spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_set_state(shost, SHOST_RUNNING)) @@ -1883,6 +1998,10 @@ static void scsi_unjam_host(struct Scsi_Host *shost) if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q)) scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); + spin_lock_irqsave(shost->host_lock, flags); + if (shost->eh_deadline) + shost->last_reset = 0; + spin_unlock_irqrestore(shost->host_lock, flags); scsi_eh_flush_done_q(&eh_done_q); } @@ -1909,7 +2028,7 @@ int scsi_error_handler(void *data) if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || shost->host_failed != shost->host_busy) { SCSI_LOG_ERROR_RECOVERY(1, - printk("Error handler scsi_eh_%d sleeping\n", + printk("scsi_eh_%d: sleeping\n", shost->host_no)); schedule(); continue; @@ -1917,8 +2036,9 @@ int scsi_error_handler(void *data) __set_current_state(TASK_RUNNING); SCSI_LOG_ERROR_RECOVERY(1, - printk("Error handler scsi_eh_%d waking up\n", - shost->host_no)); + printk("scsi_eh_%d: waking up %d/%d/%d\n", + shost->host_no, shost->host_eh_scheduled, + shost->host_failed, shost->host_busy)); /* * We have a host that is failing for some reason. Figure out diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index d1549b74e2d1..7bd7f0d5f050 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) host_dev = scsi_get_device(shost); if (host_dev && host_dev->dma_mask) - bounce_limit = *host_dev->dma_mask; + bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; return bounce_limit; } diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 4c5aabe21755..af4c050ce6e4 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -54,7 +54,8 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *)) /* * All the high-level SCSI drivers that implement runtime * PM treat runtime suspend, system suspend, and system - * hibernate identically. + * hibernate nearly identically. In all cases the requirements + * for runtime suspension are stricter. 
*/ if (pm_runtime_suspended(dev)) return 0; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 40c639491b27..8ff62c26a41c 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -281,6 +281,42 @@ exit_store_host_reset: static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); +static ssize_t +show_shost_eh_deadline(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + + return sprintf(buf, "%d\n", shost->eh_deadline / HZ); +} + +static ssize_t +store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + int ret = -EINVAL; + int deadline; + unsigned long flags; + + if (shost->transportt && shost->transportt->eh_strategy_handler) + return ret; + + if (sscanf(buf, "%d\n", &deadline) == 1) { + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_in_recovery(shost)) + ret = -EBUSY; + else { + shost->eh_deadline = deadline * HZ; + ret = count; + } + spin_unlock_irqrestore(shost->host_lock, flags); + } + return ret; +} + +static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); + shost_rd_attr(unique_id, "%u\n"); shost_rd_attr(host_busy, "%hu\n"); shost_rd_attr(cmd_per_lun, "%hd\n"); @@ -308,6 +344,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = { &dev_attr_prot_capabilities.attr, &dev_attr_prot_guard_type.attr, &dev_attr_host_reset.attr, + &dev_attr_eh_deadline.attr, NULL }; @@ -529,6 +566,7 @@ static int scsi_sdev_check_buf_bit(const char *buf) */ sdev_rd_attr (device_blocked, "%d\n"); sdev_rd_attr (queue_depth, "%d\n"); +sdev_rd_attr (device_busy, "%d\n"); sdev_rd_attr (type, "%d\n"); sdev_rd_attr (scsi_level, "%d\n"); sdev_rd_attr (vendor, "%.8s\n"); @@ -750,6 +788,7 @@ static struct attribute *scsi_sdev_attrs[] = { &dev_attr_device_blocked.attr, &dev_attr_type.attr, &dev_attr_scsi_level.attr, + &dev_attr_device_busy.attr, &dev_attr_vendor.attr, &dev_attr_model.attr, &dev_attr_rev.attr, diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index e4a989fa477d..63a6ca49d4e5 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2744,6 +2744,28 @@ exit_get_chap: return err; } +static int iscsi_set_chap(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + int err = 0; + + if (!transport->set_chap) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.set_path.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.set_path.host_no); + return -ENODEV; + } + + err = transport->set_chap(shost, data, len); + scsi_host_put(shost); + return err; +} + static int iscsi_delete_chap(struct iscsi_transport *transport, struct iscsi_uevent *ev) { @@ -3234,6 +3256,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: err = iscsi_logout_flashnode_sid(transport, ev); break; + case ISCSI_UEVENT_SET_CHAP: + err = iscsi_set_chap(transport, ev, + nlmsg_attrlen(nlh, sizeof(*ev))); + break; default: err = -ENOSYS; break; diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index f379c7f3034c..2700a5a09bd4 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -24,12 +24,15 @@ #include <linux/err.h> #include <linux/slab.h> 
#include <linux/string.h> +#include <linux/delay.h> #include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_srp.h> +#include "scsi_priv.h" #include "scsi_transport_srp_internal.h" struct srp_host_attrs { @@ -38,7 +41,7 @@ struct srp_host_attrs { #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) #define SRP_HOST_ATTRS 0 -#define SRP_RPORT_ATTRS 3 +#define SRP_RPORT_ATTRS 8 struct srp_internal { struct scsi_transport_template t; @@ -54,6 +57,36 @@ struct srp_internal { #define dev_to_rport(d) container_of(d, struct srp_rport, dev) #define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent) +static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) +{ + return dev_to_shost(r->dev.parent); +} + +/** + * srp_tmo_valid() - check timeout combination validity + * + * The combination of the timeout parameters must be such that SCSI commands + * are finished in a reasonable time. Hence do not allow the fast I/O fail + * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT. Furthermore, these + * parameters must be such that multipath can detect failed paths timely. + * Hence do not allow all three parameters to be disabled simultaneously. + */ +int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo) +{ + if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0) + return -EINVAL; + if (reconnect_delay == 0) + return -EINVAL; + if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + return -EINVAL; + if (dev_loss_tmo >= LONG_MAX / HZ) + return -EINVAL; + if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 && + fast_io_fail_tmo >= dev_loss_tmo) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(srp_tmo_valid); static int srp_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) @@ -134,10 +167,465 @@ static ssize_t store_srp_rport_delete(struct device *dev, static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete); +static ssize_t show_srp_rport_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + static const char *const state_name[] = { + [SRP_RPORT_RUNNING] = "running", + [SRP_RPORT_BLOCKED] = "blocked", + [SRP_RPORT_FAIL_FAST] = "fail-fast", + [SRP_RPORT_LOST] = "lost", + }; + struct srp_rport *rport = transport_class_to_srp_rport(dev); + enum srp_rport_state state = rport->state; + + return sprintf(buf, "%s\n", + (unsigned)state < ARRAY_SIZE(state_name) ? + state_name[state] : "???"); +} + +static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL); + +static ssize_t srp_show_tmo(char *buf, int tmo) +{ + return tmo >= 0 ? 
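/*
 * Standalone, runnable mirror of the srp_tmo_valid() rules introduced
 * above (negative values mean "off"). HZ is a placeholder here and
 * SCSI_DEVICE_BLOCK_MAX_TIMEOUT is an assumed stand-in; the kernel
 * takes both from its own headers.
 */
#include <stdio.h>
#include <limits.h>
#include <errno.h>

#define HZ 100                               /* placeholder */
#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600    /* assumed stand-in */

static int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
                         int dev_loss_tmo)
{
        if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
                return -EINVAL;      /* all off: failed paths undetectable */
        if (reconnect_delay == 0)
                return -EINVAL;      /* zero delay would retry continuously */
        if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                return -EINVAL;      /* commands must finish in bounded time */
        if (dev_loss_tmo >= LONG_MAX / HZ)
                return -EINVAL;      /* would overflow jiffies conversion */
        if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
            fast_io_fail_tmo >= dev_loss_tmo)
                return -EINVAL;      /* fast fail must precede device loss */
        return 0;
}

int main(void)
{
        printf("%d\n", srp_tmo_valid(10, 15, 60));   /* 0: the defaults */
        printf("%d\n", srp_tmo_valid(-1, -1, -1));   /* -EINVAL */
        printf("%d\n", srp_tmo_valid(10, 60, 15));   /* -EINVAL */
        return 0;
}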
sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); +} + +static int srp_parse_tmo(int *tmo, const char *buf) +{ + int res = 0; + + if (strncmp(buf, "off", 3) != 0) + res = kstrtoint(buf, 0, tmo); + else + *tmo = -1; + + return res; +} + +static ssize_t show_reconnect_delay(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->reconnect_delay); +} + +static ssize_t store_reconnect_delay(struct device *dev, + struct device_attribute *attr, + const char *buf, const size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res, delay; + + res = srp_parse_tmo(&delay, buf); + if (res) + goto out; + res = srp_tmo_valid(delay, rport->fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + + if (rport->reconnect_delay <= 0 && delay > 0 && + rport->state != SRP_RPORT_RUNNING) { + queue_delayed_work(system_long_wq, &rport->reconnect_work, + delay * HZ); + } else if (delay <= 0) { + cancel_delayed_work(&rport->reconnect_work); + } + rport->reconnect_delay = delay; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay, + store_reconnect_delay); + +static ssize_t show_failed_reconnects(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return sprintf(buf, "%d\n", rport->failed_reconnects); +} + +static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL); + +static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->fast_io_fail_tmo); +} + +static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int fast_io_fail_tmo; + + res = srp_parse_tmo(&fast_io_fail_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + rport->fast_io_fail_tmo = fast_io_fail_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_fast_io_fail_tmo, + store_srp_rport_fast_io_fail_tmo); + +static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->dev_loss_tmo); +} + +static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int dev_loss_tmo; + + res = srp_parse_tmo(&dev_loss_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo, + dev_loss_tmo); + if (res) + goto out; + rport->dev_loss_tmo = dev_loss_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_dev_loss_tmo, + store_srp_rport_dev_loss_tmo); + +static int srp_rport_set_state(struct srp_rport *rport, + enum srp_rport_state new_state) +{ + enum srp_rport_state old_state = rport->state; + + lockdep_assert_held(&rport->mutex); + + switch (new_state) { + case SRP_RPORT_RUNNING: + switch (old_state) { + 
case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_BLOCKED: + switch (old_state) { + case SRP_RPORT_RUNNING: + break; + default: + goto invalid; + } + break; + case SRP_RPORT_FAIL_FAST: + switch (old_state) { + case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_LOST: + break; + } + rport->state = new_state; + return 0; + +invalid: + return -EINVAL; +} + +/** + * srp_reconnect_work() - reconnect and schedule a new attempt if necessary + */ +static void srp_reconnect_work(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, reconnect_work); + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, res; + + res = srp_reconnect_rport(rport); + if (res != 0) { + shost_printk(KERN_ERR, shost, + "reconnect attempt %d failed (%d)\n", + ++rport->failed_reconnects, res); + delay = rport->reconnect_delay * + min(100, max(1, rport->failed_reconnects - 10)); + if (delay > 0) + queue_delayed_work(system_long_wq, + &rport->reconnect_work, delay * HZ); + } +} + +static void __rport_fail_io_fast(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i; + + lockdep_assert_held(&rport->mutex); + + if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) + return; + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + + /* Involve the LLD if possible to terminate all I/O on the rport. */ + i = to_srp_internal(shost->transportt); + if (i->f->terminate_rport_io) + i->f->terminate_rport_io(rport); +} + +/** + * rport_fast_io_fail_timedout() - fast I/O failure timeout handler + */ +static void rport_fast_io_fail_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, fast_io_fail_work); + struct Scsi_Host *shost = rport_to_shost(rport); + + pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + mutex_unlock(&rport->mutex); +} + +/** + * rport_dev_loss_timedout() - device loss timeout handler + */ +static void rport_dev_loss_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, dev_loss_work); + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_info("dev_loss_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0); + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + mutex_unlock(&rport->mutex); + + i->f->rport_delete(rport); +} + +static void __srp_start_tl_fail_timers(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, fast_io_fail_tmo, dev_loss_tmo; + + lockdep_assert_held(&rport->mutex); + + if (!rport->deleted) { + delay = rport->reconnect_delay; + fast_io_fail_tmo = rport->fast_io_fail_tmo; + dev_loss_tmo = rport->dev_loss_tmo; + pr_debug("%s current state: %d\n", + dev_name(&shost->shost_gendev), rport->state); + + if (delay > 0) + queue_delayed_work(system_long_wq, + &rport->reconnect_work, + 1UL * delay * HZ); + if (fast_io_fail_tmo >= 0 && + srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { + pr_debug("%s new state: %d\n", + dev_name(&shost->shost_gendev), + rport->state); + 
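/*
 * Sketch of the retry backoff used by srp_reconnect_work() above: the
 * next attempt is scheduled after reconnect_delay seconds until more
 * than ten attempts have failed, after which the delay grows linearly,
 * capped at 100x the base delay.
 */
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        int reconnect_delay = 10;    /* seconds; default from srp_rport_add() */
        int failed;

        for (failed = 1; failed <= 121; failed += 30) {
                int delay = reconnect_delay * min(100, max(1, failed - 10));

                printf("after %3d failures: next attempt in %4d s\n",
                       failed, delay);
        }
        return 0;
}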
scsi_target_block(&shost->shost_gendev); + queue_delayed_work(system_long_wq, + &rport->fast_io_fail_work, + 1UL * fast_io_fail_tmo * HZ); + } + if (dev_loss_tmo >= 0) + queue_delayed_work(system_long_wq, + &rport->dev_loss_work, + 1UL * dev_loss_tmo * HZ); + } else { + pr_debug("%s has already been deleted\n", + dev_name(&shost->shost_gendev)); + srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST); + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + } +} + +/** + * srp_start_tl_fail_timers() - start the transport layer failure timers + * + * Start the transport layer fast I/O failure and device loss timers. Do not + * modify a timer that was already started. + */ +void srp_start_tl_fail_timers(struct srp_rport *rport) +{ + mutex_lock(&rport->mutex); + __srp_start_tl_fail_timers(rport); + mutex_unlock(&rport->mutex); +} +EXPORT_SYMBOL(srp_start_tl_fail_timers); + +/** + * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() + */ +static int scsi_request_fn_active(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + struct request_queue *q; + int request_fn_active = 0; + + shost_for_each_device(sdev, shost) { + q = sdev->request_queue; + + spin_lock_irq(q->queue_lock); + request_fn_active += q->request_fn_active; + spin_unlock_irq(q->queue_lock); + } + + return request_fn_active; +} + +/** + * srp_reconnect_rport() - reconnect to an SRP target port + * + * Blocks SCSI command queueing before invoking reconnect() such that + * queuecommand() won't be invoked concurrently with reconnect() from outside + * the SCSI EH. This is important since a reconnect() implementation may + * reallocate resources needed by queuecommand(). + * + * Notes: + * - This function neither waits until outstanding requests have finished nor + * tries to abort these. It is the responsibility of the reconnect() + * function to finish outstanding commands before reconnecting to the target + * port. + * - It is the responsibility of the caller to ensure that the resources + * reallocated by the reconnect() function won't be used while this function + * is in progress. One possible strategy is to invoke this function from + * the context of the SCSI EH thread only. Another possible strategy is to + * lock the rport mutex inside each SCSI LLD callback that can be invoked by + * the SCSI EH (the scsi_host_template.eh_*() functions and also the + * scsi_host_template.queuecommand() function). + */ +int srp_reconnect_rport(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + struct scsi_device *sdev; + int res; + + pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev)); + + res = mutex_lock_interruptible(&rport->mutex); + if (res) + goto out; + scsi_target_block(&shost->shost_gendev); + while (scsi_request_fn_active(shost)) + msleep(20); + res = i->f->reconnect(rport); + pr_debug("%s (state %d): transport.reconnect() returned %d\n", + dev_name(&shost->shost_gendev), rport->state, res); + if (res == 0) { + cancel_delayed_work(&rport->fast_io_fail_work); + cancel_delayed_work(&rport->dev_loss_work); + + rport->failed_reconnects = 0; + srp_rport_set_state(rport, SRP_RPORT_RUNNING); + scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING); + /* + * If the SCSI error handler has offlined one or more devices, + * invoking scsi_target_unblock() won't change the state of + * these devices into running so do that explicitly. 
+ */ + spin_lock_irq(shost->host_lock); + __shost_for_each_device(sdev, shost) + if (sdev->sdev_state == SDEV_OFFLINE) + sdev->sdev_state = SDEV_RUNNING; + spin_unlock_irq(shost->host_lock); + } else if (rport->state == SRP_RPORT_RUNNING) { + /* + * srp_reconnect_rport() was invoked with fast_io_fail + * off. Mark the port as failed and start the TL failure + * timers if these had not yet been started. + */ + __rport_fail_io_fast(rport); + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + __srp_start_tl_fail_timers(rport); + } else if (rport->state != SRP_RPORT_BLOCKED) { + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + } + mutex_unlock(&rport->mutex); + +out: + return res; +} +EXPORT_SYMBOL(srp_reconnect_rport); + +/** + * srp_timed_out() - SRP transport intercept of the SCSI timeout EH + * + * If a timeout occurs while an rport is in the blocked state, ask the SCSI + * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core + * handle the timeout (BLK_EH_NOT_HANDLED). + * + * Note: This function is called from soft-IRQ context and with the request + * queue lock held. + */ +static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) +{ + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); + return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? + BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; +} + static void srp_rport_release(struct device *dev) { struct srp_rport *rport = dev_to_rport(dev); + cancel_delayed_work_sync(&rport->reconnect_work); + cancel_delayed_work_sync(&rport->fast_io_fail_work); + cancel_delayed_work_sync(&rport->dev_loss_work); + put_device(dev->parent); kfree(rport); } @@ -185,6 +673,24 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev) } /** + * srp_rport_get() - increment rport reference count + */ +void srp_rport_get(struct srp_rport *rport) +{ + get_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_get); + +/** + * srp_rport_put() - decrement rport reference count + */ +void srp_rport_put(struct srp_rport *rport) +{ + put_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_put); + +/** * srp_rport_add - add a SRP remote port to the device hierarchy * @shost: scsi host the remote port is connected to. * @ids: The port id for the remote port. @@ -196,12 +702,15 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, { struct srp_rport *rport; struct device *parent = &shost->shost_gendev; + struct srp_internal *i = to_srp_internal(shost->transportt); int id, ret; rport = kzalloc(sizeof(*rport), GFP_KERNEL); if (!rport) return ERR_PTR(-ENOMEM); + mutex_init(&rport->mutex); + device_initialize(&rport->dev); rport->dev.parent = get_device(parent); @@ -210,6 +719,17 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); rport->roles = ids->roles; + if (i->f->reconnect) + rport->reconnect_delay = i->f->reconnect_delay ? + *i->f->reconnect_delay : 10; + INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work); + rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ? + *i->f->fast_io_fail_tmo : 15; + rport->dev_loss_tmo = i->f->dev_loss_tmo ? 
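/*
 * Mirror of the srp_timed_out() decision above: while an rport is
 * blocked, command timeouts are deferred (the timer is reset) rather
 * than escalated to the SCSI error handler; once unblocked, the normal
 * timeout path runs. The enum stands in for the block layer's
 * blk_eh_timer_return values.
 */
#include <stdio.h>
#include <stdbool.h>

enum blk_eh_timer_return { BLK_EH_NOT_HANDLED, BLK_EH_RESET_TIMER };

static enum blk_eh_timer_return srp_timed_out(bool reset_timer_if_blocked,
                                              bool device_blocked)
{
        return reset_timer_if_blocked && device_blocked ?
                BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}

int main(void)
{
        printf("%d\n", srp_timed_out(true, true));   /* 1: keep waiting */
        printf("%d\n", srp_timed_out(true, false));  /* 0: normal EH */
        return 0;
}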
*i->f->dev_loss_tmo : 60; + INIT_DELAYED_WORK(&rport->fast_io_fail_work, + rport_fast_io_fail_timedout); + INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); + id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); @@ -259,6 +779,13 @@ void srp_rport_del(struct srp_rport *rport) transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); + + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + rport->deleted = true; + mutex_unlock(&rport->mutex); + put_device(dev); } EXPORT_SYMBOL_GPL(srp_rport_del); @@ -310,6 +837,8 @@ srp_attach_transport(struct srp_function_template *ft) if (!i) return NULL; + i->t.eh_timed_out = srp_timed_out; + i->t.tsk_mgmt_response = srp_tsk_mgmt_response; i->t.it_nexus_response = srp_it_nexus_response; @@ -327,6 +856,15 @@ srp_attach_transport(struct srp_function_template *ft) count = 0; i->rport_attrs[count++] = &dev_attr_port_id; i->rport_attrs[count++] = &dev_attr_roles; + if (ft->has_rport_state) { + i->rport_attrs[count++] = &dev_attr_state; + i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo; + i->rport_attrs[count++] = &dev_attr_dev_loss_tmo; + } + if (ft->reconnect) { + i->rport_attrs[count++] = &dev_attr_reconnect_delay; + i->rport_attrs[count++] = &dev_attr_failed_reconnects; + } if (ft->rport_delete) i->rport_attrs[count++] = &dev_attr_delete; i->rport_attrs[count++] = NULL; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b58e8f815a00..69725f7c32c1 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -105,7 +105,8 @@ static void sd_unlock_native_capacity(struct gendisk *disk); static int sd_probe(struct device *); static int sd_remove(struct device *); static void sd_shutdown(struct device *); -static int sd_suspend(struct device *); +static int sd_suspend_system(struct device *); +static int sd_suspend_runtime(struct device *); static int sd_resume(struct device *); static void sd_rescan(struct device *); static int sd_done(struct scsi_cmnd *); @@ -484,11 +485,11 @@ static struct class sd_disk_class = { }; static const struct dev_pm_ops sd_pm_ops = { - .suspend = sd_suspend, + .suspend = sd_suspend_system, .resume = sd_resume, - .poweroff = sd_suspend, + .poweroff = sd_suspend_system, .restore = sd_resume, - .runtime_suspend = sd_suspend, + .runtime_suspend = sd_suspend_runtime, .runtime_resume = sd_resume, }; @@ -829,7 +830,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) { - rq->timeout = SD_FLUSH_TIMEOUT; + rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER; rq->retries = SD_MAX_RETRIES; rq->cmd[0] = SYNCHRONIZE_CACHE; rq->cmd_len = 10; @@ -1002,7 +1003,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) SCpnt->cmnd[0] = READ_6; SCpnt->sc_data_direction = DMA_FROM_DEVICE; } else { - scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags); + scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags); goto out; } @@ -1433,12 +1434,13 @@ static int sd_sync_cache(struct scsi_disk *sdkp) { int retries, res; struct scsi_device *sdp = sdkp->device; + const int timeout = sdp->request_queue->rq_timeout + * SD_FLUSH_TIMEOUT_MULTIPLIER; struct scsi_sense_hdr sshdr; if (!scsi_device_online(sdp)) return -ENODEV; - for (retries = 3; retries > 0; --retries) { unsigned char cmd[10] = { 0 }; @@ -1448,20 +1450,39 @@ static int 
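/*
 * Sketch of the new sd flush timeout derivation above: instead of the
 * fixed SD_FLUSH_TIMEOUT, SYNCHRONIZE CACHE now applies a multiplier
 * to the queue's sysfs-tunable rq_timeout, which starts out at
 * SD_TIMEOUT and therefore preserves the old 60-second default. HZ is
 * a placeholder value.
 */
#include <stdio.h>

#define HZ 100                          /* placeholder */
#define SD_TIMEOUT (30 * HZ)
#define SD_FLUSH_TIMEOUT_MULTIPLIER 2

int main(void)
{
        int rq_timeout = SD_TIMEOUT;    /* initial queue timeout for sd */
        int flush_timeout = rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;

        printf("flush timeout: %d jiffies (%d s)\n",
               flush_timeout, flush_timeout / HZ);   /* 60 s by default */
        return 0;
}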
sd_sync_cache(struct scsi_disk *sdkp) * flush everything. */ res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, - &sshdr, SD_FLUSH_TIMEOUT, - SD_MAX_RETRIES, NULL, REQ_PM); + &sshdr, timeout, SD_MAX_RETRIES, + NULL, REQ_PM); if (res == 0) break; } if (res) { sd_print_result(sdkp, res); + if (driver_byte(res) & DRIVER_SENSE) sd_print_sense_hdr(sdkp, &sshdr); + /* we need to evaluate the error return */ + if (scsi_sense_valid(&sshdr) && + /* 0x3a is medium not present */ + sshdr.asc == 0x3a) + /* this is no error here */ + return 0; + + switch (host_byte(res)) { + /* ignore errors due to racing a disconnection */ + case DID_BAD_TARGET: + case DID_NO_CONNECT: + return 0; + /* signal the upper layer it might try again */ + case DID_BUS_BUSY: + case DID_IMM_RETRY: + case DID_REQUEUE: + case DID_SOFT_ERROR: + return -EBUSY; + default: + return -EIO; + } } - - if (res) - return -EIO; return 0; } @@ -2420,14 +2441,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) } } - if (modepage == 0x3F) { - sd_printk(KERN_ERR, sdkp, "No Caching mode page " - "present\n"); - goto defaults; - } else if ((buffer[offset] & 0x3f) != modepage) { - sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); - goto defaults; - } + sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); + goto defaults; + Page_found: if (modepage == 8) { sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); @@ -2643,14 +2659,23 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) { struct scsi_device *sdev = sdkp->device; + if (sdev->host->no_write_same) { + sdev->no_write_same = 1; + + return; + } + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { + /* too large values might cause issues with arcmsr */ + int vpd_buf_len = 64; + sdev->no_report_opcodes = 1; /* Disable WRITE SAME if REPORT SUPPORTED OPERATION * CODES is unsupported and the device has an ATA * Information VPD page (SAT). */ - if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE)) + if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len)) sdev->no_write_same = 1; } @@ -2859,6 +2884,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) gd->events |= DISK_EVENT_MEDIA_CHANGE; } + blk_pm_runtime_init(sdp->request_queue, dev); add_disk(gd); if (sdkp->capacity) sd_dif_config_host(sdkp); @@ -2867,7 +2893,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? 
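/*
 * Standalone mirror of the error triage added to sd_sync_cache()
 * above: "medium not present" sense data (ASC 0x3a) and a racing
 * disconnect are treated as success, transient transport conditions
 * map to -EBUSY so upper layers may retry, and everything else becomes
 * -EIO. The DID_* constants below are stand-ins for scsi.h values.
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

enum { DID_BAD_TARGET = 1, DID_NO_CONNECT, DID_BUS_BUSY,
       DID_IMM_RETRY, DID_REQUEUE, DID_SOFT_ERROR, DID_ERROR };

static int sync_cache_result(int host_byte, bool sense_valid, int asc)
{
        if (sense_valid && asc == 0x3a)      /* medium not present */
                return 0;

        switch (host_byte) {
        case DID_BAD_TARGET:                 /* racing a disconnection */
        case DID_NO_CONNECT:
                return 0;
        case DID_BUS_BUSY:                   /* upper layer may retry */
        case DID_IMM_RETRY:
        case DID_REQUEUE:
        case DID_SOFT_ERROR:
                return -EBUSY;
        default:
                return -EIO;
        }
}

int main(void)
{
        printf("%d\n", sync_cache_result(DID_NO_CONNECT, false, 0)); /* 0 */
        printf("%d\n", sync_cache_result(DID_BUS_BUSY, false, 0));   /* -EBUSY */
        printf("%d\n", sync_cache_result(DID_ERROR, false, 0));      /* -EIO */
        return 0;
}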
"removable " : ""); - blk_pm_runtime_init(sdp->request_queue, dev); scsi_autopm_put_device(sdp); put_device(&sdkp->dev); } @@ -3063,9 +3088,17 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) sd_print_result(sdkp, res); if (driver_byte(res) & DRIVER_SENSE) sd_print_sense_hdr(sdkp, &sshdr); + if (scsi_sense_valid(&sshdr) && + /* 0x3a is medium not present */ + sshdr.asc == 0x3a) + res = 0; } - return res; + /* SCSI error codes must not go to the generic layer */ + if (res) + return -EIO; + + return 0; } /* @@ -3083,7 +3116,7 @@ static void sd_shutdown(struct device *dev) if (pm_runtime_suspended(dev)) goto exit; - if (sdkp->WCE) { + if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); sd_sync_cache(sdkp); } @@ -3097,7 +3130,7 @@ exit: scsi_disk_put(sdkp); } -static int sd_suspend(struct device *dev) +static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) { struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); int ret = 0; @@ -3105,16 +3138,23 @@ static int sd_suspend(struct device *dev) if (!sdkp) return 0; /* this can happen */ - if (sdkp->WCE) { + if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); ret = sd_sync_cache(sdkp); - if (ret) + if (ret) { + /* ignore OFFLINE device */ + if (ret == -ENODEV) + ret = 0; goto done; + } } if (sdkp->device->manage_start_stop) { sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); + /* an error is not worth aborting a system sleep */ ret = sd_start_stop_device(sdkp, 0); + if (ignore_stop_errors) + ret = 0; } done: @@ -3122,6 +3162,16 @@ done: return ret; } +static int sd_suspend_system(struct device *dev) +{ + return sd_suspend_common(dev, true); +} + +static int sd_suspend_runtime(struct device *dev) +{ + return sd_suspend_common(dev, false); +} + static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 7a049de22051..26895ff247c5 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -13,7 +13,11 @@ */ #define SD_TIMEOUT (30 * HZ) #define SD_MOD_TIMEOUT (75 * HZ) -#define SD_FLUSH_TIMEOUT (60 * HZ) +/* + * Flush timeout is a multiplier over the standard device timeout which is + * user modifiable via sysfs but initially set to SD_TIMEOUT + */ +#define SD_FLUSH_TIMEOUT_MULTIPLIER 2 #define SD_WRITE_SAME_TIMEOUT (120 * HZ) /* diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 5cbc4bb1b395..df5e961484e1 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ; static int sg_add(struct device *, struct class_interface *); static void sg_remove(struct device *, struct class_interface *); +static DEFINE_SPINLOCK(sg_open_exclusive_lock); + static DEFINE_IDR(sg_index_idr); -static DEFINE_RWLOCK(sg_index_lock); +static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock + file descriptor list for device */ static struct class_interface sg_interface = { .add_dev = sg_add, @@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ } Sg_request; typedef struct sg_fd { /* holds the state of a file descriptor */ - struct list_head sfd_siblings; /* protected by sfd_lock of device */ + /* sfd_siblings is protected by sg_index_lock */ + struct list_head sfd_siblings; struct sg_device *parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ rwlock_t rq_list_lock; /* protect access to list in 
req_arr */ @@ -166,12 +170,13 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; + wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ - spinlock_t sfd_lock; /* protect file descriptor list for device */ + /* sfds is protected by sg_index_lock */ struct list_head sfds; - struct rw_semaphore o_sem; /* exclude open should hold this rwsem */ volatile char detached; /* 0->attached, 1->detached pending removal */ + /* exclude protected by sg_open_exclusive_lock */ char exclude; /* opened for exclusive access */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ struct gendisk *disk; @@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd) return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); } +static int get_exclude(Sg_device *sdp) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&sg_open_exclusive_lock, flags); + ret = sdp->exclude; + spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); + return ret; +} + +static int set_exclude(Sg_device *sdp, char val) +{ + unsigned long flags; + + spin_lock_irqsave(&sg_open_exclusive_lock, flags); + sdp->exclude = val; + spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); + return val; +} + static int sfds_list_empty(Sg_device *sdp) { unsigned long flags; int ret; - spin_lock_irqsave(&sdp->sfd_lock, flags); + read_lock_irqsave(&sg_index_lock, flags); ret = list_empty(&sdp->sfds); - spin_unlock_irqrestore(&sdp->sfd_lock, flags); + read_unlock_irqrestore(&sg_index_lock, flags); return ret; } @@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp) struct request_queue *q; Sg_device *sdp; Sg_fd *sfp; + int res; int retval; nonseekable_open(inode, filp); @@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp) goto error_out; } - if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { - retval = -EPERM; /* Can't lock it with read only access */ - goto error_out; - } - if (flags & O_NONBLOCK) { - if (flags & O_EXCL) { - if (!down_write_trylock(&sdp->o_sem)) { - retval = -EBUSY; - goto error_out; - } - } else { - if (!down_read_trylock(&sdp->o_sem)) { - retval = -EBUSY; - goto error_out; - } + if (flags & O_EXCL) { + if (O_RDONLY == (flags & O_ACCMODE)) { + retval = -EPERM; /* Can't lock it with read only access */ + goto error_out; + } + if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) { + retval = -EBUSY; + goto error_out; + } + res = wait_event_interruptible(sdp->o_excl_wait, + ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 
0 : set_exclude(sdp, 1))); + if (res) { + retval = res; /* -ERESTARTSYS because signal hit process */ + goto error_out; + } + } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */ + if (flags & O_NONBLOCK) { + retval = -EBUSY; + goto error_out; + } + res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp)); + if (res) { + retval = res; /* -ERESTARTSYS because signal hit process */ + goto error_out; } - } else { - if (flags & O_EXCL) - down_write(&sdp->o_sem); - else - down_read(&sdp->o_sem); } - /* Since write lock is held, no need to check sfd_list */ - if (flags & O_EXCL) - sdp->exclude = 1; /* used by release lock */ - + if (sdp->detached) { + retval = -ENODEV; + goto error_out; + } if (sfds_list_empty(sdp)) { /* no existing opens on this device */ sdp->sgdebug = 0; q = sdp->device->request_queue; sdp->sg_tablesize = queue_max_segments(q); } - sfp = sg_add_sfp(sdp, dev); - if (!IS_ERR(sfp)) + if ((sfp = sg_add_sfp(sdp, dev))) filp->private_data = sfp; - /* retval is already provably zero at this point because of the - * check after retval = scsi_autopm_get_device(sdp->device)) - */ else { - retval = PTR_ERR(sfp); - if (flags & O_EXCL) { - sdp->exclude = 0; /* undo if error */ - up_write(&sdp->o_sem); - } else - up_read(&sdp->o_sem); + set_exclude(sdp, 0); /* undo if error */ + wake_up_interruptible(&sdp->o_excl_wait); + } + retval = -ENOMEM; + goto error_out; + } + retval = 0; error_out: + if (retval) { scsi_autopm_put_device(sdp->device); sdp_put: scsi_device_put(sdp->device); @@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp) { Sg_device *sdp; Sg_fd *sfp; - int excl; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); - excl = sdp->exclude; - sdp->exclude = 0; - if (excl) - up_write(&sdp->o_sem); - else - up_read(&sdp->o_sem); + set_exclude(sdp, 0); + wake_up_interruptible(&sdp->o_excl_wait); scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); @@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) disk->first_minor = k; sdp->disk = disk; sdp->device = scsidp; - spin_lock_init(&sdp->sfd_lock); INIT_LIST_HEAD(&sdp->sfds); - init_rwsem(&sdp->o_sem); + init_waitqueue_head(&sdp->o_excl_wait); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); @@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf) /* Need a write lock to set sdp->detached. 
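/*
 * The O_EXCL rework in sg_open() above, reduced to its decision table:
 * an exclusive open must wait until no other descriptors are open and
 * nothing holds the exclusive flag (or fail with -EBUSY under
 * O_NONBLOCK), and a shared open must wait while another descriptor
 * holds the exclusive flag. The actual sleeping happens on the new
 * sdp->o_excl_wait queue.
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

#define WOULD_SLEEP 1

static int sg_open_check(bool want_excl, bool nonblock,
                         bool other_fds_open, bool excluded)
{
        if (want_excl && (other_fds_open || excluded))
                return nonblock ? -EBUSY : WOULD_SLEEP;
        if (!want_excl && excluded)
                return nonblock ? -EBUSY : WOULD_SLEEP;
        return 0;                    /* open proceeds immediately */
}

int main(void)
{
        printf("%d\n", sg_open_check(true, true, true, false));    /* -EBUSY */
        printf("%d\n", sg_open_check(true, false, true, false));   /* sleeps */
        printf("%d\n", sg_open_check(false, false, false, false)); /* 0 */
        return 0;
}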
*/ write_lock_irqsave(&sg_index_lock, iflags); - spin_lock(&sdp->sfd_lock); sdp->detached = 1; list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } - spin_unlock(&sdp->sfd_lock); write_unlock_irqrestore(&sg_index_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); @@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev) sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) - return ERR_PTR(-ENOMEM); + return NULL; init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); @@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev) sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; - spin_lock_irqsave(&sdp->sfd_lock, iflags); - if (sdp->detached) { - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); - return ERR_PTR(-ENODEV); - } + write_lock_irqsave(&sg_index_lock, iflags); list_add_tail(&sfp->sfd_siblings, &sdp->sfds); - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); + write_unlock_irqrestore(&sg_index_lock, iflags); SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; @@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref) struct sg_device *sdp = sfp->parentdp; unsigned long iflags; - spin_lock_irqsave(&sdp->sfd_lock, iflags); + write_lock_irqsave(&sg_index_lock, iflags); list_del(&sfp->sfd_siblings); - spin_unlock_irqrestore(&sdp->sfd_lock, iflags); + write_unlock_irqrestore(&sg_index_lock, iflags); + wake_up_interruptible(&sdp->o_excl_wait); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); schedule_work(&sfp->ew.work); @@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) return 0; } -/* must be called while holding sg_index_lock and sfd_lock */ +/* must be called while holding sg_index_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { int k, m, new_interface, blen, usg; @@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v) read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? 
sg_lookup_dev(it->index) : NULL; - if (sdp) { - spin_lock(&sdp->sfd_lock); - if (!list_empty(&sdp->sfds)) { - struct scsi_device *scsidp = sdp->device; + if (sdp && !list_empty(&sdp->sfds)) { + struct scsi_device *scsidp = sdp->device; - seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); - if (sdp->detached) - seq_printf(s, "detached pending close "); - else - seq_printf - (s, "scsi%d chan=%d id=%d lun=%d em=%d", - scsidp->host->host_no, - scsidp->channel, scsidp->id, - scsidp->lun, - scsidp->host->hostt->emulated); - seq_printf(s, " sg_tablesize=%d excl=%d\n", - sdp->sg_tablesize, sdp->exclude); - sg_proc_debug_helper(s, sdp); - } - spin_unlock(&sdp->sfd_lock); + seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); + if (sdp->detached) + seq_printf(s, "detached pending close "); + else + seq_printf + (s, "scsi%d chan=%d id=%d lun=%d em=%d", + scsidp->host->host_no, + scsidp->channel, scsidp->id, + scsidp->lun, + scsidp->host->hostt->emulated); + seq_printf(s, " sg_tablesize=%d excl=%d\n", + sdp->sg_tablesize, get_exclude(sdp)); + sg_proc_debug_helper(s, sdp); } read_unlock_irqrestore(&sg_index_lock, iflags); return 0; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 1a28f5632797..17d740427240 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1697,6 +1697,7 @@ static struct scsi_host_template scsi_driver = { .use_clustering = DISABLE_CLUSTERING, /* Make sure we dont get a sg segment crosses a page boundary */ .dma_boundary = PAGE_SIZE-1, + .no_write_same = 1, }; enum { diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index b06a1dea8818..b006cf789ba1 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c @@ -521,7 +521,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr pACB->SelConn++; return 1; } - if (time_before (jiffies, pACB->pScsiHost->last_reset)) + if (time_before (jiffies, pACB->last_reset)) { DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n")); return 1; @@ -1863,7 +1863,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB ) /* delay half a second */ udelay (1000); DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pACB->pScsiHost->last_reset = jiffies + 5*HZ/2 + pACB->last_reset = jiffies + 5*HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; pACB->Connected = 0; @@ -2048,9 +2048,9 @@ static int DC390_bus_reset (struct scsi_cmnd *cmd) dc390_ResetDevParam(pACB); mdelay(1); - pACB->pScsiHost->last_reset = jiffies + 3*HZ/2 + pACB->last_reset = jiffies + 3*HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; - + DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); DC390_read8(INT_Status); /* Reset Pending INT */ @@ -2383,7 +2383,7 @@ static void dc390_init_hw(struct dc390_acb *pACB, u8 index) if (pACB->Gmode2 & RST_SCSI_BUS) { dc390_ResetSCSIBus(pACB); udelay(1000); - shost->last_reset = jiffies + HZ/2 + + pACB->last_reset = jiffies + HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; } @@ -2455,8 +2455,8 @@ static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) shost->irq = pdev->irq; shost->base = io_port; shost->unique_id = io_port; - shost->last_reset = jiffies; - + + pACB->last_reset = jiffies; pACB->pScsiHost = shost; pACB->IOPortBase = (u16) io_port; pACB->IRQLevel = pdev->irq; diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h index 77adc54dbd16..3d1bb4ad1826 100644 --- a/drivers/scsi/tmscsim.h +++ b/drivers/scsi/tmscsim.h @@ -143,6 +143,7 @@ u8 Ignore_IRQ; /* Not used */ struct pci_dev *pdev; +unsigned 
long last_reset; unsigned long Cmds; u32 SelLost; u32 SelConn; diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index bce09a6898c4..721050090520 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -177,6 +177,7 @@ enum { MASK_TASK_RESPONSE = 0xFF00, MASK_RSP_UPIU_RESULT = 0xFFFF, MASK_QUERY_DATA_SEG_LEN = 0xFFFF, + MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF, MASK_RSP_EXCEPTION_EVENT = 0x10000, }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index b36ca9a2dfbb..04884d663e4e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -36,9 +36,11 @@ #include <linux/async.h> #include "ufshcd.h" +#include "unipro.h" #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ UTP_TASK_REQ_COMPL |\ + UIC_POWER_MODE |\ UFSHCD_ERROR_MASK) /* UIC command timeout, unit: ms */ #define UIC_CMD_TIMEOUT 500 @@ -56,6 +58,9 @@ /* Expose the flag value from utp_upiu_query.value */ #define MASK_QUERY_UPIU_FLAG_LOC 0xFF +/* Interrupt aggregation default timeout, unit: 40us */ +#define INT_AGGR_DEF_TO 0x02 + enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, @@ -78,12 +83,6 @@ enum { UFSHCD_INT_CLEAR, }; -/* Interrupt aggregation options */ -enum { - INT_AGGR_RESET, - INT_AGGR_CONFIG, -}; - /* * ufshcd_wait_for_register - wait for register value to change * @hba - per-adapter interface @@ -238,6 +237,18 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) } /** + * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command + * @hba: Pointer to adapter instance + * + * This function gets UIC command argument3 + * Returns 0 on success, non zero value on error + */ +static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) +{ + return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); +} + +/** * ufshcd_get_req_rsp - returns the TR response transaction type * @ucd_rsp_ptr: pointer to response UPIU */ @@ -260,6 +271,20 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; } +/* + * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length + * from response UPIU + * @ucd_rsp_ptr: pointer to response UPIU + * + * Return the data segment length. + */ +static inline unsigned int +ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & + MASK_RSP_UPIU_DATA_SEG_LEN; +} + /** * ufshcd_is_exception_event - Check if the device raised an exception event * @ucd_rsp_ptr: pointer to response UPIU @@ -276,30 +301,30 @@ static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) } /** - * ufshcd_config_int_aggr - Configure interrupt aggregation values. - * Currently there is no use case where we want to configure - * interrupt aggregation dynamically. So to configure interrupt - * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and - * INT_AGGR_TIMEOUT_VALUE are used. + * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. 
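/*
 * Mirror of the response-UPIU field extraction above: header words
 * arrive big-endian, so they are byte-swapped before masking out the
 * data segment length with MASK_RSP_UPIU_DATA_SEG_LEN (the driver now
 * copies sense data only when this length is non-zero). be32toh()
 * stands in for the kernel's be32_to_cpu().
 */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#define MASK_RSP_UPIU_DATA_SEG_LEN 0xFFFF

static unsigned int rsp_upiu_data_seg_len(uint32_t dword_2_be)
{
        return be32toh(dword_2_be) & MASK_RSP_UPIU_DATA_SEG_LEN;
}

int main(void)
{
        /* A header word whose low 16 bits, once swapped, are 0x0012. */
        uint32_t dword_2 = htobe32(0x00000012);

        printf("data segment length: %u\n", rsp_upiu_data_seg_len(dword_2));
        return 0;
}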
* @hba: per adapter instance - * @option: Interrupt aggregation option */ static inline void -ufshcd_config_int_aggr(struct ufs_hba *hba, int option) +ufshcd_reset_intr_aggr(struct ufs_hba *hba) { - switch (option) { - case INT_AGGR_RESET: - ufshcd_writel(hba, INT_AGGR_ENABLE | - INT_AGGR_COUNTER_AND_TIMER_RESET, - REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); - break; - case INT_AGGR_CONFIG: - ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | - INT_AGGR_COUNTER_THRESHOLD_VALUE | - INT_AGGR_TIMEOUT_VALUE, - REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); - break; - } + ufshcd_writel(hba, INT_AGGR_ENABLE | + INT_AGGR_COUNTER_AND_TIMER_RESET, + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); +} + +/** + * ufshcd_config_intr_aggr - Configure interrupt aggregation values. + * @hba: per adapter instance + * @cnt: Interrupt aggregation counter threshold + * @tmout: Interrupt aggregation timeout value + */ +static inline void +ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) +{ + ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | + INT_AGGR_COUNTER_THLD_VAL(cnt) | + INT_AGGR_TIMEOUT_VAL(tmout), + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); } /** @@ -355,7 +380,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) { int len; - if (lrbp->sense_buffer) { + if (lrbp->sense_buffer && + ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data, @@ -446,6 +472,18 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) } /** + * ufshcd_get_upmcrs - Get the power mode change request status + * @hba: Pointer to adapter instance + * + * This function gets the UPMCRS field of HCS register + * Returns value of UPMCRS field + */ +static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; +} + +/** * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers * @hba: per adapter instance * @uic_cmd: UIC command @@ -1362,6 +1400,202 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) } /** + * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET + * @hba: per adapter instance + * @attr_sel: uic command argument1 + * @attr_set: attribute set type as uic command argument2 + * @mib_val: setting value as uic command argument3 + * @peer: indicate whether peer or local + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, + u8 attr_set, u32 mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-set", + "dme-peer-set" + }; + const char *set = action[!!peer]; + int ret; + + uic_cmd.command = peer ? 
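/*
 * Userspace sketch of the reworked interrupt aggregation setup above:
 * the counter threshold and timeout are now parameters packed into one
 * register write instead of hard-wired constants. The ENABLE and
 * PARAM_WRITE bit positions are assumptions here; the driver takes
 * them from ufshci.h.
 */
#include <stdio.h>
#include <stdint.h>

#define INT_AGGR_ENABLE                 (1U << 31)  /* assumed position */
#define INT_AGGR_PARAM_WRITE            (1U << 24)  /* assumed position */
#define INT_AGGR_COUNTER_THLD_VAL(c)    (((c) & 0x1F) << 8)
#define INT_AGGR_TIMEOUT_VAL(t)         (((t) & 0xFF) << 0)
#define INT_AGGR_DEF_TO                 0x02        /* 2 x 40us units */

int main(void)
{
        int nutrs = 32;              /* assumed number of transfer slots */
        uint32_t reg = INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
                       INT_AGGR_COUNTER_THLD_VAL(nutrs - 1) |
                       INT_AGGR_TIMEOUT_VAL(INT_AGGR_DEF_TO);

        printf("INT_AGG_CONTROL <- 0x%08x\n", reg);
        return 0;
}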
+ UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; + uic_cmd.argument1 = attr_sel; + uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); + uic_cmd.argument3 = mib_val; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", + set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); + +/** + * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET + * @hba: per adapter instance + * @attr_sel: uic command argument1 + * @mib_val: the value of the attribute as returned by the UIC command + * @peer: indicate whether peer or local + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, + u32 *mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-get", + "dme-peer-get" + }; + const char *get = action[!!peer]; + int ret; + + uic_cmd.command = peer ? + UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; + uic_cmd.argument1 = attr_sel; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) { + dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n", + get, UIC_GET_ATTR_ID(attr_sel), ret); + goto out; + } + + if (mib_val) + *mib_val = uic_cmd.argument3; +out: + return ret; +} +EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); + +/** + * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage + * using DME_SET primitives. + * @hba: per adapter instance + * @mode: powr mode value + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) +{ + struct uic_command uic_cmd = {0}; + struct completion pwr_done; + unsigned long flags; + u8 status; + int ret; + + uic_cmd.command = UIC_CMD_DME_SET; + uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); + uic_cmd.argument3 = mode; + init_completion(&pwr_done); + + mutex_lock(&hba->uic_cmd_mutex); + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->pwr_done = &pwr_done; + spin_unlock_irqrestore(hba->host->host_lock, flags); + ret = __ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) { + dev_err(hba->dev, + "pwr mode change with mode 0x%x uic error %d\n", + mode, ret); + goto out; + } + + if (!wait_for_completion_timeout(hba->pwr_done, + msecs_to_jiffies(UIC_CMD_TIMEOUT))) { + dev_err(hba->dev, + "pwr mode change with mode 0x%x completion timeout\n", + mode); + ret = -ETIMEDOUT; + goto out; + } + + status = ufshcd_get_upmcrs(hba); + if (status != PWR_LOCAL) { + dev_err(hba->dev, + "pwr mode change failed, host umpcrs:0x%x\n", + status); + ret = (status != PWR_OK) ? status : -1; + } +out: + spin_lock_irqsave(hba->host->host_lock, flags); + hba->pwr_done = NULL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + mutex_unlock(&hba->uic_cmd_mutex); + return ret; +} + +/** + * ufshcd_config_max_pwr_mode - Set & Change power mode with + * maximum capability attribute information. + * @hba: per adapter instance + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba) +{ + enum {RX = 0, TX = 1}; + u32 lanes[] = {1, 1}; + u32 gear[] = {1, 1}; + u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE}; + int ret; + + /* Get the connected lane count */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]); + + /* + * First, get the maximum gears of HS speed. + * If a zero value, it means there is no HSGEAR capability. + * Then, get the maximum gears of PWM speed. 
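/*
 * Sketch of the capability fallback in ufshcd_config_max_pwr_mode()
 * above: a link direction that reports no high-speed gear falls back
 * to PWM gears with SLOWAUTO_MODE, and the final PA_PWRMODE argument
 * packs the RX mode into the high nibble and the TX mode into the low
 * one. The mode values come from the new unipro.h further below.
 */
#include <stdio.h>

enum { FASTAUTO_MODE = 4, SLOWAUTO_MODE = 5 };

int main(void)
{
        unsigned int hs_gear_rx = 0, hs_gear_tx = 2; /* assumed capabilities */
        int pwr_rx = FASTAUTO_MODE, pwr_tx = FASTAUTO_MODE;

        if (!hs_gear_rx)
                pwr_rx = SLOWAUTO_MODE;      /* no HS capability: use PWM */
        if (!hs_gear_tx)
                pwr_tx = SLOWAUTO_MODE;

        printf("PA_PWRMODE argument: 0x%02x\n", pwr_rx << 4 | pwr_tx);
        return 0;
}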
+ */ + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]); + if (!gear[RX]) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]); + pwr[RX] = SLOWAUTO_MODE; + } + + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]); + if (!gear[TX]) { + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), + &gear[TX]); + pwr[TX] = SLOWAUTO_MODE; + } + + /* + * Configure attributes for power mode change with below. + * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, + * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, + * - PA_HSSERIES + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]); + if (pwr[RX] == FASTAUTO_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]); + if (pwr[TX] == FASTAUTO_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); + + if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B); + + ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]); + if (ret) + dev_err(hba->dev, + "pwr_mode: power mode change failed %d\n", ret); + + return ret; +} + +/** * ufshcd_complete_dev_init() - checks device readiness * hba: per-adapter instance * @@ -1442,7 +1676,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); /* Configure interrupt aggregation */ - ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG); + ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); /* Configure UTRL and UTMRL base address registers */ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), @@ -1788,32 +2022,24 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) int result = 0; switch (scsi_status) { - case SAM_STAT_GOOD: - result |= DID_OK << 16 | - COMMAND_COMPLETE << 8 | - SAM_STAT_GOOD; - break; case SAM_STAT_CHECK_CONDITION: + ufshcd_copy_sense_data(lrbp); + case SAM_STAT_GOOD: result |= DID_OK << 16 | COMMAND_COMPLETE << 8 | - SAM_STAT_CHECK_CONDITION; - ufshcd_copy_sense_data(lrbp); - break; - case SAM_STAT_BUSY: - result |= SAM_STAT_BUSY; + scsi_status; break; case SAM_STAT_TASK_SET_FULL: - /* * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue * depth needs to be adjusted to the exact number of * outstanding commands the LUN can handle at any given time. 
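/*
 * Mirror of the reworked ufshcd_scsi_cmd_status() above: CHECK
 * CONDITION now falls through to the GOOD path after copying sense
 * data, so both build the same result word from DID_OK, COMMAND
 * COMPLETE and the raw SCSI status. Constants are stand-ins for
 * scsi.h values.
 */
#include <stdio.h>

#define DID_OK                    0x00
#define DID_ERROR                 0x07
#define COMMAND_COMPLETE          0x00
#define SAM_STAT_GOOD             0x00
#define SAM_STAT_CHECK_CONDITION  0x02

static int scsi_cmd_result(int scsi_status)
{
        switch (scsi_status) {
        case SAM_STAT_CHECK_CONDITION:
                /* sense data is copied here in the driver */
                /* fall through */
        case SAM_STAT_GOOD:
                return DID_OK << 16 | COMMAND_COMPLETE << 8 | scsi_status;
        default:
                return DID_ERROR << 16;
        }
}

int main(void)
{
        printf("0x%06x\n", scsi_cmd_result(SAM_STAT_GOOD));            /* 0x000000 */
        printf("0x%06x\n", scsi_cmd_result(SAM_STAT_CHECK_CONDITION)); /* 0x000002 */
        return 0;
}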
*/ ufshcd_adjust_lun_qdepth(lrbp->cmd); - result |= SAM_STAT_TASK_SET_FULL; - break; + case SAM_STAT_BUSY: case SAM_STAT_TASK_ABORTED: - result |= SAM_STAT_TASK_ABORTED; + ufshcd_copy_sense_data(lrbp); + result |= scsi_status; break; default: result |= DID_ERROR << 16; @@ -1898,14 +2124,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) /** * ufshcd_uic_cmd_compl - handle completion of uic command * @hba: per adapter instance + * @intr_status: interrupt status generated by the controller */ -static void ufshcd_uic_cmd_compl(struct ufs_hba *hba) +static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { - if (hba->active_uic_cmd) { + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { hba->active_uic_cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); + hba->active_uic_cmd->argument3 = + ufshcd_get_dme_attr_val(hba); complete(&hba->active_uic_cmd->done); } + + if ((intr_status & UIC_POWER_MODE) && hba->pwr_done) + complete(hba->pwr_done); } /** @@ -1960,7 +2192,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) /* Reset interrupt aggregation counters */ if (int_aggr_reset) - ufshcd_config_int_aggr(hba, INT_AGGR_RESET); + ufshcd_reset_intr_aggr(hba); } /** @@ -2251,8 +2483,8 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) if (hba->errors) ufshcd_err_handler(hba); - if (intr_status & UIC_COMMAND_COMPL) - ufshcd_uic_cmd_compl(hba); + if (intr_status & UFSHCD_UIC_MASK) + ufshcd_uic_cmd_compl(hba, intr_status); if (intr_status & UTP_TASK_REQ_COMPL) ufshcd_tmc_handler(hba); @@ -2494,6 +2726,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) if (ret) goto out; + ufshcd_config_max_pwr_mode(hba); + ret = ufshcd_verify_dev_init(hba); if (ret) goto out; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 59c9c4848be1..577679a2d189 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -175,6 +175,7 @@ struct ufs_dev_cmd { * @active_uic_cmd: handle of active UIC command * @uic_cmd_mutex: mutex for uic command * @ufshcd_tm_wait_queue: wait queue for task management + * @pwr_done: completion for power mode change * @tm_condition: condition variable for task management * @ufshcd_state: UFSHCD states * @intr_mask: Interrupt Mask Bits @@ -219,6 +220,8 @@ struct ufs_hba { wait_queue_head_t ufshcd_tm_wait_queue; unsigned long tm_condition; + struct completion *pwr_done; + u32 ufshcd_state; u32 intr_mask; u16 ee_ctrl_mask; @@ -263,4 +266,55 @@ static inline void check_upiu_size(void) extern int ufshcd_runtime_suspend(struct ufs_hba *hba); extern int ufshcd_runtime_resume(struct ufs_hba *hba); extern int ufshcd_runtime_idle(struct ufs_hba *hba); +extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, + u8 attr_set, u32 mib_val, u8 peer); +extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, + u32 *mib_val, u8 peer); + +/* UIC command interfaces for DME primitives */ +#define DME_LOCAL 0 +#define DME_PEER 1 +#define ATTR_SET_NOR 0 /* NORMAL */ +#define ATTR_SET_ST 1 /* STATIC */ + +static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, + mib_val, DME_LOCAL); +} + +static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, + mib_val, DME_LOCAL); +} + +static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, 
attr_sel, ATTR_SET_NOR, + mib_val, DME_PEER); +} + +static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, + mib_val, DME_PEER); +} + +static inline int ufshcd_dme_get(struct ufs_hba *hba, + u32 attr_sel, u32 *mib_val) +{ + return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL); +} + +static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, + u32 attr_sel, u32 *mib_val) +{ + return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); +} + #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index f1e1b7459107..0475c6619a68 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -124,6 +124,9 @@ enum { #define CONTROLLER_FATAL_ERROR UFS_BIT(16) #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) +#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\ + UIC_POWER_MODE) + #define UFSHCD_ERROR_MASK (UIC_ERROR |\ DEVICE_FATAL_ERROR |\ CONTROLLER_FATAL_ERROR |\ @@ -142,6 +145,15 @@ enum { #define DEVICE_ERROR_INDICATOR UFS_BIT(5) #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) +enum { + PWR_OK = 0x0, + PWR_LOCAL = 0x01, + PWR_REMOTE = 0x02, + PWR_BUSY = 0x03, + PWR_ERROR_CAP = 0x04, + PWR_FATAL_ERROR = 0x05, +}; + /* HCE - Host Controller Enable 34h */ #define CONTROLLER_ENABLE UFS_BIT(0) #define CONTROLLER_DISABLE 0x0 @@ -191,6 +203,12 @@ enum { #define CONFIG_RESULT_CODE_MASK 0xFF #define GENERIC_ERROR_CODE_MASK 0xFF +#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ + ((sel) & 0xFFFF)) +#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0) +#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16) +#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) + /* UIC Commands */ enum { UIC_CMD_DME_GET = 0x01, @@ -226,8 +244,8 @@ enum { #define MASK_UIC_COMMAND_RESULT 0xFF -#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8) -#define INT_AGGR_TIMEOUT_VALUE (0x02) +#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8) +#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0) /* Interrupt disable masks */ enum { diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h new file mode 100644 index 000000000000..0bb8041c047a --- /dev/null +++ b/drivers/scsi/ufs/unipro.h @@ -0,0 +1,151 @@ +/* + * drivers/scsi/ufs/unipro.h + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
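/*
 * Standalone check of the UIC argument packing macros added to
 * ufshci.h above: the attribute ID occupies the upper 16 bits of UIC
 * command argument1, the GenSelectorIndex the lower 16, and
 * UIC_GET_ATTR_ID() recovers the ID for error messages.
 */
#include <stdio.h>
#include <stdint.h>

#define UIC_ARG_MIB_SEL(attr, sel)  ((((attr) & 0xFFFF) << 16) | \
                                     ((sel) & 0xFFFF))
#define UIC_ARG_MIB(attr)           UIC_ARG_MIB_SEL(attr, 0)
#define UIC_GET_ATTR_ID(v)          (((v) >> 16) & 0xFFFF)

#define PA_PWRMODE 0x1571            /* from the new unipro.h */

int main(void)
{
        uint32_t arg1 = UIC_ARG_MIB(PA_PWRMODE);

        printf("argument1 = 0x%08x, attr-id = 0x%x\n",
               arg1, UIC_GET_ATTR_ID(arg1));  /* 0x15710000, 0x1571 */
        return 0;
}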
+ */ + +#ifndef _UNIPRO_H_ +#define _UNIPRO_H_ + +/* + * PHY Adpater attributes + */ +#define PA_ACTIVETXDATALANES 0x1560 +#define PA_ACTIVERXDATALANES 0x1580 +#define PA_TXTRAILINGCLOCKS 0x1564 +#define PA_PHY_TYPE 0x1500 +#define PA_AVAILTXDATALANES 0x1520 +#define PA_AVAILRXDATALANES 0x1540 +#define PA_MINRXTRAILINGCLOCKS 0x1543 +#define PA_TXPWRSTATUS 0x1567 +#define PA_RXPWRSTATUS 0x1582 +#define PA_TXFORCECLOCK 0x1562 +#define PA_TXPWRMODE 0x1563 +#define PA_LEGACYDPHYESCDL 0x1570 +#define PA_MAXTXSPEEDFAST 0x1521 +#define PA_MAXTXSPEEDSLOW 0x1522 +#define PA_MAXRXSPEEDFAST 0x1541 +#define PA_MAXRXSPEEDSLOW 0x1542 +#define PA_TXLINKSTARTUPHS 0x1544 +#define PA_TXSPEEDFAST 0x1565 +#define PA_TXSPEEDSLOW 0x1566 +#define PA_REMOTEVERINFO 0x15A0 +#define PA_TXGEAR 0x1568 +#define PA_TXTERMINATION 0x1569 +#define PA_HSSERIES 0x156A +#define PA_PWRMODE 0x1571 +#define PA_RXGEAR 0x1583 +#define PA_RXTERMINATION 0x1584 +#define PA_MAXRXPWMGEAR 0x1586 +#define PA_MAXRXHSGEAR 0x1587 +#define PA_RXHSUNTERMCAP 0x15A5 +#define PA_RXLSTERMCAP 0x15A6 +#define PA_PACPREQTIMEOUT 0x1590 +#define PA_PACPREQEOBTIMEOUT 0x1591 +#define PA_HIBERN8TIME 0x15A7 +#define PA_LOCALVERINFO 0x15A9 +#define PA_TACTIVATE 0x15A8 +#define PA_PACPFRAMECOUNT 0x15C0 +#define PA_PACPERRORCOUNT 0x15C1 +#define PA_PHYTESTCONTROL 0x15C2 +#define PA_PWRMODEUSERDATA0 0x15B0 +#define PA_PWRMODEUSERDATA1 0x15B1 +#define PA_PWRMODEUSERDATA2 0x15B2 +#define PA_PWRMODEUSERDATA3 0x15B3 +#define PA_PWRMODEUSERDATA4 0x15B4 +#define PA_PWRMODEUSERDATA5 0x15B5 +#define PA_PWRMODEUSERDATA6 0x15B6 +#define PA_PWRMODEUSERDATA7 0x15B7 +#define PA_PWRMODEUSERDATA8 0x15B8 +#define PA_PWRMODEUSERDATA9 0x15B9 +#define PA_PWRMODEUSERDATA10 0x15BA +#define PA_PWRMODEUSERDATA11 0x15BB +#define PA_CONNECTEDTXDATALANES 0x1561 +#define PA_CONNECTEDRXDATALANES 0x1581 +#define PA_LOGICALLANEMAP 0x15A1 +#define PA_SLEEPNOCONFIGTIME 0x15A2 +#define PA_STALLNOCONFIGTIME 0x15A3 +#define PA_SAVECONFIGTIME 0x15A4 + +/* PA power modes */ +enum { + FAST_MODE = 1, + SLOW_MODE = 2, + FASTAUTO_MODE = 4, + SLOWAUTO_MODE = 5, + UNCHANGED = 7, +}; + +/* PA TX/RX Frequency Series */ +enum { + PA_HS_MODE_A = 1, + PA_HS_MODE_B = 2, +}; + +/* + * Data Link Layer Attributes + */ +#define DL_TC0TXFCTHRESHOLD 0x2040 +#define DL_FC0PROTTIMEOUTVAL 0x2041 +#define DL_TC0REPLAYTIMEOUTVAL 0x2042 +#define DL_AFC0REQTIMEOUTVAL 0x2043 +#define DL_AFC0CREDITTHRESHOLD 0x2044 +#define DL_TC0OUTACKTHRESHOLD 0x2045 +#define DL_TC1TXFCTHRESHOLD 0x2060 +#define DL_FC1PROTTIMEOUTVAL 0x2061 +#define DL_TC1REPLAYTIMEOUTVAL 0x2062 +#define DL_AFC1REQTIMEOUTVAL 0x2063 +#define DL_AFC1CREDITTHRESHOLD 0x2064 +#define DL_TC1OUTACKTHRESHOLD 0x2065 +#define DL_TXPREEMPTIONCAP 0x2000 +#define DL_TC0TXMAXSDUSIZE 0x2001 +#define DL_TC0RXINITCREDITVAL 0x2002 +#define DL_TC0TXBUFFERSIZE 0x2005 +#define DL_PEERTC0PRESENT 0x2046 +#define DL_PEERTC0RXINITCREVAL 0x2047 +#define DL_TC1TXMAXSDUSIZE 0x2003 +#define DL_TC1RXINITCREDITVAL 0x2004 +#define DL_TC1TXBUFFERSIZE 0x2006 +#define DL_PEERTC1PRESENT 0x2066 +#define DL_PEERTC1RXINITCREVAL 0x2067 + +/* + * Network Layer Attributes + */ +#define N_DEVICEID 0x3000 +#define N_DEVICEID_VALID 0x3001 +#define N_TC0TXMAXSDUSIZE 0x3020 +#define N_TC1TXMAXSDUSIZE 0x3021 + +/* + * Transport Layer Attributes + */ +#define T_NUMCPORTS 0x4000 +#define T_NUMTESTFEATURES 0x4001 +#define T_CONNECTIONSTATE 0x4020 +#define T_PEERDEVICEID 0x4021 +#define T_PEERCPORTID 0x4022 +#define T_TRAFFICCLASS 0x4023 +#define T_PROTOCOLID 0x4024 +#define T_CPORTFLAGS 0x4025 +#define 
T_TXTOKENVALUE 0x4026 +#define T_RXTOKENVALUE 0x4027 +#define T_LOCALBUFFERSPACE 0x4028 +#define T_PEERBUFFERSPACE 0x4029 +#define T_CREDITSTOSEND 0x402A +#define T_CPORTMODE 0x402B +#define T_TC0TXMAXSDUSIZE 0x4060 +#define T_TC1TXMAXSDUSIZE 0x4061 + +/* Boolean attribute values */ +enum { + FALSE = 0, + TRUE, +}; + +#endif /* _UNIPRO_H_ */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 74b88efde6ad..c3173dced870 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -224,6 +224,9 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi, virtqueue_disable_cb(vq); while ((buf = virtqueue_get_buf(vq, &len)) != NULL) fn(vscsi, buf); + + if (unlikely(virtqueue_is_broken(vq))) + break; } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); } @@ -710,19 +713,15 @@ static struct scsi_host_template virtscsi_host_template_multi = { #define virtscsi_config_get(vdev, fld) \ ({ \ typeof(((struct virtio_scsi_config *)0)->fld) __val; \ - vdev->config->get(vdev, \ - offsetof(struct virtio_scsi_config, fld), \ - &__val, sizeof(__val)); \ + virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \ __val; \ }) #define virtscsi_config_set(vdev, fld, val) \ - (void)({ \ + do { \ typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \ - vdev->config->set(vdev, \ - offsetof(struct virtio_scsi_config, fld), \ - &__val, sizeof(__val)); \ - }) + virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ + } while(0) static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) { @@ -954,7 +953,7 @@ static void virtscsi_remove(struct virtio_device *vdev) scsi_host_put(shost); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int virtscsi_freeze(struct virtio_device *vdev) { virtscsi_remove_vqs(vdev); @@ -988,7 +987,7 @@ static struct virtio_driver virtio_scsi_driver = { .id_table = id_table, .probe = virtscsi_probe, .scan = virtscsi_scan, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .freeze = virtscsi_freeze, .restore = virtscsi_restore, #endif |
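/*
 * Userspace sketch of the typeof-based config accessor pattern used by
 * virtscsi_config_get()/virtscsi_config_set() above, exercised here
 * against a plain struct: memcpy() plus offsetof() stand in for the
 * new virtio_cread()/virtio_cwrite() helpers. Relies on GNU C
 * statement expressions and typeof, just as the driver macro does.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stddef.h>

struct cfg_mirror {                  /* assumed subset of the config space */
        uint32_t num_queues;
        uint32_t cmd_per_lun;
};

#define config_get(cfg, fld)                                        \
({                                                                  \
        typeof(((struct cfg_mirror *)0)->fld) __val;                \
        memcpy(&__val, (const char *)(cfg) +                        \
               offsetof(struct cfg_mirror, fld), sizeof(__val));    \
        __val;                                                      \
})

int main(void)
{
        struct cfg_mirror cfg = { .num_queues = 4, .cmd_per_lun = 128 };

        printf("num_queues=%u cmd_per_lun=%u\n",
               config_get(&cfg, num_queues),
               config_get(&cfg, cmd_per_lun));
        return 0;
}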