Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r-- | drivers/scsi/ipr.c | 238 |
1 file changed, 153 insertions, 85 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 6077c43edacc..fe6029f4df16 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -192,7 +192,7 @@ static const struct ipr_chip_t ipr_chip[] = {
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
 };
 
-static int ipr_max_bus_speeds [] = {
+static int ipr_max_bus_speeds[] = {
 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
 };
 
@@ -562,10 +562,27 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 	trace_entry->u.add_data = add_data;
 }
 #else
-#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
+#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
 #endif
 
 /**
+ * ipr_lock_and_done - Acquire lock and complete command
+ * @ipr_cmd:	ipr command struct
+ *
+ * Return value:
+ *	none
+ **/
+static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
+{
+	unsigned long lock_flags;
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	ipr_cmd->done(ipr_cmd);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
+/**
  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
  * @ipr_cmd:	ipr command struct
  *
@@ -611,34 +628,50 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
  * Return value:
  *	none
  **/
-static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+			      void (*fast_done) (struct ipr_cmnd *))
 {
 	ipr_reinit_ipr_cmnd(ipr_cmd);
 	ipr_cmd->u.scratch = 0;
 	ipr_cmd->sibling = NULL;
+	ipr_cmd->fast_done = fast_done;
 	init_timer(&ipr_cmd->timer);
 }
 
 /**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
  * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
 static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_cmnd *ipr_cmd;
 
 	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
 	list_del(&ipr_cmd->queue);
-	ipr_init_ipr_cmnd(ipr_cmd);
 
 	return ipr_cmd;
 }
 
 /**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg:	ioa config struct
+ *
+ * Return value:
+ *	pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
+	return ipr_cmd;
+}
+
+/**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
  * @clr_ints:	interrupts to clear
@@ -1002,7 +1035,7 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
  **/
 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
 {
-	switch(proto) {
+	switch (proto) {
 	case IPR_PROTO_SATA:
 	case IPR_PROTO_SAS_STP:
 		res->ata_class = ATA_DEV_ATA;
@@ -3043,7 +3076,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 }
 
 #else
-#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
+#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
 #endif
 
 /**
@@ -3055,7 +3088,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
  **/
 static void ipr_release_dump(struct kref *kref)
 {
-	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
+	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
 	unsigned long lock_flags = 0;
 	int i;
@@ -3142,7 +3175,7 @@ restart:
 				break;
 			}
 		}
-	} while(did_work);
+	} while (did_work);
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 		if (res->add_to_ml) {
@@ -3268,7 +3301,7 @@ static ssize_t ipr_show_log_level(struct device *dev,
  *	number of bytes printed to buffer
  **/
 static ssize_t ipr_store_log_level(struct device *dev,
-				   struct device_attribute *attr,
+				   struct device_attribute *attr,
 				   const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
@@ -3315,7 +3348,7 @@ static ssize_t ipr_store_diagnostics(struct device *dev,
 		return -EACCES;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	while(ioa_cfg->in_reset_reload) {
+	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3682,7 +3715,7 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
 	unsigned long lock_flags;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	while(ioa_cfg->in_reset_reload) {
+	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3746,7 +3779,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
 	len = snprintf(fname, 99, "%s", buf);
 	fname[len-1] = '\0';
 
-	if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
+	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
 		return -EIO;
 	}
@@ -4612,7 +4645,7 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
 * Return value:
 *	SUCCESS / FAILED
 **/
-static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
+static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
 	int rc;
@@ -4634,7 +4667,7 @@ static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
 	return rc;
 }
 
-static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
 {
 	int rc;
 
@@ -4701,7 +4734,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
 	}
 
 	LEAVE;
-	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
+	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
 }
 
 /**
@@ -4725,7 +4758,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
 
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	while(ioa_cfg->in_reset_reload) {
+	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -4753,7 +4786,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
 * Return value:
 *	SUCCESS / FAILED
 **/
-static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
+static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 {
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioa_cfg *ioa_cfg;
@@ -4811,10 +4844,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
 	res->resetting_device = 0;
 
 	LEAVE;
-	return (rc ? FAILED : SUCCESS);
+	return rc ? FAILED : SUCCESS;
 }
 
-static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
+static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
 {
 	int rc;
 
@@ -4910,7 +4943,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
 * Return value:
 *	SUCCESS / FAILED
 **/
-static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
+static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 {
 	struct ipr_cmnd *ipr_cmd;
 	struct ipr_ioa_cfg *ioa_cfg;
@@ -4979,7 +5012,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
 		res->needs_sync_complete = 1;
 
 	LEAVE;
-	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
+	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
 }
 
 /**
@@ -4989,7 +5022,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
 * Return value:
 *	SUCCESS / FAILED
 **/
-static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
+static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
 {
 	unsigned long flags;
 	int rc;
@@ -5116,8 +5149,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	u16 cmd_index;
 	int num_hrrq = 0;
 	int irq_none = 0;
-	struct ipr_cmnd *ipr_cmd;
+	struct ipr_cmnd *ipr_cmd, *temp;
 	irqreturn_t rc = IRQ_NONE;
+	LIST_HEAD(doneq);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
@@ -5138,8 +5172,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
 			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
 				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
-				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-				return IRQ_HANDLED;
+				rc = IRQ_HANDLED;
+				goto unlock_out;
 			}
 
 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
@@ -5148,9 +5182,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 
-			list_del(&ipr_cmd->queue);
-			del_timer(&ipr_cmd->timer);
-			ipr_cmd->done(ipr_cmd);
+			list_move_tail(&ipr_cmd->queue, &doneq);
 
 			rc = IRQ_HANDLED;
 
@@ -5180,8 +5212,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
 			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
-			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-			return IRQ_HANDLED;
+			rc = IRQ_HANDLED;
+			goto unlock_out;
 		} else
 			break;
 	}
@@ -5189,7 +5221,14 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	if (unlikely(rc == IRQ_NONE))
 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
+unlock_out:
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+		list_del(&ipr_cmd->queue);
+		del_timer(&ipr_cmd->timer);
+		ipr_cmd->fast_done(ipr_cmd);
+	}
+
 	return rc;
 }
 
@@ -5770,21 +5809,28 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+	unsigned long lock_flags;
 
 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		scsi_dma_unmap(ipr_cmd->scsi_cmd);
+		scsi_dma_unmap(scsi_cmd);
+
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-	} else
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	} else {
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	}
 }
 
 /**
  * ipr_queuecommand - Queue a mid-layer request
+ * @shost:	scsi host struct
  * @scsi_cmd:	scsi command struct
- * @done:	done function
  *
  * This function queues a request generated by the mid-layer.
 *
@@ -5793,61 +5839,61 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
-				void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+			    struct scsi_cmnd *scsi_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
 	struct ipr_resource_entry *res;
 	struct ipr_ioarcb *ioarcb;
 	struct ipr_cmnd *ipr_cmd;
-	int rc = 0;
+	unsigned long lock_flags;
+	int rc;
 
-	scsi_cmd->scsi_done = done;
-	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-	res = scsi_cmd->device->hostdata;
+	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+	spin_lock_irqsave(shost->host_lock, lock_flags);
 	scsi_cmd->result = (DID_OK << 16);
+	res = scsi_cmd->device->hostdata;
 
 	/*
 	 * We are currently blocking all devices due to a host reset
 	 * We have told the host to stop giving us new requests, but
 	 * ERP ops don't count. FIXME
 	 */
-	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 
 	/*
 	 * FIXME - Create scsi_set_host_offline interface
 	 *  and the ioa_is_dead check can be removed
 	 */
 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-		scsi_cmd->result = (DID_NO_CONNECT << 16);
-		scsi_cmd->scsi_done(scsi_cmd);
-		return 0;
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		goto err_nodev;
+	}
+
+	if (ipr_is_gata(res) && res->sata_port) {
+		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		return rc;
 	}
 
-	if (ipr_is_gata(res) && res->sata_port)
-		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 
-	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
 	ioarcb = &ipr_cmd->ioarcb;
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 
 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
 	ipr_cmd->scsi_cmd = scsi_cmd;
-	ioarcb->res_handle = res->res_handle;
-	ipr_cmd->done = ipr_scsi_done;
-	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
+	ipr_cmd->done = ipr_scsi_eh_done;
 
 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
 		if (scsi_cmd->underflow == 0)
 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
-		if (res->needs_sync_complete) {
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
-			res->needs_sync_complete = 0;
-		}
-
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
 		if (ipr_is_gscsi(res))
 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5859,23 +5905,46 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 
-	if (likely(rc == 0)) {
-		if (ioa_cfg->sis64)
-			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
-		else
-			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
-	}
+	if (ioa_cfg->sis64)
+		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+	else
+		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
-	if (unlikely(rc != 0)) {
-		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		if (!rc)
+			scsi_dma_unmap(scsi_cmd);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
+	if (unlikely(ioa_cfg->ioa_is_dead)) {
+		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		scsi_dma_unmap(scsi_cmd);
+		goto err_nodev;
+	}
+
+	ioarcb->res_handle = res->res_handle;
+	if (res->needs_sync_complete) {
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+		res->needs_sync_complete = 0;
+	}
+
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 	ipr_send_command(ipr_cmd);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 	return 0;
-}
 
-static DEF_SCSI_QCMD(ipr_queuecommand)
+err_nodev:
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+	scsi_cmd->result = (DID_NO_CONNECT << 16);
+	scsi_cmd->scsi_done(scsi_cmd);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	return 0;
+}
 
 /**
  * ipr_ioctl - IOCTL handler
@@ -5907,7 +5976,7 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 * Return value:
 *	pointer to buffer with description string
 **/
-static const char * ipr_ioa_info(struct Scsi_Host *host)
+static const char *ipr_ioa_info(struct Scsi_Host *host)
 {
 	static char buffer[512];
 	struct ipr_ioa_cfg *ioa_cfg;
@@ -5965,7 +6034,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
 
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	while(ioa_cfg->in_reset_reload) {
+	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6005,7 +6074,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
 	unsigned long flags;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	while(ioa_cfg->in_reset_reload) {
+	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6330,7 +6399,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
 	int i;
 
 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
-		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
+		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
 			if (pvr_version_is(ipr_blocked_processors[i]))
 				return 1;
 		}
@@ -6608,7 +6677,7 @@ static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
 *	none
 **/
 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
-					  struct ipr_mode_pages *mode_pages)
+					  struct ipr_mode_pages *mode_pages)
 {
 	int i, entry_length;
 	struct ipr_dev_bus_entry *bus;
@@ -8022,7 +8091,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
 		ipr_reinit_ipr_cmnd(ipr_cmd);
 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
 		rc = ipr_cmd->job_step(ipr_cmd);
-	} while(rc == IPR_RC_JOB_CONTINUE);
+	} while (rc == IPR_RC_JOB_CONTINUE);
 }
 
 /**
@@ -8283,7 +8352,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 	}
 
 	if (ioa_cfg->ipr_cmd_pool)
-		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
+		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
 
 	kfree(ioa_cfg->ipr_cmnd_list);
 	kfree(ioa_cfg->ipr_cmnd_list_dma);
@@ -8363,8 +8432,8 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 	dma_addr_t dma_addr;
 	int i;
 
-	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
-						 sizeof(struct ipr_cmnd), 512, 0);
+	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
+						sizeof(struct ipr_cmnd), 512, 0);
 
 	if (!ioa_cfg->ipr_cmd_pool)
 		return -ENOMEM;
@@ -8378,7 +8447,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 	}
 
 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
-		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
+		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
 		if (!ipr_cmd) {
 			ipr_free_cmd_blks(ioa_cfg);
@@ -8775,8 +8844,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
-	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
-		      sata_port_info.flags, &ipr_sata_ops);
+	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
 
 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
 
@@ -8964,7 +9032,7 @@ static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
 	int target, lun;
 
 	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
-		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
+		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
 			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
 }
 
@@ -9010,7 +9078,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
-	while(ioa_cfg->in_reset_reload) {
+	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9020,7 +9088,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-	flush_work_sync(&ioa_cfg->work_q);
+	flush_work(&ioa_cfg->work_q);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
 	spin_lock(&ipr_driver_lock);
@@ -9139,7 +9207,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
 	unsigned long lock_flags = 0;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	while(ioa_cfg->in_reset_reload) {
+	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -9228,7 +9296,7 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
 
-static struct pci_error_handlers ipr_err_handler = {
+static const struct pci_error_handlers ipr_err_handler = {
 	.error_detected = ipr_pci_error_detected,
 	.slot_reset = ipr_pci_slot_reset,
 };
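The heart of the change is the completion path: ipr_isr() now only moves finished commands onto a private doneq while the host lock is held and runs their callbacks after the lock is dropped, with ipr_lock_and_done() re-taking the lock for the paths that still need it and ipr_scsi_done() locking only around the small sections that touch shared state. The sketch below is an illustration of that pattern, not code from the patch: the demo_* types and demo_service_completions() are hypothetical stand-ins, while the list, timer, and spinlock calls are the standard kernel APIs the patch uses. For brevity it treats every pending command as finished, whereas ipr_isr() moves only the commands the HRRQ reports complete.

/*
 * Illustrative sketch only (not part of the patch): complete commands
 * outside the lock, mirroring the doneq handling in ipr_isr() above.
 * demo_* names are hypothetical; the kernel APIs are real.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct demo_cmnd {				/* stand-in for struct ipr_cmnd */
	struct list_head queue;
	struct timer_list timer;
	void (*fast_done)(struct demo_cmnd *);
};

struct demo_host {				/* stand-in for struct ipr_ioa_cfg */
	spinlock_t lock;			/* plays the role of host->host_lock */
	struct list_head pending_q;
};

static void demo_service_completions(struct demo_host *host)
{
	struct demo_cmnd *cmd, *tmp;
	unsigned long flags;
	LIST_HEAD(doneq);

	spin_lock_irqsave(&host->lock, flags);
	/*
	 * Under the lock, only *move* finished commands to a private list.
	 * (Here every pending command is treated as finished for brevity;
	 * ipr_isr() moves just the ones the HRRQ reports complete.)
	 */
	list_for_each_entry_safe(cmd, tmp, &host->pending_q, queue)
		list_move_tail(&cmd->queue, &doneq);
	spin_unlock_irqrestore(&host->lock, flags);

	/* The per-command completion work now runs without the lock held. */
	list_for_each_entry_safe(cmd, tmp, &doneq, queue) {
		list_del(&cmd->queue);
		del_timer(&cmd->timer);
		cmd->fast_done(cmd);	/* e.g. ipr_scsi_done() or ipr_lock_and_done() */
	}
}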