commit 675e0655c12209ba1f40af0dff7cd76b17a1315c
tree c29b8ddd6fdbd66161e7150feee566daaebe36d3
parent Merge branch 'stable/for-linus-4.7' of git://git.kernel.org/pub/scm/linux/ker...
parent Merge branch 'fixes' into misc
author Linus Torvalds <torvalds@linux-foundation.org> 2016-05-19 01:38:59 +0200
committer Linus Torvalds <torvalds@linux-foundation.org> 2016-05-19 01:38:59 +0200
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"First round of SCSI updates for the 4.6+ merge window.
This batch includes the usual quota of driver updates (bnx2fc, mpt3sas,
hpsa, ncr5380, lpfc, hisi_sas, snic, aacraid, megaraid_sas). There's
also a multiqueue update for scsi_debug, assorted bug fixes and a few
other minor updates (refactor of scsi_sg_pools into generic code, alua
and VPD updates, and struct timeval conversions)"
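The scsi_sg_pools refactor mentioned above moves the SCSI scatter-gather pool code into generic library code and renames the segment limit; drivers that sized their tables with SCSI_MAX_SG_CHAIN_SEGMENTS now use SG_MAX_SEGMENTS, as the pata_icside and arm/cumana_2 hunks in the diff below show. A minimal sketch of an affected host template follows; the "example" names are hypothetical and not from this series.

#include <linux/scatterlist.h>	/* SG_MAX_SEGMENTS after the sg_pool move */
#include <scsi/scsi_host.h>

/*
 * Hypothetical host template fragment: the scatter-gather table is now
 * sized with the generic SG_MAX_SEGMENTS limit rather than the old
 * SCSI-private SCSI_MAX_SG_CHAIN_SEGMENTS constant.
 */
static struct scsi_host_template example_sht = {
	.name		= "example",
	.can_queue	= 1,
	.this_id	= 7,
	.sg_tablesize	= SG_MAX_SEGMENTS,
};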
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (138 commits)
mpt3sas: Used "synchronize_irq()" API to synchronize timed-out IO & TMs
mpt3sas: Set maximum transfer length per IO to 4MB for VDs
mpt3sas: Updating mpt3sas driver version to 13.100.00.00
mpt3sas: Fix initial Reference tag field for 4K PI drives.
mpt3sas: Handle active cable exception event
mpt3sas: Update MPI header to 2.00.42
Revert "lpfc: Delete unnecessary checks before the function call mempool_destroy"
eata_pio: missing break statement
hpsa: Fix type ZBC conditional checks
scsi_lib: Decode T10 vendor IDs
scsi_dh_alua: do not fail for unknown VPD identification
scsi_debug: use locally assigned naa
scsi_debug: uuid for lu name
scsi_debug: vpd and mode page work
scsi_debug: add multiple queue support
bfa: fix bfa_fcb_itnim_alloc() error handling
megaraid_sas: Downgrade two success messages to info
cxlflash: Fix to resolve dead-lock during EEH recovery
scsi_debug: rework resp_report_luns
scsi_debug: use pdt constants
...
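The drivers/message/fusion/mptsas.c hunks in the diff below also tighten DMA error handling: a failed mapping is now detected with pci_dma_mapping_error() instead of testing the returned address against zero. A minimal sketch of the corrected pattern, assuming the legacy PCI DMA API of that era; the helper name and error code are illustrative only.

#include <linux/errno.h>
#include <linux/pci.h>

/*
 * Hypothetical helper: a failed pci_map_single() must be detected with
 * pci_dma_mapping_error(); comparing the returned dma_addr_t against
 * zero (the old check) misses failures and can reject a valid bus
 * address of zero.
 */
static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len,
			      dma_addr_t *out)
{
	dma_addr_t addr = pci_map_single(pdev, buf, len, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(pdev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}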
117 files changed, 4839 insertions, 6481 deletions
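The diff also converts scsi_scan_target() callers (ib_srp, mptspi, zfcp) from a bare 0/1 rescan flag to the new enum scsi_scan_mode values SCSI_SCAN_INITIAL, SCSI_SCAN_RESCAN and SCSI_SCAN_MANUAL. A minimal sketch of a converted call site, assuming the 4.7-era signature; the wrapper function is hypothetical.

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/*
 * Hypothetical rescan helper: the fifth argument of scsi_scan_target()
 * is now an enum scsi_scan_mode value instead of a 0/1 flag.
 */
static void example_rescan_target(struct Scsi_Host *shost, unsigned int id,
				  u64 lun)
{
	scsi_scan_target(&shost->shost_gendev, 0, id, lun, SCSI_SCAN_RESCAN);
}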
diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt index 3b80f567f818..fd880150aeea 100644 --- a/Documentation/scsi/g_NCR5380.txt +++ b/Documentation/scsi/g_NCR5380.txt @@ -23,11 +23,10 @@ supported by the driver. If the default configuration does not work for you, you can use the kernel command lines (eg using the lilo append command): - ncr5380=port,irq,dma - ncr53c400=port,irq -or - ncr5380=base,irq,dma - ncr53c400=base,irq + ncr5380=addr,irq + ncr53c400=addr,irq + ncr53c400a=addr,irq + dtc3181e=addr,irq The driver does not probe for any addresses or ports other than those in the OVERRIDE or given to the kernel as above. @@ -36,19 +35,17 @@ This driver provides some information on what it has detected in /proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot time. More info to come in the future. -When NCR53c400 support is compiled in, BIOS parameters will be returned by -the driver (the raw 5380 driver does not and I don't plan to fiddle with -it!). - This driver works as a module. When included as a module, parameters can be passed on the insmod/modprobe command line: ncr_irq=xx the interrupt ncr_addr=xx the port or base address (for port or memory mapped, resp.) - ncr_dma=xx the DMA ncr_5380=1 to set up for a NCR5380 board ncr_53c400=1 to set up for a NCR53C400 board + ncr_53c400a=1 to set up for a NCR53C400A board + dtc_3181e=1 to set up for a Domex Technology Corp 3181E board + hp_c2502=1 to set up for a Hewlett Packard C2502 board e.g. modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 for a port mapped NCR5380 board or diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt index 2bfd6f6d2d3d..1241ac11edb1 100644 --- a/Documentation/scsi/scsi-parameters.txt +++ b/Documentation/scsi/scsi-parameters.txt @@ -27,13 +27,15 @@ parameters may be changed at runtime by the command aic79xx= [HW,SCSI] See Documentation/scsi/aic79xx.txt. - atascsi= [HW,SCSI] Atari SCSI + atascsi= [HW,SCSI] + See drivers/scsi/atari_scsi.c. BusLogic= [HW,SCSI] See drivers/scsi/BusLogic.c, comment before function BusLogic_ParseDriverOptions(). dtc3181e= [HW,SCSI] + See Documentation/scsi/g_NCR5380.txt. eata= [HW,SCSI] @@ -51,8 +53,8 @@ parameters may be changed at runtime by the command ips= [HW,SCSI] Adaptec / IBM ServeRAID controller See header of drivers/scsi/ips.c. - mac5380= [HW,SCSI] Format: - <can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> + mac5380= [HW,SCSI] + See drivers/scsi/mac_scsi.c. max_luns= [SCSI] Maximum number of LUNs to probe. Should be between 1 and 2^32-1. @@ -65,10 +67,13 @@ parameters may be changed at runtime by the command See header of drivers/scsi/NCR_D700.c. ncr5380= [HW,SCSI] + See Documentation/scsi/g_NCR5380.txt. ncr53c400= [HW,SCSI] + See Documentation/scsi/g_NCR5380.txt. ncr53c400a= [HW,SCSI] + See Documentation/scsi/g_NCR5380.txt. 
ncr53c406a= [HW,SCSI] diff --git a/MAINTAINERS b/MAINTAINERS index 804bc4fd154f..c7dd1a3401e5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7593,10 +7593,10 @@ M: Michael Schmitz <schmitzmic@gmail.com> L: linux-scsi@vger.kernel.org S: Maintained F: Documentation/scsi/g_NCR5380.txt +F: Documentation/scsi/dtc3x80.txt F: drivers/scsi/NCR5380.* F: drivers/scsi/arm/cumana_1.c F: drivers/scsi/arm/oak.c -F: drivers/scsi/atari_NCR5380.c F: drivers/scsi/atari_scsi.* F: drivers/scsi/dmx3191d.c F: drivers/scsi/dtc.* diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index d7c732042a4f..188f2f2eb21f 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -294,7 +294,7 @@ static int icside_dma_init(struct pata_icside_info *info) static struct scsi_host_template pata_icside_sht = { ATA_BASE_SHT(DRV_NAME), - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, + .sg_tablesize = SG_MAX_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, }; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index b6bf20496021..369a75e1f44e 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -81,7 +81,7 @@ MODULE_PARM_DESC(cmd_sg_entries, module_param(indirect_sg_entries, uint, 0444); MODULE_PARM_DESC(indirect_sg_entries, - "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")"); + "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")"); module_param(allow_ext_sg, bool, 0444); MODULE_PARM_DESC(allow_ext_sg, @@ -2819,7 +2819,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) spin_unlock(&host->target_lock); scsi_scan_target(&target->scsi_host->shost_gendev, - 0, target->scsi_id, SCAN_WILD_CARD, 0); + 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); if (srp_connected_ch(target) < target->ch_count || target->qp_in_error) { @@ -3097,7 +3097,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) case SRP_OPT_SG_TABLESIZE: if (match_int(args, &token) || token < 1 || - token > SCSI_MAX_SG_CHAIN_SEGMENTS) { + token > SG_MAX_SEGMENTS) { pr_warn("bad max sg_tablesize parameter '%s'\n", p); goto out; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 7ebccfa8072a..7ee1667acde4 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -2281,7 +2281,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); - if (!dma_addr_out) + if (pci_dma_mapping_error(ioc->pcidev, dma_addr_out)) goto put_mf; ioc->add_sge(psge, flagsLength, dma_addr_out); psge += ioc->SGE_size; @@ -2296,7 +2296,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, flagsLength |= blk_rq_bytes(rsp) + 4; dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); - if (!dma_addr_in) + if (pci_dma_mapping_error(ioc->pcidev, dma_addr_in)) goto unmap; ioc->add_sge(psge, flagsLength, dma_addr_in); diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 613231c16194..031e088edb5e 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -1150,7 +1150,7 @@ static void mpt_work_wrapper(struct work_struct *work) } shost_printk(KERN_INFO, shost, MYIOC_s_FMT "Integrated RAID detects new 
device %d\n", ioc->name, disk); - scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1); + scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, SCSI_SCAN_RESCAN); } diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index 157d3d203ba1..9310a547b89f 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c @@ -26,7 +26,8 @@ void zfcp_unit_scsi_scan(struct zfcp_unit *unit) lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); if (rport && rport->port_state == FC_PORTSTATE_ONLINE) - scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1); + scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, + SCSI_SCAN_MANUAL); } static void zfcp_unit_scsi_scan_work(struct work_struct *work) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index e80768f8e579..98e5d51a3346 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -17,6 +17,7 @@ config SCSI tristate "SCSI device support" depends on BLOCK select SCSI_DMA if HAS_DMA + select SG_POOL ---help--- If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or any other SCSI device under Linux, say Y and make sure that you know @@ -202,12 +203,12 @@ config SCSI_ENCLOSURE certain enclosure conditions to be reported and is not required. config SCSI_CONSTANTS - bool "Verbose SCSI error reporting (kernel size +=75K)" + bool "Verbose SCSI error reporting (kernel size += 36K)" depends on SCSI help The error messages regarding your SCSI hardware will be easier to understand if you say Y here; it will enlarge your kernel by about - 75 KB. If in doubt, say Y. + 36 KB. If in doubt, say Y. config SCSI_LOGGING bool "SCSI logging facility" @@ -813,17 +814,6 @@ config SCSI_GENERIC_NCR5380_MMIO To compile this driver as a module, choose M here: the module will be called g_NCR5380_mmio. -config SCSI_GENERIC_NCR53C400 - bool "Enable NCR53c400 extensions" - depends on SCSI_GENERIC_NCR5380 - help - This enables certain optimizations for the NCR53c400 SCSI cards. - You might as well try it out. Note that this driver will only probe - for the Trantor T130B in its default configuration; you might have - to pass a command line option to the kernel at boot time if it does - not detect your card. See the file - <file:Documentation/scsi/g_NCR5380.txt> for details. - config SCSI_IPS tristate "IBM ServeRAID support" depends on PCI && SCSI diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 3eff2a69fe08..43908bbb3b23 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -29,29 +29,9 @@ * Ronald van Cuijlenborg, Alan Cox and others. */ -/* - * Further development / testing that should be done : - * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete - * code so that everything does the same thing that's done at the - * end of a pseudo-DMA read operation. - * - * 2. Fix REAL_DMA (interrupt driven, polled works fine) - - * basically, transfer size needs to be reduced by one - * and the last byte read as is done with PSEUDO_DMA. - * - * 4. Test SCSI-II tagged queueing (I have no devices which support - * tagged queueing) - */ +/* Ported to Atari by Roman Hodek and others. */ -#ifndef notyet -#undef REAL_DMA -#endif - -#ifdef BOARD_REQUIRES_NO_DELAY -#define io_recovery_delay(x) -#else -#define io_recovery_delay(x) udelay(x) -#endif +/* Adapted for the Sun 3 by Sam Creasey. */ /* * Design @@ -126,17 +106,10 @@ * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential * transceivers. 
* - * DONT_USE_INTR - if defined, never use interrupts, even if we probe or - * override-configure an IRQ. - * * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. * - * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't - * rely on phase mismatch and EOP interrupts to determine end - * of phase. - * * These macros MUST be defined : * * NCR5380_read(register) - read from the specified register @@ -147,29 +120,29 @@ * specific implementation of the NCR5380 * * Either real DMA *or* pseudo DMA may be implemented - * REAL functions : - * NCR5380_REAL_DMA should be defined if real DMA is to be used. - * Note that the DMA setup functions should return the number of bytes - * that they were able to program the controller for. - * - * Also note that generic i386/PC versions of these macros are - * available as NCR5380_i386_dma_write_setup, - * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. * * NCR5380_dma_write_setup(instance, src, count) - initialize * NCR5380_dma_read_setup(instance, dst, count) - initialize * NCR5380_dma_residual(instance); - residual count * - * PSEUDO functions : - * NCR5380_pwrite(instance, src, count) - * NCR5380_pread(instance, dst, count); - * * The generic driver is initialized by calling NCR5380_init(instance), * after setting the appropriate host specific fields and ID. If the * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, * possible) function may be used. */ +#ifndef NCR5380_io_delay +#define NCR5380_io_delay(x) +#endif + +#ifndef NCR5380_acquire_dma_irq +#define NCR5380_acquire_dma_irq(x) (1) +#endif + +#ifndef NCR5380_release_dma_irq +#define NCR5380_release_dma_irq(x) +#endif + static int do_abort(struct Scsi_Host *); static void do_reset(struct Scsi_Host *); @@ -280,12 +253,20 @@ static struct { {0, NULL} }, basrs[] = { + {BASR_END_DMA_TRANSFER, "END OF DMA"}, + {BASR_DRQ, "DRQ"}, + {BASR_PARITY_ERROR, "PARITY ERROR"}, + {BASR_IRQ, "IRQ"}, + {BASR_PHASE_MATCH, "PHASE MATCH"}, + {BASR_BUSY_ERROR, "BUSY ERROR"}, {BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL} }, icrs[] = { {ICR_ASSERT_RST, "ASSERT RST"}, + {ICR_ARBITRATION_PROGRESS, "ARB. 
IN PROGRESS"}, + {ICR_ARBITRATION_LOST, "LOST ARB."}, {ICR_ASSERT_ACK, "ASSERT ACK"}, {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"}, @@ -294,14 +275,14 @@ icrs[] = { {0, NULL} }, mrs[] = { - {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, - {MR_TARGET, "MODE TARGET"}, - {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, - {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, - {MR_ENABLE_EOP_INTR, "MODE EOP INTR"}, - {MR_MONITOR_BSY, "MODE MONITOR BSY"}, - {MR_DMA_MODE, "MODE DMA"}, - {MR_ARBITRATE, "MODE ARBITRATION"}, + {MR_BLOCK_DMA_MODE, "BLOCK DMA MODE"}, + {MR_TARGET, "TARGET"}, + {MR_ENABLE_PAR_CHECK, "PARITY CHECK"}, + {MR_ENABLE_PAR_INTR, "PARITY INTR"}, + {MR_ENABLE_EOP_INTR, "EOP INTR"}, + {MR_MONITOR_BSY, "MONITOR BSY"}, + {MR_DMA_MODE, "DMA MODE"}, + {MR_ARBITRATE, "ARBITRATE"}, {0, NULL} }; @@ -322,23 +303,23 @@ static void NCR5380_print(struct Scsi_Host *instance) icr = NCR5380_read(INITIATOR_COMMAND_REG); basr = NCR5380_read(BUS_AND_STATUS_REG); - printk("STATUS_REG: %02x ", status); + printk(KERN_DEBUG "SR = 0x%02x : ", status); for (i = 0; signals[i].mask; ++i) if (status & signals[i].mask) - printk(",%s", signals[i].name); - printk("\nBASR: %02x ", basr); + printk(KERN_CONT "%s, ", signals[i].name); + printk(KERN_CONT "\nBASR = 0x%02x : ", basr); for (i = 0; basrs[i].mask; ++i) if (basr & basrs[i].mask) - printk(",%s", basrs[i].name); - printk("\nICR: %02x ", icr); + printk(KERN_CONT "%s, ", basrs[i].name); + printk(KERN_CONT "\nICR = 0x%02x : ", icr); for (i = 0; icrs[i].mask; ++i) if (icr & icrs[i].mask) - printk(",%s", icrs[i].name); - printk("\nMODE: %02x ", mr); + printk(KERN_CONT "%s, ", icrs[i].name); + printk(KERN_CONT "\nMR = 0x%02x : ", mr); for (i = 0; mrs[i].mask; ++i) if (mr & mrs[i].mask) - printk(",%s", mrs[i].name); - printk("\n"); + printk(KERN_CONT "%s, ", mrs[i].name); + printk(KERN_CONT "\n"); } static struct { @@ -477,52 +458,18 @@ static void prepare_info(struct Scsi_Host *instance) instance->base, instance->irq, instance->can_queue, instance->cmd_per_lun, instance->sg_tablesize, instance->this_id, - hostdata->flags & FLAG_NO_DMA_FIXUP ? "NO_DMA_FIXUP " : "", + hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "", hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", hostdata->flags & FLAG_TOSHIBA_DELAY ? 
"TOSHIBA_DELAY " : "", -#ifdef AUTOPROBE_IRQ - "AUTOPROBE_IRQ " -#endif #ifdef DIFFERENTIAL "DIFFERENTIAL " #endif -#ifdef REAL_DMA - "REAL_DMA " -#endif -#ifdef REAL_DMA_POLL - "REAL_DMA_POLL " -#endif #ifdef PARITY "PARITY " #endif -#ifdef PSEUDO_DMA - "PSEUDO_DMA " -#endif ""); } -#ifdef PSEUDO_DMA -static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, - char *buffer, int length) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - hostdata->spin_max_r = 0; - hostdata->spin_max_w = 0; - return 0; -} - -static int __maybe_unused NCR5380_show_info(struct seq_file *m, - struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n", - hostdata->spin_max_w, hostdata->spin_max_r); - return 0; -} -#endif - /** * NCR5380_init - initialise an NCR5380 * @instance: adapter to configure @@ -543,6 +490,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) int i; unsigned long deadline; + instance->max_lun = 7; + hostdata->host = instance; hostdata->id_mask = 1 << instance->this_id; hostdata->id_higher_mask = 0; @@ -551,9 +500,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) hostdata->id_higher_mask |= i; for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; -#ifdef REAL_DMA - hostdata->dmalen = 0; -#endif + hostdata->dma_len = 0; + spin_lock_init(&hostdata->lock); hostdata->connected = NULL; hostdata->sensing = NULL; @@ -719,6 +667,9 @@ static int NCR5380_queue_command(struct Scsi_Host *instance, cmd->result = 0; + if (!NCR5380_acquire_dma_irq(instance)) + return SCSI_MLQUEUE_HOST_BUSY; + spin_lock_irqsave(&hostdata->lock, flags); /* @@ -743,6 +694,19 @@ static int NCR5380_queue_command(struct Scsi_Host *instance, return 0; } +static inline void maybe_release_dma_irq(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + /* Caller does the locking needed to set & test these data atomically */ + if (list_empty(&hostdata->disconnected) && + list_empty(&hostdata->unissued) && + list_empty(&hostdata->autosense) && + !hostdata->connected && + !hostdata->selecting) + NCR5380_release_dma_irq(instance); +} + /** * dequeue_next_cmd - dequeue a command for processing * @instance: the scsi host instance @@ -844,17 +808,14 @@ static void NCR5380_main(struct work_struct *work) if (!NCR5380_select(instance, cmd)) { dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); + maybe_release_dma_irq(instance); } else { dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, "main: select failed, returning %p to queue\n", cmd); requeue_cmd(instance, cmd); } } - if (hostdata->connected -#ifdef REAL_DMA - && !hostdata->dmalen -#endif - ) { + if (hostdata->connected && !hostdata->dma_len) { dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); NCR5380_information_transfer(instance); done = 0; @@ -865,7 +826,88 @@ static void NCR5380_main(struct work_struct *work) } while (!done); } -#ifndef DONT_USE_INTR +/* + * NCR5380_dma_complete - finish DMA transfer + * @instance: the scsi host instance + * + * Called by the interrupt handler when DMA finishes or a phase + * mismatch occurs (which would end the DMA transfer). 
+ */ + +static void NCR5380_dma_complete(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + int transferred; + unsigned char **data; + int *count; + int saved_data = 0, overrun = 0; + unsigned char p; + + if (hostdata->read_overruns) { + p = hostdata->connected->SCp.phase; + if (p & SR_IO) { + udelay(10); + if ((NCR5380_read(BUS_AND_STATUS_REG) & + (BASR_PHASE_MATCH | BASR_ACK)) == + (BASR_PHASE_MATCH | BASR_ACK)) { + saved_data = NCR5380_read(INPUT_DATA_REG); + overrun = 1; + dsprintk(NDEBUG_DMA, instance, "read overrun handled\n"); + } + } + } + +#ifdef CONFIG_SUN3 + if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { + pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", + instance->host_no); + BUG(); + } + + if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == + (BASR_PHASE_MATCH | BASR_ACK)) { + pr_err("scsi%d: BASR %02x\n", instance->host_no, + NCR5380_read(BUS_AND_STATUS_REG)); + pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n", + instance->host_no); + BUG(); + } +#endif + + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + + transferred = hostdata->dma_len - NCR5380_dma_residual(instance); + hostdata->dma_len = 0; + + data = (unsigned char **)&hostdata->connected->SCp.ptr; + count = &hostdata->connected->SCp.this_residual; + *data += transferred; + *count -= transferred; + + if (hostdata->read_overruns) { + int cnt, toPIO; + + if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { + cnt = toPIO = hostdata->read_overruns; + if (overrun) { + dsprintk(NDEBUG_DMA, instance, + "Got an input overrun, using saved byte\n"); + *(*data)++ = saved_data; + (*count)--; + cnt--; + toPIO--; + } + if (toPIO > 0) { + dsprintk(NDEBUG_DMA, instance, + "Doing %d byte PIO to 0x%p\n", cnt, *data); + NCR5380_transfer_pio(instance, &p, &cnt, data); + *count -= toPIO - cnt; + } + } + } +} /** * NCR5380_intr - generic NCR5380 irq handler @@ -901,7 +943,7 @@ static void NCR5380_main(struct work_struct *work) * the Busy Monitor interrupt is enabled together with DMA Mode. */ -static irqreturn_t NCR5380_intr(int irq, void *dev_id) +static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) { struct Scsi_Host *instance = dev_id; struct NCR5380_hostdata *hostdata = shost_priv(instance); @@ -919,7 +961,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", irq, basr, sr, mr); -#if defined(REAL_DMA) if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { /* Probably End of DMA, Phase Mismatch or Loss of BSY. * We ack IRQ after clearing Mode Register. Workarounds @@ -928,26 +969,14 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); - int transferred; - - if (!hostdata->connected) - panic("scsi%d : DMA interrupt with no connected cmd\n", - instance->hostno); - - transferred = hostdata->dmalen - NCR5380_dma_residual(instance); - hostdata->connected->SCp.this_residual -= transferred; - hostdata->connected->SCp.ptr += transferred; - hostdata->dmalen = 0; - - /* FIXME: we need to poll briefly then defer a workqueue task ! 
*/ - NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_ACK, 0, 2 * HZ); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } else -#endif /* REAL_DMA */ - if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && + if (hostdata->connected) { + NCR5380_dma_complete(instance); + queue_work(hostdata->work_q, &hostdata->main_task); + } else { + NCR5380_write(MODE_REG, MR_BASE); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + } + } else if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { /* Probably reselected */ NCR5380_write(SELECT_ENABLE_REG, 0); @@ -966,10 +995,16 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) NCR5380_read(RESET_PARITY_INTERRUPT_REG); dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif } handled = 1; } else { shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif } spin_unlock_irqrestore(&hostdata->lock, flags); @@ -977,8 +1012,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) return IRQ_RETVAL(handled); } -#endif - /* * Function : int NCR5380_select(struct Scsi_Host *instance, * struct scsi_cmnd *cmd) @@ -1217,14 +1250,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, * was true but before BSY was false during selection, the information * transfer phase should be a MESSAGE OUT phase so that we can send the * IDENTIFY message. - * - * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG - * message (2 bytes) with a tag ID that we increment with every command - * until it wraps back to 0. - * - * XXX - it turns out that there are some broken SCSI-II devices, - * which claim to support tagged queuing but fail when more than - * some number of commands are issued at once. */ /* Wait for start of REQ/ACK handshake */ @@ -1247,9 +1272,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 
0 : 1), cmd->device->lun); len = 1; - cmd->tag = 0; - - /* Send message(s) */ data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); @@ -1259,6 +1281,10 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, hostdata->connected = cmd; hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif + initialize_SCp(cmd); cmd = NULL; @@ -1495,7 +1521,6 @@ timeout: return -1; } -#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL) /* * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, * unsigned char *phase, int *count, unsigned char **data) @@ -1520,53 +1545,47 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char **data) { struct NCR5380_hostdata *hostdata = shost_priv(instance); - register int c = *count; - register unsigned char p = *phase; - register unsigned char *d = *data; + int c = *count; + unsigned char p = *phase; + unsigned char *d = *data; unsigned char tmp; - int foo; -#if defined(REAL_DMA_POLL) - int cnt, toPIO; - unsigned char saved_data = 0, overrun = 0, residue; -#endif + int result = 0; if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { *phase = tmp; return -1; } -#if defined(REAL_DMA) || defined(REAL_DMA_POLL) + + hostdata->connected->SCp.phase = p; + if (p & SR_IO) { - if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) - c -= 2; + if (hostdata->read_overruns) + c -= hostdata->read_overruns; + else if (hostdata->flags & FLAG_DMA_FIXUP) + --c; } - hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", - (p & SR_IO) ? "receive" : "send", c, *data); + (p & SR_IO) ? "receive" : "send", c, d); + +#ifdef CONFIG_SUN3 + /* send start chain */ + sun3scsi_dma_start(c, *data); #endif NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); - -#ifdef REAL_DMA NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | MR_ENABLE_EOP_INTR); -#elif defined(REAL_DMA_POLL) - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY); -#else - /* - * Note : on my sample board, watch-dog timeouts occurred when interrupts - * were not disabled for the duration of a single DMA transfer, from - * before the setting of DMA mode to after transfer of the last byte. - */ - - if (hostdata->flags & FLAG_NO_DMA_FIXUP) - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | - MR_ENABLE_EOP_INTR); - else - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY); -#endif /* def REAL_DMA */ - dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)); + if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { + /* On the Medusa, it is a must to initialize the DMA before + * starting the NCR. This is also the cleaner way for the TT. + */ + if (p & SR_IO) + result = NCR5380_dma_recv_setup(instance, d, c); + else + result = NCR5380_dma_send_setup(instance, d, c); + } /* * On the PAS16 at least I/O recovery delays are not needed here. 
@@ -1574,24 +1593,49 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, */ if (p & SR_IO) { - io_recovery_delay(1); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + NCR5380_io_delay(1); NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); } else { - io_recovery_delay(1); + NCR5380_io_delay(1); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); - io_recovery_delay(1); + NCR5380_io_delay(1); NCR5380_write(START_DMA_SEND_REG, 0); - io_recovery_delay(1); + NCR5380_io_delay(1); } -#if defined(REAL_DMA_POLL) - do { - tmp = NCR5380_read(BUS_AND_STATUS_REG); - } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR | BASR_END_DMA_TRANSFER))); +#ifdef CONFIG_SUN3 +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif + sun3_dma_active = 1; +#endif + + if (hostdata->flags & FLAG_LATE_DMA_SETUP) { + /* On the Falcon, the DMA setup must be done after the last + * NCR access, else the DMA setup gets trashed! + */ + if (p & SR_IO) + result = NCR5380_dma_recv_setup(instance, d, c); + else + result = NCR5380_dma_send_setup(instance, d, c); + } + + /* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */ + if (result < 0) + return result; + + /* For real DMA, result is the byte count. DMA interrupt is expected. */ + if (result > 0) { + hostdata->dma_len = result; + return 0; + } + + /* The result is zero iff pseudo DMA send/receive was completed. */ + hostdata->dma_len = c; /* - * At this point, either we've completed DMA, or we have a phase mismatch, - * or we've unexpectedly lost BUSY (which is a real error). + * A note regarding the DMA errata workarounds for early NMOS silicon. * * For DMA sends, we want to wait until the last byte has been * transferred out over the bus before we turn off DMA mode. Alas, there @@ -1618,79 +1662,16 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * properly, or the target switches to MESSAGE IN phase to signal a * disconnection (either operation bringing the DMA to a clean halt). * However, in order to handle scatter-receive, we must work around the - * problem. The chosen fix is to DMA N-2 bytes, then check for the + * problem. The chosen fix is to DMA fewer bytes, then check for the * condition before taking the NCR5380 out of DMA mode. One or two extra * bytes are transferred via PIO as necessary to fill out the original * request. 
*/ - if (p & SR_IO) { - if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) { - udelay(10); - if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == - (BASR_PHASE_MATCH | BASR_ACK)) { - saved_data = NCR5380_read(INPUT_DATA_REGISTER); - overrun = 1; - } - } - } else { - int limit = 100; - while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) || (NCR5380_read(STATUS_REG) & SR_REQ)) { - if (!(tmp & BASR_PHASE_MATCH)) - break; - if (--limit < 0) - break; - } - } - - dsprintk(NDEBUG_DMA, "polled DMA transfer complete, basr 0x%02x, sr 0x%02x\n", - tmp, NCR5380_read(STATUS_REG)); - - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - residue = NCR5380_dma_residual(instance); - c -= residue; - *count -= c; - *data += c; - *phase = NCR5380_read(STATUS_REG) & PHASE_MASK; - - if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS) && - *phase == p && (p & SR_IO) && residue == 0) { - if (overrun) { - dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); - **data = saved_data; - *data += 1; - *count -= 1; - cnt = toPIO = 1; - } else { - printk("No overrun??\n"); - cnt = toPIO = 2; - } - dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data); - NCR5380_transfer_pio(instance, phase, &cnt, data); - *count -= toPIO - cnt; - } - - dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)); - return 0; - -#elif defined(REAL_DMA) - return 0; -#else /* defined(REAL_DMA_POLL) */ - if (p & SR_IO) { - foo = NCR5380_pread(instance, d, - hostdata->flags & FLAG_NO_DMA_FIXUP ? c : c - 1); - if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) { + if (hostdata->flags & FLAG_DMA_FIXUP) { + if (p & SR_IO) { /* - * We can't disable DMA mode after successfully transferring - * what we plan to be the last byte, since that would open up - * a race condition where if the target asserted REQ before - * we got the DMA mode reset, the NCR5380 would have latched - * an additional byte into the INPUT DATA register and we'd - * have dropped it. - * - * The workaround was to transfer one fewer bytes than we + * The workaround was to transfer fewer bytes than we * intended to with the pseudo-DMA read function, wait for * the chip to latch the last byte, read it, and then disable * pseudo-DMA mode. @@ -1706,19 +1687,16 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, HZ) < 0) { - foo = -1; + result = -1; shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); } if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, HZ) < 0) { - foo = -1; + result = -1; shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); } - d[c - 1] = NCR5380_read(INPUT_DATA_REG); - } - } else { - foo = NCR5380_pwrite(instance, d, c); - if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) { + d[*count - 1] = NCR5380_read(INPUT_DATA_REG); + } else { /* * Wait for the last byte to be sent. If REQ is being asserted for * the byte we're interested, we'll ACK it and it will go false. 
@@ -1726,21 +1704,15 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, if (NCR5380_poll_politely2(instance, BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) { - foo = -1; + result = -1; shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); } } } - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - *data = d + c; - *count = 0; - *phase = NCR5380_read(STATUS_REG) & PHASE_MASK; - return foo; -#endif /* def REAL_DMA */ + + NCR5380_dma_complete(instance); + return result; } -#endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */ /* * Function : NCR5380_information_transfer (struct Scsi_Host *instance) @@ -1770,6 +1742,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; struct scsi_cmnd *cmd; +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif + while ((cmd = hostdata->connected)) { struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); @@ -1781,6 +1757,31 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) old_phase = phase; NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } +#ifdef CONFIG_SUN3 + if (phase == PHASE_CMDOUT) { + void *d; + unsigned long count; + + if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { + count = cmd->SCp.buffer->length; + d = sg_virt(cmd->SCp.buffer); + } else { + count = cmd->SCp.this_residual; + d = cmd->SCp.ptr; + } + + if (sun3_dma_setup_done != cmd && + sun3scsi_dma_xfer_len(count, cmd) > 0) { + sun3scsi_dma_setup(instance, d, count, + rq_data_dir(cmd->request)); + sun3_dma_setup_done = cmd; + } +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif + } +#endif /* CONFIG_SUN3 */ + if (sink && (phase != PHASE_MSGOUT)) { NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); @@ -1831,13 +1832,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) * in an unconditional loop. */ -#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) transfersize = 0; - if (!cmd->device->borken && - !(hostdata->flags & FLAG_NO_PSEUDO_DMA)) + if (!cmd->device->borken) transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); - if (transfersize) { + if (transfersize > 0) { len = transfersize; if (NCR5380_transfer_dma(instance, &phase, &len, (unsigned char **)&cmd->SCp.ptr)) { @@ -1853,11 +1852,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) do_abort(instance); cmd->result = DID_ERROR << 16; /* XXX - need to source or sink data here, as appropriate */ - } else - cmd->SCp.this_residual -= transfersize - len; - } else -#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */ - { + } + } else { /* Break up transfer into 3 ms chunks, * presuming 6 accesses per handshake. 
*/ @@ -1868,6 +1864,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) (unsigned char **)&cmd->SCp.ptr); cmd->SCp.this_residual -= transfersize - len; } +#ifdef CONFIG_SUN3 + if (sun3_dma_setup_done == cmd) + sun3_dma_setup_done = NULL; +#endif return; case PHASE_MSGIN: len = 1; @@ -1912,6 +1912,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); + + maybe_release_dma_irq(instance); return; case MESSAGE_REJECT: /* Accept message by clearing ACK */ @@ -1944,6 +1946,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif return; /* * The SCSI data pointer is *IMPLICITLY* saved on a disconnect @@ -2047,6 +2052,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) hostdata->connected = NULL; cmd->result = DID_ERROR << 16; complete_cmd(instance, cmd); + maybe_release_dma_irq(instance); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return; } @@ -2094,10 +2100,8 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); unsigned char target_mask; - unsigned char lun, phase; - int len; + unsigned char lun; unsigned char msg[3]; - unsigned char *data; struct NCR5380_cmd *ncmd; struct scsi_cmnd *tmp; @@ -2139,15 +2143,26 @@ static void NCR5380_reselect(struct Scsi_Host *instance) return; } - len = 1; - data = msg; - phase = PHASE_MSGIN; - NCR5380_transfer_pio(instance, &phase, &len, &data); +#ifdef CONFIG_SUN3 + /* acknowledge toggle to MSGIN */ + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); - if (len) { - do_abort(instance); - return; + /* peek at the byte without really hitting the bus */ + msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); +#else + { + int len = 1; + unsigned char *data = msg; + unsigned char phase = PHASE_MSGIN; + + NCR5380_transfer_pio(instance, &phase, &len, &data); + + if (len) { + do_abort(instance); + return; + } } +#endif /* CONFIG_SUN3 */ if (!(msg[0] & 0x80)) { shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); @@ -2195,59 +2210,37 @@ static void NCR5380_reselect(struct Scsi_Host *instance) return; } - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - hostdata->connected = tmp; - dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n", - scmd_id(tmp), tmp->device->lun, tmp->tag); -} +#ifdef CONFIG_SUN3 + { + void *d; + unsigned long count; -/* - * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) - * - * Purpose : called by interrupt handler when DMA finishes or a phase - * mismatch occurs (which would finish the DMA transfer). - * - * Inputs : instance - this instance of the NCR5380. - * - * Returns : pointer to the scsi_cmnd structure for which the I_T_L - * nexus has been reestablished, on failure NULL is returned. - */ - -#ifdef REAL_DMA -static void NCR5380_dma_complete(NCR5380_instance * instance) { - struct NCR5380_hostdata *hostdata = shost_priv(instance); - int transferred; + if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { + count = tmp->SCp.buffer->length; + d = sg_virt(tmp->SCp.buffer); + } else { + count = tmp->SCp.this_residual; + d = tmp->SCp.ptr; + } - /* - * XXX this might not be right. 
- * - * Wait for final byte to transfer, ie wait for ACK to go false. - * - * We should use the Last Byte Sent bit, unfortunately this is - * not available on the 5380/5381 (only the various CMOS chips) - * - * FIXME: timeout, and need to handle long timeout/irq case - */ + if (sun3_dma_setup_done != tmp && + sun3scsi_dma_xfer_len(count, tmp) > 0) { + sun3scsi_dma_setup(instance, d, count, + rq_data_dir(tmp->request)); + sun3_dma_setup_done = tmp; + } + } - NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 5*HZ); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); +#endif /* CONFIG_SUN3 */ + /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - /* - * The only places we should see a phase mismatch and have to send - * data from the same set of pointers will be the data transfer - * phases. So, residual, requested length are only important here. - */ - - if (!(hostdata->connected->SCp.phase & SR_CD)) { - transferred = instance->dmalen - NCR5380_dma_residual(); - hostdata->connected->SCp.this_residual -= transferred; - hostdata->connected->SCp.ptr += transferred; - } + hostdata->connected = tmp; + dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu\n", + scmd_id(tmp), tmp->device->lun); } -#endif /* def REAL_DMA */ /** * list_find_cmd - test for presence of a command in a linked list @@ -2360,9 +2353,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (hostdata->connected == cmd) { dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); hostdata->connected = NULL; -#ifdef REAL_DMA hostdata->dma_len = 0; -#endif if (do_abort(instance)) { set_host_byte(cmd, DID_ERROR); complete_cmd(instance, cmd); @@ -2388,6 +2379,7 @@ out: dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); queue_work(hostdata->work_q, &hostdata->main_task); + maybe_release_dma_irq(instance); spin_unlock_irqrestore(&hostdata->lock, flags); return result; @@ -2445,7 +2437,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); set_host_byte(cmd, DID_RESET); - cmd->scsi_done(cmd); + complete_cmd(instance, cmd); } INIT_LIST_HEAD(&hostdata->disconnected); @@ -2465,11 +2457,10 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; -#ifdef REAL_DMA hostdata->dma_len = 0; -#endif queue_work(hostdata->work_q, &hostdata->main_task); + maybe_release_dma_irq(instance); spin_unlock_irqrestore(&hostdata->lock, flags); return SUCCESS; diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index a79288682a74..c60728785d89 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -199,13 +199,6 @@ #define PHASE_SR_TO_TCR(phase) ((phase) >> 2) -/* - * "Special" value for the (unsigned char) command tag, to indicate - * I_T_L nexus instead of I_T_L_Q. 
- */ - -#define TAG_NONE 0xff - /* * These are "special" values for the irq and dma_channel fields of the * Scsi_Host structure @@ -220,28 +213,17 @@ #define NO_IRQ 0 #endif -#define FLAG_NO_DMA_FIXUP 1 /* No DMA errata workarounds */ +#define FLAG_DMA_FIXUP 1 /* Use DMA errata workarounds */ #define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ #define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */ -#define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */ #define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */ -#ifdef SUPPORT_TAGS -struct tag_alloc { - DECLARE_BITMAP(allocated, MAX_TAGS); - int nr_allocated; - int queue_size; -}; -#endif - struct NCR5380_hostdata { NCR5380_implementation_fields; /* implementation specific */ struct Scsi_Host *host; /* Host backpointer */ unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */ unsigned char busy[8]; /* index = target, bit = lun */ -#if defined(REAL_DMA) || defined(REAL_DMA_POLL) int dma_len; /* requested length of DMA */ -#endif unsigned char last_message; /* last message OUT */ struct scsi_cmnd *connected; /* currently connected cmnd */ struct scsi_cmnd *selecting; /* cmnd to be connected */ @@ -256,13 +238,6 @@ struct NCR5380_hostdata { int read_overruns; /* number of bytes to cut from a * transfer to handle chip overruns */ struct work_struct main_task; -#ifdef SUPPORT_TAGS - struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */ -#endif -#ifdef PSEUDO_DMA - unsigned spin_max_r; - unsigned spin_max_w; -#endif struct workqueue_struct *work_q; unsigned long accesses_per_ms; /* chip register accesses per ms */ }; @@ -305,132 +280,20 @@ static void NCR5380_print(struct Scsi_Host *instance); #define NCR5380_dprint_phase(flg, arg) do {} while (0) #endif -#if defined(AUTOPROBE_IRQ) static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); -#endif static int NCR5380_init(struct Scsi_Host *instance, int flags); static int NCR5380_maybe_reset_bus(struct Scsi_Host *); static void NCR5380_exit(struct Scsi_Host *instance); static void NCR5380_information_transfer(struct Scsi_Host *instance); -#ifndef DONT_USE_INTR static irqreturn_t NCR5380_intr(int irq, void *dev_id); -#endif static void NCR5380_main(struct work_struct *work); static const char *NCR5380_info(struct Scsi_Host *instance); static void NCR5380_reselect(struct Scsi_Host *instance); static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); -#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL) static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); -#endif static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); +static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int); +static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int); -#if (defined(REAL_DMA) || defined(REAL_DMA_POLL)) - -#if defined(i386) || defined(__alpha__) - -/** - * NCR5380_pc_dma_setup - setup ISA DMA - * @instance: adapter to set up - * @ptr: block to transfer (virtual address) - * @count: number of bytes to transfer - * @mode: DMA controller mode to use - * - * Program the DMA controller ready to perform an ISA DMA transfer - * on this chip. - * - * Locks: takes and releases the ISA DMA lock. 
- */ - -static __inline__ int NCR5380_pc_dma_setup(struct Scsi_Host *instance, unsigned char *ptr, unsigned int count, unsigned char mode) -{ - unsigned limit; - unsigned long bus_addr = virt_to_bus(ptr); - unsigned long flags; - - if (instance->dma_channel <= 3) { - if (count > 65536) - count = 65536; - limit = 65536 - (bus_addr & 0xFFFF); - } else { - if (count > 65536 * 2) - count = 65536 * 2; - limit = 65536 * 2 - (bus_addr & 0x1FFFF); - } - - if (count > limit) - count = limit; - - if ((count & 1) || (bus_addr & 1)) - panic("scsi%d : attempted unaligned DMA transfer\n", instance->host_no); - - flags=claim_dma_lock(); - disable_dma(instance->dma_channel); - clear_dma_ff(instance->dma_channel); - set_dma_addr(instance->dma_channel, bus_addr); - set_dma_count(instance->dma_channel, count); - set_dma_mode(instance->dma_channel, mode); - enable_dma(instance->dma_channel); - release_dma_lock(flags); - - return count; -} - -/** - * NCR5380_pc_dma_write_setup - setup ISA DMA write - * @instance: adapter to set up - * @ptr: block to transfer (virtual address) - * @count: number of bytes to transfer - * - * Program the DMA controller ready to perform an ISA DMA write to the - * SCSI controller. - * - * Locks: called routines take and release the ISA DMA lock. - */ - -static __inline__ int NCR5380_pc_dma_write_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count) -{ - return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_WRITE); -} - -/** - * NCR5380_pc_dma_read_setup - setup ISA DMA read - * @instance: adapter to set up - * @ptr: block to transfer (virtual address) - * @count: number of bytes to transfer - * - * Program the DMA controller ready to perform an ISA DMA read from the - * SCSI controller. - * - * Locks: called routines take and release the ISA DMA lock. - */ - -static __inline__ int NCR5380_pc_dma_read_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count) -{ - return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_READ); -} - -/** - * NCR5380_pc_dma_residual - return bytes left - * @instance: adapter - * - * Reports the number of bytes left over after the DMA was terminated. - * - * Locks: takes and releases the ISA DMA lock. 
- */ - -static __inline__ int NCR5380_pc_dma_residual(struct Scsi_Host *instance) -{ - unsigned long flags; - int tmp; - - flags = claim_dma_lock(); - clear_dma_ff(instance->dma_channel); - tmp = get_dma_residue(instance->dma_channel); - release_dma_lock(flags); - - return tmp; -} -#endif /* defined(i386) || defined(__alpha__) */ -#endif /* defined(REAL_DMA) */ #endif /* __KERNEL__ */ #endif /* NCR5380_H */ diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 7dfd0fa27255..6678d1fd897b 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -555,8 +555,6 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) dev = (struct aac_dev *)scsicmd->device->host->hostdata; cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); - if (!cmd_fibcontext) - return -ENOMEM; aac_fib_init(cmd_fibcontext); dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); @@ -1037,8 +1035,6 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd) dev = (struct aac_dev *)scsicmd->device->host->hostdata; cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); - if (!cmd_fibcontext) - return -ENOMEM; aac_fib_init(cmd_fibcontext); dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext); @@ -1950,10 +1946,6 @@ static int aac_read(struct scsi_cmnd * scsicmd) * Alocate and initialize a Fib */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); - if (!cmd_fibcontext) { - printk(KERN_WARNING "aac_read: fib allocation failed\n"); - return -1; - } status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); @@ -2048,16 +2040,6 @@ static int aac_write(struct scsi_cmnd * scsicmd) * Allocate and initialize a Fib then setup a BlockWrite command */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); - if (!cmd_fibcontext) { - /* FIB temporarily unavailable,not catastrophic failure */ - - /* scsicmd->result = DID_ERROR << 16; - * scsicmd->scsi_done(scsicmd); - * return 0; - */ - printk(KERN_WARNING "aac_write: fib allocation failed\n"); - return -1; - } status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); @@ -2283,8 +2265,6 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd) * Allocate and initialize a Fib */ cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); - if (!cmd_fibcontext) - return SCSI_MLQUEUE_HOST_BUSY; aac_fib_init(cmd_fibcontext); @@ -3184,8 +3164,6 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) * Allocate and initialize a Fib then setup a BlockWrite command */ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); - if (!cmd_fibcontext) - return -1; status = aac_adapter_scsi(cmd_fibcontext, scsicmd); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index efa493cf1bc6..8f90d9e77104 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -29,6 +29,7 @@ enum { #define AAC_INT_MODE_MSI (1<<1) #define AAC_INT_MODE_AIF (1<<2) #define AAC_INT_MODE_SYNC (1<<3) +#define AAC_INT_MODE_MSIX (1<<16) #define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb #define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa @@ -62,7 +63,7 @@ enum { #define PMC_GLOBAL_INT_BIT0 0x00000001 #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 41052 +# define AAC_DRIVER_BUILD 41066 # define AAC_DRIVER_BRANCH "-ms" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -720,7 +721,7 @@ struct sa_registers { }; -#define Sa_MINIPORT_REVISION 1 +#define SA_INIT_NUM_MSIXVECTORS 1 #define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) #define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR)) @@ -2065,6 +2066,10 @@ extern struct aac_common 
aac_config; #define AifEnAddJBOD 30 /* JBOD created */ #define AifEnDeleteJBOD 31 /* JBOD deleted */ +#define AifBuManagerEvent 42 /* Bu management*/ +#define AifBuCacheDataLoss 10 +#define AifBuCacheDataRecover 11 + #define AifCmdJobProgress 2 /* Progress report */ #define AifJobCtrZero 101 /* Array Zero progress */ #define AifJobStsSuccess 1 /* Job completes */ diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 2b4e75380ae6..341ea327ae79 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -37,6 +37,7 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> +#include <linux/delay.h> #include <linux/completion.h> #include <linux/mm.h> #include <scsi/scsi_host.h> @@ -47,6 +48,20 @@ struct aac_common aac_config = { .irq_mod = 1 }; +static inline int aac_is_msix_mode(struct aac_dev *dev) +{ + u32 status; + + status = src_readl(dev, MUnit.OMR); + return (status & AAC_INT_MODE_MSIX); +} + +static inline void aac_change_to_intx(struct aac_dev *dev) +{ + aac_src_access_devreg(dev, AAC_DISABLE_MSIX); + aac_src_access_devreg(dev, AAC_ENABLE_INTX); +} + static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) { unsigned char *base; @@ -91,7 +106,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); if (dev->max_fib_size != sizeof(struct hw_fib)) init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); - init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION); + init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS); init->fsrev = cpu_to_le32(dev->fsrev); /* @@ -378,21 +393,8 @@ void aac_define_int_mode(struct aac_dev *dev) msi_count = i; } else { dev->msi_enabled = 0; - printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n", - dev->name, dev->id, i); - } - } - - if (!dev->msi_enabled) { - msi_count = 1; - i = pci_enable_msi(dev->pdev); - - if (!i) { - dev->msi_enabled = 1; - dev->msi = 1; - } else { - printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n", - dev->name, dev->id, i); + dev_err(&dev->pdev->dev, + "MSIX not supported!! Will try INTX 0x%x.\n", i); } } @@ -427,6 +429,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->comm_interface = AAC_COMM_PRODUCER; dev->raw_io_interface = dev->raw_io_64 = 0; + + /* + * Enable INTX mode, if not done already Enabled + */ + if (aac_is_msix_mode(dev)) { + aac_change_to_intx(dev); + dev_info(&dev->pdev->dev, "Changed firmware to INTX mode"); + } + if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, NULL)) && diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 511bbc575062..0aeecec1f5ea 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -637,10 +637,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, } return -EFAULT; } - /* We used to udelay() here but that absorbed - * a CPU when a timeout occured. Not very - * useful. */ - cpu_relax(); + /* + * Allow other processes / CPUS to use core + */ + schedule(); } } else if (down_interruptible(&fibptr->event_wait)) { /* Do nothing ... 
satisfy @@ -901,6 +901,31 @@ void aac_printf(struct aac_dev *dev, u32 val) memset(cp, 0, 256); } +static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index) +{ + return le32_to_cpu(((__le32 *)aifcmd->data)[index]); +} + + +static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd) +{ + switch (aac_aif_data(aifcmd, 1)) { + case AifBuCacheDataLoss: + if (aac_aif_data(aifcmd, 2)) + dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n", + aac_aif_data(aifcmd, 2)); + else + dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n"); + break; + case AifBuCacheDataRecover: + if (aac_aif_data(aifcmd, 2)) + dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n", + aac_aif_data(aifcmd, 2)); + else + dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n"); + break; + } +} /** * aac_handle_aif - Handle a message from the firmware @@ -1154,6 +1179,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ADD : DELETE; break; } + case AifBuManagerEvent: + aac_handle_aif_bu(dev, aifcmd); break; } @@ -1996,6 +2023,10 @@ int aac_command_thread(void *data) if (difference <= 0) difference = 1; set_current_state(TASK_INTERRUPTIBLE); + + if (kthread_should_stop()) + break; + schedule_timeout(difference); if (kthread_should_stop()) diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index d677b52860ae..7e836205aef1 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -392,9 +392,10 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, if (likely(fib->callback && fib->callback_data)) { fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; fib->callback(fib->callback_data, fib); - } else { - aac_fib_complete(fib); - } + } else + dev_info(&dev->pdev->dev, + "Invalid callback_fib[%d] (*%p)(%p)\n", + index, fib->callback, fib->callback_data); } else { unsigned long flagv; dprintk((KERN_INFO "event_wait up\n")); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index ff6caab8cc8b..a943bd230bc2 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1299,6 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) else shost->this_id = shost->max_id; + aac_intr_normal(aac, 0, 2, 0, NULL); + /* * dmb - we may need to move the setting of these parms somewhere else once * we get a fib that can report the actual numbers @@ -1431,8 +1433,8 @@ static int aac_acquire_resources(struct aac_dev *dev) /* After EEH recovery or suspend resume, max_msix count * may change, therfore updating in init as well. 
*/ - aac_adapter_start(dev); dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); + aac_adapter_start(dev); } return 0; diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index bc0203f3d243..28f8b8a1b8a4 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -135,7 +135,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) if (mode & AAC_INT_MODE_AIF) { /* handle AIF */ - aac_intr_normal(dev, 0, 2, 0, NULL); + if (dev->aif_thread && dev->fsa_dev) + aac_intr_normal(dev, 0, 2, 0, NULL); if (dev->msi_enabled) aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); mode = 0; diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index 221f18c5df93..8e9cfe8f22f5 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c @@ -13,13 +13,14 @@ #include <scsi/scsi_host.h> -#define PSEUDO_DMA - #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) #define NCR5380_read(reg) cumanascsi_read(instance, reg) #define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value) #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) +#define NCR5380_dma_recv_setup cumanascsi_pread +#define NCR5380_dma_send_setup cumanascsi_pwrite +#define NCR5380_dma_residual(instance) (0) #define NCR5380_intr cumanascsi_intr #define NCR5380_queue_command cumanascsi_queue_command @@ -41,8 +42,8 @@ void cumanascsi_setup(char *str, int *ints) #define L(v) (((v)<<16)|((v) & 0x0000ffff)) #define H(v) (((v)>>16)|((v) & 0xffff0000)) -static inline int -NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len) +static inline int cumanascsi_pwrite(struct Scsi_Host *host, + unsigned char *addr, int len) { unsigned long *laddr; void __iomem *dma = priv(host)->dma + 0x2000; @@ -101,11 +102,14 @@ NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len) } end: writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); - return len; + + if (len) + return -1; + return 0; } -static inline int -NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len) +static inline int cumanascsi_pread(struct Scsi_Host *host, + unsigned char *addr, int len) { unsigned long *laddr; void __iomem *dma = priv(host)->dma + 0x2000; @@ -163,7 +167,10 @@ NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len) } end: writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL); - return len; + + if (len) + return -1; + return 0; } static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg) @@ -239,7 +246,7 @@ static int cumanascsi1_probe(struct expansion_card *ec, host->irq = ec->irq; - ret = NCR5380_init(host, 0); + ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); if (ret) goto out_unmap; diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index faa1bee07c8a..edce5f3cfdba 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c @@ -365,7 +365,7 @@ static struct scsi_host_template cumanascsi2_template = { .eh_abort_handler = fas216_eh_abort, .can_queue = 1, .this_id = 7, - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, + .sg_tablesize = SG_MAX_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .use_clustering = DISABLE_CLUSTERING, .proc_name = "cumanascsi2", diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index a8ad6880dd91..e93e047f4316 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c @@ -484,7 +484,7 @@ static struct scsi_host_template eesox_template = { .eh_abort_handler = fas216_eh_abort, .can_queue = 1, .this_id = 7, 
- .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, + .sg_tablesize = SG_MAX_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .use_clustering = DISABLE_CLUSTERING, .proc_name = "eesox", diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 1fab1d1896b1..a396024a3cae 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c @@ -14,9 +14,6 @@ #include <scsi/scsi_host.h> -/*#define PSEUDO_DMA*/ -#define DONT_USE_INTR - #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) #define NCR5380_read(reg) \ @@ -24,7 +21,10 @@ #define NCR5380_write(reg, value) \ writeb(value, priv(instance)->base + ((reg) << 2)) -#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) +#define NCR5380_dma_xfer_len(instance, cmd, phase) (0) +#define NCR5380_dma_recv_setup oakscsi_pread +#define NCR5380_dma_send_setup oakscsi_pwrite +#define NCR5380_dma_residual(instance) (0) #define NCR5380_queue_command oakscsi_queue_command #define NCR5380_info oakscsi_info @@ -40,23 +40,23 @@ #define STAT ((128 + 16) << 2) #define DATA ((128 + 8) << 2) -static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *addr, - int len) +static inline int oakscsi_pwrite(struct Scsi_Host *instance, + unsigned char *addr, int len) { void __iomem *base = priv(instance)->base; printk("writing %p len %d\n",addr, len); - if(!len) return -1; while(1) { int status; while (((status = readw(base + STAT)) & 0x100)==0); } + return 0; } -static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *addr, - int len) +static inline int oakscsi_pread(struct Scsi_Host *instance, + unsigned char *addr, int len) { void __iomem *base = priv(instance)->base; printk("reading %p len %d\n", addr, len); @@ -73,7 +73,7 @@ printk("reading %p len %d\n", addr, len); if(status & 0x200 || !timeout) { printk("status = %08X\n", status); - return 1; + return -1; } } @@ -143,7 +143,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) host->irq = NO_IRQ; host->n_io_port = 255; - ret = NCR5380_init(host, 0); + ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP); if (ret) goto out_unmap; diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index 5e1b73e1b743..79aa88911b7f 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c @@ -291,7 +291,7 @@ static struct scsi_host_template powertecscsi_template = { .can_queue = 8, .this_id = 7, - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, + .sg_tablesize = SG_MAX_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, .cmd_per_lun = 2, .use_clustering = ENABLE_CLUSTERING, diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c deleted file mode 100644 index 389825ba5d96..000000000000 --- a/drivers/scsi/atari_NCR5380.c +++ /dev/null @@ -1,2676 +0,0 @@ -/* - * NCR 5380 generic driver routines. These should make it *trivial* - * to implement 5380 SCSI drivers under Linux with a non-trantor - * architecture. - * - * Note that these routines also work with NR53c400 family chips. - * - * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 666-5836 - * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 - */ - -/* Ported to Atari by Roman Hodek and others. */ - -/* Adapted for the sun3 by Sam Creasey. 
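[Editor's sketch] The Cumana and Oak conversions further up switch the pseudo-DMA transfer hooks to a "return 0 on success, -1 if bytes remain" contract instead of returning a byte count. A hedged sketch of that contract; copy_block() is a hypothetical helper standing in for the board-specific register loop:

/* Sketch of the setup-hook convention used in the hunks above: consume
 * as much of the request as possible and report success only when no
 * residual is left.
 */
#include <stddef.h>
#include <stdio.h>

static size_t copy_block(unsigned char *dst, const unsigned char *src, size_t len)
{
    for (size_t i = 0; i < len; i++)    /* pretend the "hardware" took it all */
        dst[i] = src[i];
    return len;
}

static int example_dma_send_setup(unsigned char *fifo, unsigned char *addr, int len)
{
    while (len > 0) {
        size_t done = copy_block(fifo, addr, (size_t)len);

        if (!done)
            break;              /* device stalled */
        addr += done;
        len -= (int)done;
    }

    if (len)
        return -1;              /* residual bytes: report failure */
    return 0;                   /* everything transferred */
}

int main(void)
{
    unsigned char fifo[64], buf[64] = { 0 };

    printf("%d\n", example_dma_send_setup(fifo, buf, (int)sizeof(buf)));
    return 0;
}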
*/ - -/* - * Design - * - * This is a generic 5380 driver. To use it on a different platform, - * one simply writes appropriate system specific macros (ie, data - * transfer - some PC's will use the I/O bus, 68K's must use - * memory mapped) and drops this file in their 'C' wrapper. - * - * As far as command queueing, two queues are maintained for - * each 5380 in the system - commands that haven't been issued yet, - * and commands that are currently executing. This means that an - * unlimited number of commands may be queued, letting - * more commands propagate from the higher driver levels giving higher - * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, - * allowing multiple commands to propagate all the way to a SCSI-II device - * while a command is already executing. - * - * - * Issues specific to the NCR5380 : - * - * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead - * piece of hardware that requires you to sit in a loop polling for - * the REQ signal as long as you are connected. Some devices are - * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect - * while doing long seek operations. [...] These - * broken devices are the exception rather than the rule and I'd rather - * spend my time optimizing for the normal case. - * - * Architecture : - * - * At the heart of the design is a coroutine, NCR5380_main, - * which is started from a workqueue for each NCR5380 host in the - * system. It attempts to establish I_T_L or I_T_L_Q nexuses by - * removing the commands from the issue queue and calling - * NCR5380_select() if a nexus is not established. - * - * Once a nexus is established, the NCR5380_information_transfer() - * phase goes through the various phases as instructed by the target. - * if the target goes into MSG IN and sends a DISCONNECT message, - * the command structure is placed into the per instance disconnected - * queue, and NCR5380_main tries to find more work. If the target is - * idle for too long, the system will try to sleep. - * - * If a command has disconnected, eventually an interrupt will trigger, - * calling NCR5380_intr() which will in turn call NCR5380_reselect - * to reestablish a nexus. This will run main if necessary. - * - * On command termination, the done function will be called as - * appropriate. - * - * SCSI pointers are maintained in the SCp field of SCSI command - * structures, being initialized after the command is connected - * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. - * Note that in violation of the standard, an implicit SAVE POINTERS operation - * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. - */ - -/* - * Using this file : - * This file a skeleton Linux SCSI driver for the NCR 5380 series - * of chips. To use it, you write an architecture specific functions - * and macros and include this file in your driver. - * - * These macros control options : - * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. - * - * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential - * transceivers. - * - * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. 
- * - * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible - * - * These macros MUST be defined : - * - * NCR5380_read(register) - read from the specified register - * - * NCR5380_write(register, value) - write to the specific register - * - * NCR5380_implementation_fields - additional fields needed for this - * specific implementation of the NCR5380 - * - * Either real DMA *or* pseudo DMA may be implemented - * REAL functions : - * NCR5380_REAL_DMA should be defined if real DMA is to be used. - * Note that the DMA setup functions should return the number of bytes - * that they were able to program the controller for. - * - * Also note that generic i386/PC versions of these macros are - * available as NCR5380_i386_dma_write_setup, - * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. - * - * NCR5380_dma_write_setup(instance, src, count) - initialize - * NCR5380_dma_read_setup(instance, dst, count) - initialize - * NCR5380_dma_residual(instance); - residual count - * - * PSEUDO functions : - * NCR5380_pwrite(instance, src, count) - * NCR5380_pread(instance, dst, count); - * - * The generic driver is initialized by calling NCR5380_init(instance), - * after setting the appropriate host specific fields and ID. If the - * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, - * possible) function may be used. - */ - -static int do_abort(struct Scsi_Host *); -static void do_reset(struct Scsi_Host *); - -#ifdef SUPPORT_TAGS - -/* - * Functions for handling tagged queuing - * ===================================== - * - * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes: - * - * Using consecutive numbers for the tags is no good idea in my eyes. There - * could be wrong re-usings if the counter (8 bit!) wraps and some early - * command has been preempted for a long time. My solution: a bitfield for - * remembering used tags. - * - * There's also the problem that each target has a certain queue size, but we - * cannot know it in advance :-( We just see a QUEUE_FULL status being - * returned. So, in this case, the driver internal queue size assumption is - * reduced to the number of active tags if QUEUE_FULL is returned by the - * target. - * - * We're also not allowed running tagged commands as long as an untagged - * command is active. And REQUEST SENSE commands after a contingent allegiance - * condition _must_ be untagged. To keep track whether an untagged command has - * been issued, the host->busy array is still employed, as it is without - * support for tagged queuing. - * - * One could suspect that there are possible race conditions between - * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the - * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(), - * which already guaranteed to be running at most once. It is also the only - * place where tags/LUNs are allocated. So no other allocation can slip - * between that pair, there could only happen a reselection, which can free a - * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes - * important: the tag bit must be cleared before 'nr_allocated' is decreased. 
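[Editor's sketch] The comment above argues for a per-LUN bitmap of in-use tags rather than a wrapping 8-bit counter, with the queue size shrunk when the target reports QUEUE FULL. A simplified, single-threaded userspace sketch of that allocator (the driver does the same work under its host lock):

/* Sketch: bitmap-based tag allocation as described above. */
#include <stdio.h>

#define MAX_TAGS 32

struct tag_pool {
    unsigned long allocated;    /* one bit per tag */
    int nr_allocated;
    int queue_size;             /* shrunk on QUEUE FULL */
};

static int tag_alloc(struct tag_pool *ta)
{
    if (ta->nr_allocated >= ta->queue_size)
        return -1;              /* target queue would overflow */
    for (int tag = 0; tag < MAX_TAGS; tag++) {
        if (!(ta->allocated & (1UL << tag))) {
            ta->allocated |= 1UL << tag;
            ta->nr_allocated++;
            return tag;
        }
    }
    return -1;
}

static void tag_free(struct tag_pool *ta, int tag)
{
    /* clear the bit before dropping the count, as the comment above notes */
    ta->allocated &= ~(1UL << tag);
    ta->nr_allocated--;
}

int main(void)
{
    struct tag_pool ta = { .queue_size = MAX_TAGS };
    int a = tag_alloc(&ta), b = tag_alloc(&ta);

    tag_free(&ta, a);
    printf("tags: %d %d, now %d in use\n", a, b, ta.nr_allocated);
    return 0;
}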
- */ - -static void __init init_tags(struct NCR5380_hostdata *hostdata) -{ - int target, lun; - struct tag_alloc *ta; - - if (!(hostdata->flags & FLAG_TAGGED_QUEUING)) - return; - - for (target = 0; target < 8; ++target) { - for (lun = 0; lun < 8; ++lun) { - ta = &hostdata->TagAlloc[target][lun]; - bitmap_zero(ta->allocated, MAX_TAGS); - ta->nr_allocated = 0; - /* At the beginning, assume the maximum queue size we could - * support (MAX_TAGS). This value will be decreased if the target - * returns QUEUE_FULL status. - */ - ta->queue_size = MAX_TAGS; - } - } -} - - -/* Check if we can issue a command to this LUN: First see if the LUN is marked - * busy by an untagged command. If the command should use tagged queuing, also - * check that there is a free tag and the target's queue won't overflow. This - * function should be called with interrupts disabled to avoid race - * conditions. - */ - -static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) -{ - u8 lun = cmd->device->lun; - struct Scsi_Host *instance = cmd->device->host; - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - if (hostdata->busy[cmd->device->id] & (1 << lun)) - return 1; - if (!should_be_tagged || - !(hostdata->flags & FLAG_TAGGED_QUEUING) || - !cmd->device->tagged_supported) - return 0; - if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >= - hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) { - dsprintk(NDEBUG_TAGS, instance, "target %d lun %d: no free tags\n", - scmd_id(cmd), lun); - return 1; - } - return 0; -} - - -/* Allocate a tag for a command (there are no checks anymore, check_lun_busy() - * must be called before!), or reserve the LUN in 'busy' if the command is - * untagged. - */ - -static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) -{ - u8 lun = cmd->device->lun; - struct Scsi_Host *instance = cmd->device->host; - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - /* If we or the target don't support tagged queuing, allocate the LUN for - * an untagged command. - */ - if (!should_be_tagged || - !(hostdata->flags & FLAG_TAGGED_QUEUING) || - !cmd->device->tagged_supported) { - cmd->tag = TAG_NONE; - hostdata->busy[cmd->device->id] |= (1 << lun); - dsprintk(NDEBUG_TAGS, instance, "target %d lun %d now allocated by untagged command\n", - scmd_id(cmd), lun); - } else { - struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; - - cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); - set_bit(cmd->tag, ta->allocated); - ta->nr_allocated++; - dsprintk(NDEBUG_TAGS, instance, "using tag %d for target %d lun %d (%d tags allocated)\n", - cmd->tag, scmd_id(cmd), lun, ta->nr_allocated); - } -} - - -/* Mark the tag of command 'cmd' as free, or in case of an untagged command, - * unlock the LUN. 
- */ - -static void cmd_free_tag(struct scsi_cmnd *cmd) -{ - u8 lun = cmd->device->lun; - struct Scsi_Host *instance = cmd->device->host; - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - if (cmd->tag == TAG_NONE) { - hostdata->busy[cmd->device->id] &= ~(1 << lun); - dsprintk(NDEBUG_TAGS, instance, "target %d lun %d untagged cmd freed\n", - scmd_id(cmd), lun); - } else if (cmd->tag >= MAX_TAGS) { - shost_printk(KERN_NOTICE, instance, - "trying to free bad tag %d!\n", cmd->tag); - } else { - struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; - clear_bit(cmd->tag, ta->allocated); - ta->nr_allocated--; - dsprintk(NDEBUG_TAGS, instance, "freed tag %d for target %d lun %d\n", - cmd->tag, scmd_id(cmd), lun); - } -} - - -static void free_all_tags(struct NCR5380_hostdata *hostdata) -{ - int target, lun; - struct tag_alloc *ta; - - if (!(hostdata->flags & FLAG_TAGGED_QUEUING)) - return; - - for (target = 0; target < 8; ++target) { - for (lun = 0; lun < 8; ++lun) { - ta = &hostdata->TagAlloc[target][lun]; - bitmap_zero(ta->allocated, MAX_TAGS); - ta->nr_allocated = 0; - } - } -} - -#endif /* SUPPORT_TAGS */ - -/** - * merge_contiguous_buffers - coalesce scatter-gather list entries - * @cmd: command requesting IO - * - * Try to merge several scatter-gather buffers into one DMA transfer. - * This is possible if the scatter buffers lie on physically - * contiguous addresses. The first scatter-gather buffer's data are - * assumed to be already transferred into cmd->SCp.this_residual. - * Every buffer merged avoids an interrupt and a DMA setup operation. - */ - -static void merge_contiguous_buffers(struct scsi_cmnd *cmd) -{ -#if !defined(CONFIG_SUN3) - unsigned long endaddr; -#if (NDEBUG & NDEBUG_MERGING) - unsigned long oldlen = cmd->SCp.this_residual; - int cnt = 1; -#endif - - for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; - cmd->SCp.buffers_residual && - virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { - dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n", - page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); -#if (NDEBUG & NDEBUG_MERGING) - ++cnt; -#endif - ++cmd->SCp.buffer; - --cmd->SCp.buffers_residual; - cmd->SCp.this_residual += cmd->SCp.buffer->length; - endaddr += cmd->SCp.buffer->length; - } -#if (NDEBUG & NDEBUG_MERGING) - if (oldlen != cmd->SCp.this_residual) - dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", - cnt, cmd->SCp.ptr, cmd->SCp.this_residual); -#endif -#endif /* !defined(CONFIG_SUN3) */ -} - -/** - * initialize_SCp - init the scsi pointer field - * @cmd: command block to set up - * - * Set up the internal fields in the SCSI command. - */ - -static inline void initialize_SCp(struct scsi_cmnd *cmd) -{ - /* - * Initialize the Scsi Pointer field so that all of the commands in the - * various queues are valid. 
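[Editor's sketch] merge_contiguous_buffers() above folds scatter-gather entries whose data happen to be physically adjacent into one longer transfer, saving a DMA setup and an interrupt per merged entry. A minimal sketch of the same idea over a plain array of address/length segments (userspace, so "physical address" is just a number here and the table is invented):

/* Sketch: coalesce adjacent segments whose address ranges touch. */
#include <stdio.h>

struct seg {
    unsigned long addr;
    unsigned long len;
};

/* Merge in place, returning the new number of segments. */
static int coalesce(struct seg *s, int n)
{
    int out = 0;

    for (int i = 1; i < n; i++) {
        if (s[out].addr + s[out].len == s[i].addr)
            s[out].len += s[i].len;     /* contiguous: extend */
        else
            s[++out] = s[i];            /* gap: start a new segment */
    }
    return n ? out + 1 : 0;
}

int main(void)
{
    struct seg table[] = {
        { 0x1000, 0x1000 }, { 0x2000, 0x1000 },     /* contiguous pair */
        { 0x8000, 0x0800 },                         /* separate */
    };
    int n = coalesce(table, 3);

    for (int i = 0; i < n; i++)
        printf("seg %d: %#lx + %#lx\n", i, table[i].addr, table[i].len);
    return 0;
}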
- */ - - if (scsi_bufflen(cmd)) { - cmd->SCp.buffer = scsi_sglist(cmd); - cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; - cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); - cmd->SCp.this_residual = cmd->SCp.buffer->length; - - merge_contiguous_buffers(cmd); - } else { - cmd->SCp.buffer = NULL; - cmd->SCp.buffers_residual = 0; - cmd->SCp.ptr = NULL; - cmd->SCp.this_residual = 0; - } - - cmd->SCp.Status = 0; - cmd->SCp.Message = 0; -} - -/** - * NCR5380_poll_politely2 - wait for two chip register values - * @instance: controller to poll - * @reg1: 5380 register to poll - * @bit1: Bitmask to check - * @val1: Expected value - * @reg2: Second 5380 register to poll - * @bit2: Second bitmask to check - * @val2: Second expected value - * @wait: Time-out in jiffies - * - * Polls the chip in a reasonably efficient manner waiting for an - * event to occur. After a short quick poll we begin to yield the CPU - * (if possible). In irq contexts the time-out is arbitrarily limited. - * Callers may hold locks as long as they are held in irq mode. - * - * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT. - */ - -static int NCR5380_poll_politely2(struct Scsi_Host *instance, - int reg1, int bit1, int val1, - int reg2, int bit2, int val2, int wait) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned long deadline = jiffies + wait; - unsigned long n; - - /* Busy-wait for up to 10 ms */ - n = min(10000U, jiffies_to_usecs(wait)); - n *= hostdata->accesses_per_ms; - n /= 2000; - do { - if ((NCR5380_read(reg1) & bit1) == val1) - return 0; - if ((NCR5380_read(reg2) & bit2) == val2) - return 0; - cpu_relax(); - } while (n--); - - if (irqs_disabled() || in_interrupt()) - return -ETIMEDOUT; - - /* Repeatedly sleep for 1 ms until deadline */ - while (time_is_after_jiffies(deadline)) { - schedule_timeout_uninterruptible(1); - if ((NCR5380_read(reg1) & bit1) == val1) - return 0; - if ((NCR5380_read(reg2) & bit2) == val2) - return 0; - } - - return -ETIMEDOUT; -} - -static inline int NCR5380_poll_politely(struct Scsi_Host *instance, - int reg, int bit, int val, int wait) -{ - return NCR5380_poll_politely2(instance, reg, bit, val, - reg, bit, val, wait); -} - -#if NDEBUG -static struct { - unsigned char mask; - const char *name; -} signals[] = { - {SR_DBP, "PARITY"}, - {SR_RST, "RST"}, - {SR_BSY, "BSY"}, - {SR_REQ, "REQ"}, - {SR_MSG, "MSG"}, - {SR_CD, "CD"}, - {SR_IO, "IO"}, - {SR_SEL, "SEL"}, - {0, NULL} -}, -basrs[] = { - {BASR_ATN, "ATN"}, - {BASR_ACK, "ACK"}, - {0, NULL} -}, -icrs[] = { - {ICR_ASSERT_RST, "ASSERT RST"}, - {ICR_ASSERT_ACK, "ASSERT ACK"}, - {ICR_ASSERT_BSY, "ASSERT BSY"}, - {ICR_ASSERT_SEL, "ASSERT SEL"}, - {ICR_ASSERT_ATN, "ASSERT ATN"}, - {ICR_ASSERT_DATA, "ASSERT DATA"}, - {0, NULL} -}, -mrs[] = { - {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, - {MR_TARGET, "MODE TARGET"}, - {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, - {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"}, - {MR_ENABLE_EOP_INTR, "MODE EOP INTR"}, - {MR_MONITOR_BSY, "MODE MONITOR BSY"}, - {MR_DMA_MODE, "MODE DMA"}, - {MR_ARBITRATE, "MODE ARBITRATION"}, - {0, NULL} -}; - -/** - * NCR5380_print - print scsi bus signals - * @instance: adapter state to dump - * - * Print the SCSI bus signals for debugging purposes - */ - -static void NCR5380_print(struct Scsi_Host *instance) -{ - unsigned char status, data, basr, mr, icr, i; - - data = NCR5380_read(CURRENT_SCSI_DATA_REG); - status = NCR5380_read(STATUS_REG); - mr = NCR5380_read(MODE_REG); - icr = NCR5380_read(INITIATOR_COMMAND_REG); - basr = 
NCR5380_read(BUS_AND_STATUS_REG); - - printk("STATUS_REG: %02x ", status); - for (i = 0; signals[i].mask; ++i) - if (status & signals[i].mask) - printk(",%s", signals[i].name); - printk("\nBASR: %02x ", basr); - for (i = 0; basrs[i].mask; ++i) - if (basr & basrs[i].mask) - printk(",%s", basrs[i].name); - printk("\nICR: %02x ", icr); - for (i = 0; icrs[i].mask; ++i) - if (icr & icrs[i].mask) - printk(",%s", icrs[i].name); - printk("\nMODE: %02x ", mr); - for (i = 0; mrs[i].mask; ++i) - if (mr & mrs[i].mask) - printk(",%s", mrs[i].name); - printk("\n"); -} - -static struct { - unsigned char value; - const char *name; -} phases[] = { - {PHASE_DATAOUT, "DATAOUT"}, - {PHASE_DATAIN, "DATAIN"}, - {PHASE_CMDOUT, "CMDOUT"}, - {PHASE_STATIN, "STATIN"}, - {PHASE_MSGOUT, "MSGOUT"}, - {PHASE_MSGIN, "MSGIN"}, - {PHASE_UNKNOWN, "UNKNOWN"} -}; - -/** - * NCR5380_print_phase - show SCSI phase - * @instance: adapter to dump - * - * Print the current SCSI phase for debugging purposes - */ - -static void NCR5380_print_phase(struct Scsi_Host *instance) -{ - unsigned char status; - int i; - - status = NCR5380_read(STATUS_REG); - if (!(status & SR_REQ)) - shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n"); - else { - for (i = 0; (phases[i].value != PHASE_UNKNOWN) && - (phases[i].value != (status & PHASE_MASK)); ++i) - ; - shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name); - } -} -#endif - -/** - * NCR58380_info - report driver and host information - * @instance: relevant scsi host instance - * - * For use as the host template info() handler. - */ - -static const char *NCR5380_info(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - return hostdata->info; -} - -static void prepare_info(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - snprintf(hostdata->info, sizeof(hostdata->info), - "%s, io_port 0x%lx, n_io_port %d, " - "base 0x%lx, irq %d, " - "can_queue %d, cmd_per_lun %d, " - "sg_tablesize %d, this_id %d, " - "flags { %s%s}, " - "options { %s} ", - instance->hostt->name, instance->io_port, instance->n_io_port, - instance->base, instance->irq, - instance->can_queue, instance->cmd_per_lun, - instance->sg_tablesize, instance->this_id, - hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "", - hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "", -#ifdef DIFFERENTIAL - "DIFFERENTIAL " -#endif -#ifdef REAL_DMA - "REAL_DMA " -#endif -#ifdef PARITY - "PARITY " -#endif -#ifdef SUPPORT_TAGS - "SUPPORT_TAGS " -#endif - ""); -} - -/** - * NCR5380_init - initialise an NCR5380 - * @instance: adapter to configure - * @flags: control flags - * - * Initializes *instance and corresponding 5380 chip, - * with flags OR'd into the initial flags value. - * - * Notes : I assume that the host, hostno, and id bits have been - * set correctly. I don't care about the irq and other fields. 
- * - * Returns 0 for success - */ - -static int __init NCR5380_init(struct Scsi_Host *instance, int flags) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - int i; - unsigned long deadline; - - hostdata->host = instance; - hostdata->id_mask = 1 << instance->this_id; - hostdata->id_higher_mask = 0; - for (i = hostdata->id_mask; i <= 0x80; i <<= 1) - if (i > hostdata->id_mask) - hostdata->id_higher_mask |= i; - for (i = 0; i < 8; ++i) - hostdata->busy[i] = 0; -#ifdef SUPPORT_TAGS - init_tags(hostdata); -#endif -#if defined (REAL_DMA) - hostdata->dma_len = 0; -#endif - spin_lock_init(&hostdata->lock); - hostdata->connected = NULL; - hostdata->sensing = NULL; - INIT_LIST_HEAD(&hostdata->autosense); - INIT_LIST_HEAD(&hostdata->unissued); - INIT_LIST_HEAD(&hostdata->disconnected); - - hostdata->flags = flags; - - INIT_WORK(&hostdata->main_task, NCR5380_main); - hostdata->work_q = alloc_workqueue("ncr5380_%d", - WQ_UNBOUND | WQ_MEM_RECLAIM, - 1, instance->host_no); - if (!hostdata->work_q) - return -ENOMEM; - - prepare_info(instance); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(TARGET_COMMAND_REG, 0); - NCR5380_write(SELECT_ENABLE_REG, 0); - - /* Calibrate register polling loop */ - i = 0; - deadline = jiffies + 1; - do { - cpu_relax(); - } while (time_is_after_jiffies(deadline)); - deadline += msecs_to_jiffies(256); - do { - NCR5380_read(STATUS_REG); - ++i; - cpu_relax(); - } while (time_is_after_jiffies(deadline)); - hostdata->accesses_per_ms = i / 256; - - return 0; -} - -/** - * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems. - * @instance: adapter to check - * - * If the system crashed, it may have crashed with a connected target and - * the SCSI bus busy. Check for BUS FREE phase. If not, try to abort the - * currently established nexus, which we know nothing about. Failing that - * do a bus reset. - * - * Note that a bus reset will cause the chip to assert IRQ. - * - * Returns 0 if successful, otherwise -ENXIO. - */ - -static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - int pass; - - for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) { - switch (pass) { - case 1: - case 3: - case 5: - shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n"); - NCR5380_poll_politely(instance, - STATUS_REG, SR_BSY, 0, 5 * HZ); - break; - case 2: - shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n"); - do_abort(instance); - break; - case 4: - shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n"); - do_reset(instance); - /* Wait after a reset; the SCSI standard calls for - * 250ms, we wait 500ms to be on the safe side. - * But some Toshiba CD-ROMs need ten times that. - */ - if (hostdata->flags & FLAG_TOSHIBA_DELAY) - msleep(2500); - else - msleep(500); - break; - case 6: - shost_printk(KERN_ERR, instance, "bus locked solid\n"); - return -ENXIO; - } - } - return 0; -} - -/** - * NCR5380_exit - remove an NCR5380 - * @instance: adapter to remove - * - * Assumes that no more work can be queued (e.g. by NCR5380_intr). 
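[Editor's sketch] NCR5380_poll_politely2() above busy-waits for a short, bounded budget (NCR5380_init() calibrates it by counting register reads over 256 ms to get accesses_per_ms) and then falls back to sleeping in roughly 1 ms steps until a deadline. A rough userspace sketch of the same two-stage strategy; poll_ready() and the spin budget are illustrative stand-ins, not the driver's values:

/* Sketch: short busy-wait, then sleep-poll until a deadline. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static volatile int fake_flag;

static bool poll_ready(void)
{
    return fake_flag != 0;      /* stands in for a 5380 register test */
}

static long now_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int poll_politely(long timeout_ms)
{
    long deadline = now_ms() + timeout_ms;

    /* Stage 1: spin briefly (the driver sizes this from accesses_per_ms). */
    for (int i = 0; i < 100000; i++)
        if (poll_ready())
            return 0;

    /* Stage 2: yield the CPU in ~1 ms steps until the deadline. */
    while (now_ms() < deadline) {
        struct timespec nap = { 0, 1000000 };   /* 1 ms */

        nanosleep(&nap, NULL);
        if (poll_ready())
            return 0;
    }
    return -1;                  /* -ETIMEDOUT in the driver */
}

int main(void)
{
    fake_flag = 1;
    printf("%d\n", poll_politely(100));
    return 0;
}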
- */ - -static void NCR5380_exit(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - cancel_work_sync(&hostdata->main_task); - destroy_workqueue(hostdata->work_q); -} - -/** - * complete_cmd - finish processing a command and return it to the SCSI ML - * @instance: the host instance - * @cmd: command to complete - */ - -static void complete_cmd(struct Scsi_Host *instance, - struct scsi_cmnd *cmd) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd); - - if (hostdata->sensing == cmd) { - /* Autosense processing ends here */ - if ((cmd->result & 0xff) != SAM_STAT_GOOD) { - scsi_eh_restore_cmnd(cmd, &hostdata->ses); - set_host_byte(cmd, DID_ERROR); - } else - scsi_eh_restore_cmnd(cmd, &hostdata->ses); - hostdata->sensing = NULL; - } - -#ifdef SUPPORT_TAGS - cmd_free_tag(cmd); -#else - hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun); -#endif - cmd->scsi_done(cmd); -} - -/** - * NCR5380_queue_command - queue a command - * @instance: the relevant SCSI adapter - * @cmd: SCSI command - * - * cmd is added to the per-instance issue queue, with minor - * twiddling done to the host specific fields of cmd. If the - * main coroutine is not running, it is restarted. - */ - -static int NCR5380_queue_command(struct Scsi_Host *instance, - struct scsi_cmnd *cmd) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); - unsigned long flags; - -#if (NDEBUG & NDEBUG_NO_WRITE) - switch (cmd->cmnd[0]) { - case WRITE_6: - case WRITE_10: - shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n"); - cmd->result = (DID_ERROR << 16); - cmd->scsi_done(cmd); - return 0; - } -#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ - - cmd->result = 0; - - /* - * ++roman: Just disabling the NCR interrupt isn't sufficient here, - * because also a timer int can trigger an abort or reset, which would - * alter queues and touch the lock. - */ - if (!NCR5380_acquire_dma_irq(instance)) - return SCSI_MLQUEUE_HOST_BUSY; - - spin_lock_irqsave(&hostdata->lock, flags); - - /* - * Insert the cmd into the issue queue. Note that REQUEST SENSE - * commands are added to the head of the queue since any command will - * clear the contingent allegiance condition that exists and the - * sense data is only guaranteed to be valid while the condition exists. - */ - - if (cmd->cmnd[0] == REQUEST_SENSE) - list_add(&ncmd->list, &hostdata->unissued); - else - list_add_tail(&ncmd->list, &hostdata->unissued); - - spin_unlock_irqrestore(&hostdata->lock, flags); - - dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n", - cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); - - /* Kick off command processing */ - queue_work(hostdata->work_q, &hostdata->main_task); - return 0; -} - -static inline void maybe_release_dma_irq(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - - /* Caller does the locking needed to set & test these data atomically */ - if (list_empty(&hostdata->disconnected) && - list_empty(&hostdata->unissued) && - list_empty(&hostdata->autosense) && - !hostdata->connected && - !hostdata->selecting) - NCR5380_release_dma_irq(instance); -} - -/** - * dequeue_next_cmd - dequeue a command for processing - * @instance: the scsi host instance - * - * Priority is given to commands on the autosense queue. These commands - * need autosense because of a CHECK CONDITION result. 
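[Editor's sketch] NCR5380_queue_command() above inserts REQUEST SENSE at the head of the issue queue, because sense data is only valid while the contingent allegiance condition lasts; everything else keeps FIFO order at the tail. A tiny sketch of that policy with a toy singly linked queue (REQUEST_SENSE is the standard opcode 0x03; the list itself is invented):

/* Sketch of the head-vs-tail queueing policy above. */
#include <stdio.h>
#include <stdlib.h>

#define REQUEST_SENSE 0x03

struct qcmd {
    unsigned char opcode;
    struct qcmd *next;
};

static struct qcmd *head, *tail;

static void enqueue(unsigned char opcode)
{
    struct qcmd *c = calloc(1, sizeof(*c));

    c->opcode = opcode;
    if (opcode == REQUEST_SENSE) {      /* urgent: sense data is volatile */
        c->next = head;
        head = c;
        if (!tail)
            tail = c;
    } else {                            /* normal command: FIFO order */
        if (tail)
            tail->next = c;
        else
            head = c;
        tail = c;
    }
}

int main(void)
{
    enqueue(0x28);              /* READ(10) */
    enqueue(REQUEST_SENSE);     /* jumps the queue */
    for (struct qcmd *c = head; c; c = c->next)
        printf("0x%02x\n", c->opcode);
    return 0;
}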
- * - * Returns a command pointer if a command is found for a target that is - * not already busy. Otherwise returns NULL. - */ - -static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - struct NCR5380_cmd *ncmd; - struct scsi_cmnd *cmd; - - if (hostdata->sensing || list_empty(&hostdata->autosense)) { - list_for_each_entry(ncmd, &hostdata->unissued, list) { - cmd = NCR5380_to_scmd(ncmd); - dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n", - cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun); - - if ( -#ifdef SUPPORT_TAGS - !is_lun_busy(cmd, 1) -#else - !(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun)) -#endif - ) { - list_del(&ncmd->list); - dsprintk(NDEBUG_QUEUES, instance, - "dequeue: removed %p from issue queue\n", cmd); - return cmd; - } - } - } else { - /* Autosense processing begins here */ - ncmd = list_first_entry(&hostdata->autosense, - struct NCR5380_cmd, list); - list_del(&ncmd->list); - cmd = NCR5380_to_scmd(ncmd); - dsprintk(NDEBUG_QUEUES, instance, - "dequeue: removed %p from autosense queue\n", cmd); - scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - hostdata->sensing = cmd; - return cmd; - } - return NULL; -} - -static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); - - if (hostdata->sensing == cmd) { - scsi_eh_restore_cmnd(cmd, &hostdata->ses); - list_add(&ncmd->list, &hostdata->autosense); - hostdata->sensing = NULL; - } else - list_add(&ncmd->list, &hostdata->unissued); -} - -/** - * NCR5380_main - NCR state machines - * - * NCR5380_main is a coroutine that runs as long as more work can - * be done on the NCR5380 host adapters in a system. Both - * NCR5380_queue_command() and NCR5380_intr() will try to start it - * in case it is not running. - */ - -static void NCR5380_main(struct work_struct *work) -{ - struct NCR5380_hostdata *hostdata = - container_of(work, struct NCR5380_hostdata, main_task); - struct Scsi_Host *instance = hostdata->host; - int done; - - /* - * ++roman: Just disabling the NCR interrupt isn't sufficient here, - * because also a timer int can trigger an abort or reset, which can - * alter queues and touch the Falcon lock. - */ - - do { - done = 1; - - spin_lock_irq(&hostdata->lock); - while (!hostdata->connected && !hostdata->selecting) { - struct scsi_cmnd *cmd = dequeue_next_cmd(instance); - - if (!cmd) - break; - - dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd); - - /* - * Attempt to establish an I_T_L nexus here. - * On success, instance->hostdata->connected is set. - * On failure, we must add the command back to the - * issue queue so we can keep trying. - */ - /* - * REQUEST SENSE commands are issued without tagged - * queueing, even on SCSI-II devices because the - * contingent allegiance condition exists for the - * entire unit. - */ - /* ++roman: ...and the standard also requires that - * REQUEST SENSE command are untagged. 
- */ - -#ifdef SUPPORT_TAGS - cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE); -#endif - if (!NCR5380_select(instance, cmd)) { - dsprintk(NDEBUG_MAIN, instance, "main: select complete\n"); - maybe_release_dma_irq(instance); - } else { - dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance, - "main: select failed, returning %p to queue\n", cmd); - requeue_cmd(instance, cmd); -#ifdef SUPPORT_TAGS - cmd_free_tag(cmd); -#endif - } - } - if (hostdata->connected -#ifdef REAL_DMA - && !hostdata->dma_len -#endif - ) { - dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n"); - NCR5380_information_transfer(instance); - done = 0; - } - spin_unlock_irq(&hostdata->lock); - if (!done) - cond_resched(); - } while (!done); -} - - -#ifdef REAL_DMA -/* - * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) - * - * Purpose : Called by interrupt handler when DMA finishes or a phase - * mismatch occurs (which would finish the DMA transfer). - * - * Inputs : instance - this instance of the NCR5380. - */ - -static void NCR5380_dma_complete(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - int transferred; - unsigned char **data; - int *count; - int saved_data = 0, overrun = 0; - unsigned char p; - - if (hostdata->read_overruns) { - p = hostdata->connected->SCp.phase; - if (p & SR_IO) { - udelay(10); - if ((NCR5380_read(BUS_AND_STATUS_REG) & - (BASR_PHASE_MATCH|BASR_ACK)) == - (BASR_PHASE_MATCH|BASR_ACK)) { - saved_data = NCR5380_read(INPUT_DATA_REG); - overrun = 1; - dsprintk(NDEBUG_DMA, instance, "read overrun handled\n"); - } - } - } - -#if defined(CONFIG_SUN3) - if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { - pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", - instance->host_no); - BUG(); - } - - /* make sure we're not stuck in a data phase */ - if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == - (BASR_PHASE_MATCH | BASR_ACK)) { - pr_err("scsi%d: BASR %02x\n", instance->host_no, - NCR5380_read(BUS_AND_STATUS_REG)); - pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n", - instance->host_no); - BUG(); - } -#endif - - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - - transferred = hostdata->dma_len - NCR5380_dma_residual(instance); - hostdata->dma_len = 0; - - data = (unsigned char **)&hostdata->connected->SCp.ptr; - count = &hostdata->connected->SCp.this_residual; - *data += transferred; - *count -= transferred; - - if (hostdata->read_overruns) { - int cnt, toPIO; - - if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { - cnt = toPIO = hostdata->read_overruns; - if (overrun) { - dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); - *(*data)++ = saved_data; - (*count)--; - cnt--; - toPIO--; - } - dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); - NCR5380_transfer_pio(instance, &p, &cnt, data); - *count -= toPIO - cnt; - } - } -} -#endif /* REAL_DMA */ - - -/** - * NCR5380_intr - generic NCR5380 irq handler - * @irq: interrupt number - * @dev_id: device info - * - * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses - * from the disconnected queue, and restarting NCR5380_main() - * as required. - * - * The chip can assert IRQ in any of six different conditions. The IRQ flag - * is then cleared by reading the Reset Parity/Interrupt Register (RPIR). 
- * Three of these six conditions are latched in the Bus and Status Register: - * - End of DMA (cleared by ending DMA Mode) - * - Parity error (cleared by reading RPIR) - * - Loss of BSY (cleared by reading RPIR) - * Two conditions have flag bits that are not latched: - * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode) - * - Bus reset (non-maskable) - * The remaining condition has no flag bit at all: - * - Selection/reselection - * - * Hence, establishing the cause(s) of any interrupt is partly guesswork. - * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor - * claimed that "the design of the [DP8490] interrupt logic ensures - * interrupts will not be lost (they can be on the DP5380)." - * The L5380/53C80 datasheet from LOGIC Devices has more details. - * - * Checking for bus reset by reading RST is futile because of interrupt - * latency, but a bus reset will reset chip logic. Checking for parity error - * is unnecessary because that interrupt is never enabled. A Loss of BSY - * condition will clear DMA Mode. We can tell when this occurs because the - * the Busy Monitor interrupt is enabled together with DMA Mode. - */ - -static irqreturn_t NCR5380_intr(int irq, void *dev_id) -{ - struct Scsi_Host *instance = dev_id; - struct NCR5380_hostdata *hostdata = shost_priv(instance); - int handled = 0; - unsigned char basr; - unsigned long flags; - - spin_lock_irqsave(&hostdata->lock, flags); - - basr = NCR5380_read(BUS_AND_STATUS_REG); - if (basr & BASR_IRQ) { - unsigned char mr = NCR5380_read(MODE_REG); - unsigned char sr = NCR5380_read(STATUS_REG); - - dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n", - irq, basr, sr, mr); - -#if defined(REAL_DMA) - if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) { - /* Probably End of DMA, Phase Mismatch or Loss of BSY. - * We ack IRQ after clearing Mode Register. Workarounds - * for End of DMA errata need to happen in DMA Mode. - */ - - dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n"); - - if (hostdata->connected) { - NCR5380_dma_complete(instance); - queue_work(hostdata->work_q, &hostdata->main_task); - } else { - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } - } else -#endif /* REAL_DMA */ - if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) && - (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) { - /* Probably reselected */ - NCR5380_write(SELECT_ENABLE_REG, 0); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - - dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n"); - - if (!hostdata->connected) { - NCR5380_reselect(instance); - queue_work(hostdata->work_q, &hostdata->main_task); - } - if (!hostdata->connected) - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - } else { - /* Probably Bus Reset */ - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - - dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n"); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - } - handled = 1; - } else { - shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n"); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - } - - spin_unlock_irqrestore(&hostdata->lock, flags); - - return IRQ_RETVAL(handled); -} - -/* - * Function : int NCR5380_select(struct Scsi_Host *instance, - * struct scsi_cmnd *cmd) - * - * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, - * including ARBITRATION, SELECTION, and initial message out for - * IDENTIFY and queue messages. 
- * - * Inputs : instance - instantiation of the 5380 driver on which this - * target lives, cmd - SCSI command to execute. - * - * Returns cmd if selection failed but should be retried, - * NULL if selection failed and should not be retried, or - * NULL if selection succeeded (hostdata->connected == cmd). - * - * Side effects : - * If bus busy, arbitration failed, etc, NCR5380_select() will exit - * with registers as they should have been on entry - ie - * SELECT_ENABLE will be set appropriately, the NCR5380 - * will cease to drive any SCSI bus signals. - * - * If successful : I_T_L or I_T_L_Q nexus will be established, - * instance->connected will be set to cmd. - * SELECT interrupt will be disabled. - * - * If failed (no target) : cmd->scsi_done() will be called, and the - * cmd->result host byte set to DID_BAD_TARGET. - */ - -static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance, - struct scsi_cmnd *cmd) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned char tmp[3], phase; - unsigned char *data; - int len; - int err; - - NCR5380_dprint(NDEBUG_ARBITRATION, instance); - dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n", - instance->this_id); - - /* - * Arbitration and selection phases are slow and involve dropping the - * lock, so we have to watch out for EH. An exception handler may - * change 'selecting' to NULL. This function will then return NULL - * so that the caller will forget about 'cmd'. (During information - * transfer phases, EH may change 'connected' to NULL.) - */ - hostdata->selecting = cmd; - - /* - * Set the phase bits to 0, otherwise the NCR5380 won't drive the - * data bus during SELECTION. - */ - - NCR5380_write(TARGET_COMMAND_REG, 0); - - /* - * Start arbitration. - */ - - NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); - NCR5380_write(MODE_REG, MR_ARBITRATE); - - /* The chip now waits for BUS FREE phase. Then after the 800 ns - * Bus Free Delay, arbitration will begin. - */ - - spin_unlock_irq(&hostdata->lock); - err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0, - INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, - ICR_ARBITRATION_PROGRESS, HZ); - spin_lock_irq(&hostdata->lock); - if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) { - /* Reselection interrupt */ - goto out; - } - if (!hostdata->selecting) { - /* Command was aborted */ - NCR5380_write(MODE_REG, MR_BASE); - goto out; - } - if (err < 0) { - NCR5380_write(MODE_REG, MR_BASE); - shost_printk(KERN_ERR, instance, - "select: arbitration timeout\n"); - goto out; - } - spin_unlock_irq(&hostdata->lock); - - /* The SCSI-2 arbitration delay is 2.4 us */ - udelay(3); - - /* Check for lost arbitration */ - if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || - (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { - NCR5380_write(MODE_REG, MR_BASE); - dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n"); - spin_lock_irq(&hostdata->lock); - goto out; - } - - /* After/during arbitration, BSY should be asserted. - * IBM DPES-31080 Version S31Q works now - * Tnx to Thomas_Roesch@m2.maus.de for finding this! 
(Roman) - */ - NCR5380_write(INITIATOR_COMMAND_REG, - ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); - - /* - * Again, bus clear + bus settle time is 1.2us, however, this is - * a minimum so we'll udelay ceil(1.2) - */ - - if (hostdata->flags & FLAG_TOSHIBA_DELAY) - udelay(15); - else - udelay(2); - - spin_lock_irq(&hostdata->lock); - - /* NCR5380_reselect() clears MODE_REG after a reselection interrupt */ - if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) - goto out; - - if (!hostdata->selecting) { - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - goto out; - } - - dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n"); - - /* - * Now that we have won arbitration, start Selection process, asserting - * the host and target ID's on the SCSI bus. - */ - - NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd))); - - /* - * Raise ATN while SEL is true before BSY goes false from arbitration, - * since this is the only way to guarantee that we'll get a MESSAGE OUT - * phase immediately after selection. - */ - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY | - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL); - NCR5380_write(MODE_REG, MR_BASE); - - /* - * Reselect interrupts must be turned off prior to the dropping of BSY, - * otherwise we will trigger an interrupt. - */ - NCR5380_write(SELECT_ENABLE_REG, 0); - - spin_unlock_irq(&hostdata->lock); - - /* - * The initiator shall then wait at least two deskew delays and release - * the BSY signal. - */ - udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ - - /* Reset BSY */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | - ICR_ASSERT_ATN | ICR_ASSERT_SEL); - - /* - * Something weird happens when we cease to drive BSY - looks - * like the board/chip is letting us do another read before the - * appropriate propagation delay has expired, and we're confusing - * a BSY signal from ourselves as the target's response to SELECTION. - * - * A small delay (the 'C++' frontend breaks the pipeline with an - * unnecessary jump, making it work on my 386-33/Trantor T128, the - * tighter 'C' code breaks and requires this) solves the problem - - * the 1 us delay is arbitrary, and only used because this delay will - * be the same on other platforms and since it works here, it should - * work there. - * - * wingel suggests that this could be due to failing to wait - * one deskew delay. - */ - - udelay(1); - - dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd)); - - /* - * The SCSI specification calls for a 250 ms timeout for the actual - * selection. 
- */ - - err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY, - msecs_to_jiffies(250)); - - if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { - spin_lock_irq(&hostdata->lock); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_reselect(instance); - if (!hostdata->connected) - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n"); - goto out; - } - - if (err < 0) { - spin_lock_irq(&hostdata->lock); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* Can't touch cmd if it has been reclaimed by the scsi ML */ - if (hostdata->selecting) { - cmd->result = DID_BAD_TARGET << 16; - complete_cmd(instance, cmd); - dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n"); - cmd = NULL; - } - goto out; - } - - /* - * No less than two deskew delays after the initiator detects the - * BSY signal is true, it shall release the SEL signal and may - * change the DATA BUS. -wingel - */ - - udelay(1); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - - /* - * Since we followed the SCSI spec, and raised ATN while SEL - * was true but before BSY was false during selection, the information - * transfer phase should be a MESSAGE OUT phase so that we can send the - * IDENTIFY message. - * - * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG - * message (2 bytes) with a tag ID that we increment with every command - * until it wraps back to 0. - * - * XXX - it turns out that there are some broken SCSI-II devices, - * which claim to support tagged queuing but fail when more than - * some number of commands are issued at once. - */ - - /* Wait for start of REQ/ACK handshake */ - - err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); - spin_lock_irq(&hostdata->lock); - if (err < 0) { - shost_printk(KERN_ERR, instance, "select: REQ timeout\n"); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - goto out; - } - if (!hostdata->selecting) { - do_abort(instance); - goto out; - } - - dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n", - scmd_id(cmd)); - tmp[0] = IDENTIFY(1, cmd->device->lun); - -#ifdef SUPPORT_TAGS - if (cmd->tag != TAG_NONE) { - tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; - tmp[2] = cmd->tag; - len = 3; - } else - len = 1; -#else - len = 1; - cmd->tag = 0; -#endif /* SUPPORT_TAGS */ - - /* Send message(s) */ - data = tmp; - phase = PHASE_MSGOUT; - NCR5380_transfer_pio(instance, &phase, &len, &data); - dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n"); - /* XXX need to handle errors here */ - - hostdata->connected = cmd; -#ifndef SUPPORT_TAGS - hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun; -#endif -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - - initialize_SCp(cmd); - - cmd = NULL; - -out: - if (!hostdata->selecting) - return NULL; - hostdata->selecting = NULL; - return cmd; -} - -/* - * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) - * - * Purpose : transfers data in given phase using polled I/O - * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. 
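[Editor's sketch] After selection the code above sends an IDENTIFY message, optionally followed by SIMPLE QUEUE TAG plus the tag byte when tagged queuing is active, via the tmp[] buffer. A short sketch building those MESSAGE OUT bytes; the message constants are the standard SCSI-2 values, while TAG_NONE, the lun and the tag below are arbitrary illustration values:

/* Sketch: build the MESSAGE OUT bytes sent right after selection.
 * 0x80 | 0x40 | lun is IDENTIFY with disconnect privilege; 0x20 is
 * SIMPLE QUEUE TAG.
 */
#include <stdio.h>

#define TAG_NONE 0xff

static int build_msgout(unsigned char *msg, unsigned char lun, unsigned char tag)
{
    int len = 0;

    msg[len++] = 0x80 | 0x40 | (lun & 0x07);    /* IDENTIFY(1, lun) */
    if (tag != TAG_NONE) {
        msg[len++] = 0x20;                      /* SIMPLE QUEUE TAG */
        msg[len++] = tag;
    }
    return len;
}

int main(void)
{
    unsigned char msg[3];
    int len = build_msgout(msg, 2, 5);

    for (int i = 0; i < len; i++)
        printf("%02x ", msg[i]);
    printf("(%d bytes)\n", len);
    return 0;
}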
- * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes are transferred or exit - * is in same phase. - * - * Also, *phase, *count, *data are modified in place. - * - * XXX Note : handling for bus free may be useful. - */ - -/* - * Note : this code is not as quick as it could be, however it - * IS 100% reliable, and for the actual data transfer where speed - * counts, we will always do a pseudo DMA or DMA transfer. - */ - -static int NCR5380_transfer_pio(struct Scsi_Host *instance, - unsigned char *phase, int *count, - unsigned char **data) -{ - unsigned char p = *phase, tmp; - int c = *count; - unsigned char *d = *data; - - /* - * The NCR5380 chip will only drive the SCSI bus when the - * phase specified in the appropriate bits of the TARGET COMMAND - * REGISTER match the STATUS REGISTER - */ - - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); - - do { - /* - * Wait for assertion of REQ, after which the phase bits will be - * valid - */ - - if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0) - break; - - dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n"); - - /* Check for phase mismatch */ - if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) { - dsprintk(NDEBUG_PIO, instance, "phase mismatch\n"); - NCR5380_dprint_phase(NDEBUG_PIO, instance); - break; - } - - /* Do actual transfer from SCSI bus to / from memory */ - if (!(p & SR_IO)) - NCR5380_write(OUTPUT_DATA_REG, *d); - else - *d = NCR5380_read(CURRENT_SCSI_DATA_REG); - - ++d; - - /* - * The SCSI standard suggests that in MSGOUT phase, the initiator - * should drop ATN on the last byte of the message phase - * after REQ has been asserted for the handshake but before - * the initiator raises ACK. - */ - - if (!(p & SR_IO)) { - if (!((p & SR_MSG) && c > 1)) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ACK); - } else { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ATN); - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); - } - } else { - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); - } - - if (NCR5380_poll_politely(instance, - STATUS_REG, SR_REQ, 0, 5 * HZ) < 0) - break; - - dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n"); - -/* - * We have several special cases to consider during REQ/ACK handshaking : - * 1. We were in MSGOUT phase, and we are on the last byte of the - * message. ATN must be dropped as ACK is dropped. - * - * 2. We are in a MSGIN phase, and we are on the last byte of the - * message. We must exit with ACK asserted, so that the calling - * code may raise ATN before dropping ACK to reject the message. - * - * 3. ACK and ATN are clear and the target may proceed as normal. - */ - if (!(p == PHASE_MSGIN && c == 1)) { - if (p == PHASE_MSGOUT && c > 1) - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - else - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - } - } while (--c); - - dsprintk(NDEBUG_PIO, instance, "residual %d\n", c); - - *count = c; - *data = d; - tmp = NCR5380_read(STATUS_REG); - /* The phase read from the bus is valid if either REQ is (already) - * asserted or if ACK hasn't been released yet. 
The latter applies if - * we're in MSG IN, DATA IN or STATUS and all bytes have been received. - */ - if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0)) - *phase = tmp & PHASE_MASK; - else - *phase = PHASE_UNKNOWN; - - if (!c || (*phase == p)) - return 0; - else - return -1; -} - -/** - * do_reset - issue a reset command - * @instance: adapter to reset - * - * Issue a reset sequence to the NCR5380 and try and get the bus - * back into sane shape. - * - * This clears the reset interrupt flag because there may be no handler for - * it. When the driver is initialized, the NCR5380_intr() handler has not yet - * been installed. And when in EH we may have released the ST DMA interrupt. - */ - -static void do_reset(struct Scsi_Host *instance) -{ - unsigned long flags; - - local_irq_save(flags); - NCR5380_write(TARGET_COMMAND_REG, - PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); - udelay(50); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); - local_irq_restore(flags); -} - -/** - * do_abort - abort the currently established nexus by going to - * MESSAGE OUT phase and sending an ABORT message. - * @instance: relevant scsi host instance - * - * Returns 0 on success, -1 on failure. - */ - -static int do_abort(struct Scsi_Host *instance) -{ - unsigned char *msgptr, phase, tmp; - int len; - int rc; - - /* Request message out phase */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - - /* - * Wait for the target to indicate a valid phase by asserting - * REQ. Once this happens, we'll have either a MSGOUT phase - * and can immediately send the ABORT message, or we'll have some - * other phase and will have to source/sink data. - * - * We really don't care what value was on the bus or what value - * the target sees, so we just handshake. - */ - - rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ); - if (rc < 0) - goto timeout; - - tmp = NCR5380_read(STATUS_REG) & PHASE_MASK; - - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - - if (tmp != PHASE_MSGOUT) { - NCR5380_write(INITIATOR_COMMAND_REG, - ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); - rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ); - if (rc < 0) - goto timeout; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - } - - tmp = ABORT; - msgptr = &tmp; - len = 1; - phase = PHASE_MSGOUT; - NCR5380_transfer_pio(instance, &phase, &len, &msgptr); - - /* - * If we got here, and the command completed successfully, - * we're about to go into bus free state. - */ - - return len ? -1 : 0; - -timeout: - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - return -1; -} - -#if defined(REAL_DMA) -/* - * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) - * - * Purpose : transfers data in given phase using either real - * or pseudo DMA. - * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. - * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes or transferred or exit - * is in same phase. - * - * Also, *phase, *count, *data are modified in place. 
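[Editor's sketch] The tail of NCR5380_transfer_pio() above reports the bus phase only while it is still valid: either REQ is asserted again, or this was an input phase and ACK is still held on the final byte. A small pure-function sketch of that check; the bit values assume the usual 5380 status register layout and should be treated as illustrative:

/* Sketch: decide whether the phase read from STATUS_REG is still valid. */
#include <stdio.h>

#define SR_IO         0x04
#define SR_REQ        0x20
#define PHASE_MASK    0x1c      /* MSG | C/D | I/O */
#define PHASE_UNKNOWN 0xff

static unsigned char reported_phase(unsigned char status, int residual)
{
    /* Valid if REQ is (re)asserted, or if we just took the last byte of
     * an input phase and ACK has not been released yet.
     */
    if ((status & SR_REQ) || ((status & SR_IO) && residual == 0))
        return status & PHASE_MASK;
    return PHASE_UNKNOWN;
}

int main(void)
{
    printf("%02x\n", reported_phase(0x24, 0));  /* REQ + I/O: DATA IN */
    printf("%02x\n", reported_phase(0x00, 3));  /* no REQ: unknown */
    return 0;
}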
- */ - - -static int NCR5380_transfer_dma(struct Scsi_Host *instance, - unsigned char *phase, int *count, - unsigned char **data) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - register int c = *count; - register unsigned char p = *phase; - -#if defined(CONFIG_SUN3) - /* sanity check */ - if (!sun3_dma_setup_done) { - pr_err("scsi%d: transfer_dma without setup!\n", - instance->host_no); - BUG(); - } - hostdata->dma_len = c; - - dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", - (p & SR_IO) ? "receive" : "send", c, *data); - - /* netbsd turns off ints here, why not be safe and do it too */ - - /* send start chain */ - sun3scsi_dma_start(c, *data); - - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | - MR_ENABLE_EOP_INTR); - if (p & SR_IO) { - NCR5380_write(INITIATOR_COMMAND_REG, 0); - NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); - } else { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA); - NCR5380_write(START_DMA_SEND_REG, 0); - } - -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - - sun3_dma_active = 1; - -#else /* !defined(CONFIG_SUN3) */ - register unsigned char *d = *data; - unsigned char tmp; - - if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { - *phase = tmp; - return -1; - } - - if (hostdata->read_overruns && (p & SR_IO)) - c -= hostdata->read_overruns; - - dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n", - (p & SR_IO) ? "receive" : "send", c, d); - - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); - NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY | - MR_ENABLE_EOP_INTR); - - if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { - /* On the Medusa, it is a must to initialize the DMA before - * starting the NCR. This is also the cleaner way for the TT. - */ - hostdata->dma_len = (p & SR_IO) ? - NCR5380_dma_read_setup(instance, d, c) : - NCR5380_dma_write_setup(instance, d, c); - } - - if (p & SR_IO) - NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); - else { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); - NCR5380_write(START_DMA_SEND_REG, 0); - } - - if (hostdata->flags & FLAG_LATE_DMA_SETUP) { - /* On the Falcon, the DMA setup must be done after the last */ - /* NCR access, else the DMA setup gets trashed! - */ - hostdata->dma_len = (p & SR_IO) ? - NCR5380_dma_read_setup(instance, d, c) : - NCR5380_dma_write_setup(instance, d, c); - } -#endif /* !defined(CONFIG_SUN3) */ - - return 0; -} -#endif /* defined(REAL_DMA) */ - -/* - * Function : NCR5380_information_transfer (struct Scsi_Host *instance) - * - * Purpose : run through the various SCSI phases and do as the target - * directs us to. Operates on the currently connected command, - * instance->connected. - * - * Inputs : instance, instance for which we are doing commands - * - * Side effects : SCSI things happen, the disconnected queue will be - * modified if a command disconnects, *instance->connected will - * change. - * - * XXX Note : we need to watch for bus free or a reset condition here - * to recover from an unexpected bus free condition. 
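The transfer setup above orders the host-side DMA programming around the last 5380 register access depending on the board: TT/Medusa-style hardware wants the DMA initialized first, while the Falcon requires it after the final chip access. A compact sketch of that ordering decision, with hypothetical types and callbacks:

#include <stdbool.h>

struct dma_board {
        bool late_dma_setup;            /* e.g. Falcon ST-DMA */
        void (*host_dma_setup)(void);
        void (*start_5380_dma)(void);
};

static void start_transfer(struct dma_board *b)
{
        if (!b->late_dma_setup)
                b->host_dma_setup();    /* TT/Medusa: program the DMA first */
        b->start_5380_dma();            /* last 5380 register access */
        if (b->late_dma_setup)
                b->host_dma_setup();    /* Falcon: DMA setup must come last */
}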
- */ - -static void NCR5380_information_transfer(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned char msgout = NOP; - int sink = 0; - int len; - int transfersize; - unsigned char *data; - unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; - struct scsi_cmnd *cmd; - -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - - while ((cmd = hostdata->connected)) { - struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd); - - tmp = NCR5380_read(STATUS_REG); - /* We only have a valid SCSI phase when REQ is asserted */ - if (tmp & SR_REQ) { - phase = (tmp & PHASE_MASK); - if (phase != old_phase) { - old_phase = phase; - NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); - } -#if defined(CONFIG_SUN3) - if (phase == PHASE_CMDOUT) { -#if defined(REAL_DMA) - void *d; - unsigned long count; - - if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { - count = cmd->SCp.buffer->length; - d = sg_virt(cmd->SCp.buffer); - } else { - count = cmd->SCp.this_residual; - d = cmd->SCp.ptr; - } - /* this command setup for dma yet? */ - if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) { - if (cmd->request->cmd_type == REQ_TYPE_FS) { - sun3scsi_dma_setup(instance, d, count, - rq_data_dir(cmd->request)); - sun3_dma_setup_done = cmd; - } - } -#endif -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - } -#endif /* CONFIG_SUN3 */ - - if (sink && (phase != PHASE_MSGOUT)) { - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | - ICR_ASSERT_ACK); - while (NCR5380_read(STATUS_REG) & SR_REQ) - ; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); - sink = 0; - continue; - } - - switch (phase) { - case PHASE_DATAOUT: -#if (NDEBUG & NDEBUG_NO_DATAOUT) - shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n"); - sink = 1; - do_abort(instance); - cmd->result = DID_ERROR << 16; - complete_cmd(instance, cmd); - hostdata->connected = NULL; - return; -#endif - case PHASE_DATAIN: - /* - * If there is no room left in the current buffer in the - * scatter-gather list, move onto the next one. - */ - - if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { - ++cmd->SCp.buffer; - --cmd->SCp.buffers_residual; - cmd->SCp.this_residual = cmd->SCp.buffer->length; - cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); - merge_contiguous_buffers(cmd); - dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n", - cmd->SCp.this_residual, - cmd->SCp.buffers_residual); - } - - /* - * The preferred transfer method is going to be - * PSEUDO-DMA for systems that are strictly PIO, - * since we can let the hardware do the handshaking. - * - * For this to work, we need to know the transfersize - * ahead of time, since the pseudo-DMA code will sit - * in an unconditional loop. - */ - - /* ++roman: I suggest, this should be - * #if def(REAL_DMA) - * instead of leaving REAL_DMA out. - */ - -#if defined(REAL_DMA) -#if !defined(CONFIG_SUN3) - transfersize = 0; - if (!cmd->device->borken) -#endif - transfersize = NCR5380_dma_xfer_len(instance, cmd, phase); - - if (transfersize >= DMA_MIN_SIZE) { - len = transfersize; - cmd->SCp.phase = phase; - if (NCR5380_transfer_dma(instance, &phase, - &len, (unsigned char **)&cmd->SCp.ptr)) { - /* - * If the watchdog timer fires, all future - * accesses to this device will use the - * polled-IO. 
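Whether a chunk goes out by DMA or by polled PIO hinges on a minimum-size threshold and on whether the device has already been flagged after a failed DMA handshake. A sketch of that decision, reusing the 32-byte DMA_MIN_SIZE the Atari glue defines (the flag name here is made up):

#include <stdbool.h>

#define DMA_MIN_SIZE 32                 /* threshold used by the Atari glue */

enum xfer_method { XFER_PIO, XFER_DMA };

static enum xfer_method pick_method(int transfersize, bool device_flagged_slow)
{
        if (transfersize >= DMA_MIN_SIZE && !device_flagged_slow)
                return XFER_DMA;
        return XFER_PIO;                /* fall back to polled transfers */
}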
- */ - scmd_printk(KERN_INFO, cmd, - "switching to slow handshake\n"); - cmd->device->borken = 1; - sink = 1; - do_abort(instance); - cmd->result = DID_ERROR << 16; - /* XXX - need to source or sink data here, as appropriate */ - } else { -#ifdef REAL_DMA - /* ++roman: When using real DMA, - * information_transfer() should return after - * starting DMA since it has nothing more to - * do. - */ - return; -#else - cmd->SCp.this_residual -= transfersize - len; -#endif - } - } else -#endif /* defined(REAL_DMA) */ - { - /* Break up transfer into 3 ms chunks, - * presuming 6 accesses per handshake. - */ - transfersize = min((unsigned long)cmd->SCp.this_residual, - hostdata->accesses_per_ms / 2); - len = transfersize; - NCR5380_transfer_pio(instance, &phase, &len, - (unsigned char **)&cmd->SCp.ptr); - cmd->SCp.this_residual -= transfersize - len; - } -#if defined(CONFIG_SUN3) && defined(REAL_DMA) - /* if we had intended to dma that command clear it */ - if (sun3_dma_setup_done == cmd) - sun3_dma_setup_done = NULL; -#endif - return; - case PHASE_MSGIN: - len = 1; - data = &tmp; - NCR5380_transfer_pio(instance, &phase, &len, &data); - cmd->SCp.Message = tmp; - - switch (tmp) { - case ABORT: - case COMMAND_COMPLETE: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dsprintk(NDEBUG_QUEUES, instance, - "COMMAND COMPLETE %p target %d lun %llu\n", - cmd, scmd_id(cmd), cmd->device->lun); - - hostdata->connected = NULL; -#ifdef SUPPORT_TAGS - cmd_free_tag(cmd); - if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { - u8 lun = cmd->device->lun; - struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; - - dsprintk(NDEBUG_TAGS, instance, - "QUEUE_FULL %p target %d lun %d nr_allocated %d\n", - cmd, scmd_id(cmd), lun, ta->nr_allocated); - if (ta->queue_size > ta->nr_allocated) - ta->queue_size = ta->nr_allocated; - } -#endif - - cmd->result &= ~0xffff; - cmd->result |= cmd->SCp.Status; - cmd->result |= cmd->SCp.Message << 8; - - if (cmd->cmnd[0] == REQUEST_SENSE) - complete_cmd(instance, cmd); - else { - if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION || - cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) { - dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n", - cmd); - list_add_tail(&ncmd->list, - &hostdata->autosense); - } else - complete_cmd(instance, cmd); - } - - /* - * Restore phase bits to 0 so an interrupted selection, - * arbitration can resume. - */ - NCR5380_write(TARGET_COMMAND_REG, 0); - - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - - maybe_release_dma_irq(instance); - return; - case MESSAGE_REJECT: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - switch (hostdata->last_message) { - case HEAD_OF_QUEUE_TAG: - case ORDERED_QUEUE_TAG: - case SIMPLE_QUEUE_TAG: - /* The target obviously doesn't support tagged - * queuing, even though it announced this ability in - * its INQUIRY data ?!? (maybe only this LUN?) Ok, - * clear 'tagged_supported' and lock the LUN, since - * the command is treated as untagged further on. 
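Earlier in this hunk the PIO fallback caps each burst at accesses_per_ms / 2 bytes, "presuming 6 accesses per handshake". A short worked example of why that comes out near 3 ms per chunk (the calibration value below is assumed, not measured):

#include <stdio.h>

int main(void)
{
        unsigned long accesses_per_ms = 600;    /* assumed calibration result */
        unsigned long residual = 4096;          /* bytes still owed in this phase */
        unsigned long chunk = residual < accesses_per_ms / 2 ?
                              residual : accesses_per_ms / 2;

        printf("chunk = %lu bytes, ~%lu ms at 6 accesses/byte\n",
               chunk, chunk * 6 / accesses_per_ms);
        return 0;
}

Roughly: (accesses_per_ms / 2) bytes times 6 accesses per byte equals 3 x accesses_per_ms accesses, i.e. about 3 ms of register banging before the loop returns control.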
- */ - cmd->device->tagged_supported = 0; - hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); - cmd->tag = TAG_NONE; - dsprintk(NDEBUG_TAGS, instance, "target %d lun %llu rejected QUEUE_TAG message; tagged queuing disabled\n", - scmd_id(cmd), cmd->device->lun); - break; - } - break; - case DISCONNECT: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - hostdata->connected = NULL; - list_add(&ncmd->list, &hostdata->disconnected); - dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES, - instance, "connected command %p for target %d lun %llu moved to disconnected queue\n", - cmd, scmd_id(cmd), cmd->device->lun); - - /* - * Restore phase bits to 0 so an interrupted selection, - * arbitration can resume. - */ - NCR5380_write(TARGET_COMMAND_REG, 0); - - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - return; - /* - * The SCSI data pointer is *IMPLICITLY* saved on a disconnect - * operation, in violation of the SCSI spec so we can safely - * ignore SAVE/RESTORE pointers calls. - * - * Unfortunately, some disks violate the SCSI spec and - * don't issue the required SAVE_POINTERS message before - * disconnecting, and we have to break spec to remain - * compatible. - */ - case SAVE_POINTERS: - case RESTORE_POINTERS: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - break; - case EXTENDED_MESSAGE: - /* - * Start the message buffer with the EXTENDED_MESSAGE - * byte, since spi_print_msg() wants the whole thing. - */ - extended_msg[0] = EXTENDED_MESSAGE; - /* Accept first byte by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - spin_unlock_irq(&hostdata->lock); - - dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n"); - - len = 2; - data = extended_msg + 1; - phase = PHASE_MSGIN; - NCR5380_transfer_pio(instance, &phase, &len, &data); - dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n", - (int)extended_msg[1], - (int)extended_msg[2]); - - if (!len && extended_msg[1] > 0 && - extended_msg[1] <= sizeof(extended_msg) - 2) { - /* Accept third byte by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - len = extended_msg[1] - 1; - data = extended_msg + 3; - phase = PHASE_MSGIN; - - NCR5380_transfer_pio(instance, &phase, &len, &data); - dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n", - len); - - switch (extended_msg[2]) { - case EXTENDED_SDTR: - case EXTENDED_WDTR: - case EXTENDED_MODIFY_DATA_POINTER: - case EXTENDED_EXTENDED_IDENTIFY: - tmp = 0; - } - } else if (len) { - shost_printk(KERN_ERR, instance, "error receiving extended message\n"); - tmp = 0; - } else { - shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n", - extended_msg[2], extended_msg[1]); - tmp = 0; - } - - spin_lock_irq(&hostdata->lock); - if (!hostdata->connected) - return; - - /* Fall through to reject message */ - - /* - * If we get something weird that we aren't expecting, - * reject it. 
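The extended-message handling above first reads the length and code bytes, then fetches length - 1 further argument bytes, rejecting anything that will not fit in the 10-byte buffer. A standalone sketch of that header check (assumes a buffer of at least 3 bytes already filled in):

#include <stddef.h>

#define EXTENDED_MESSAGE 0x01

/* msg[0] = 0x01, msg[1] = number of bytes following the length byte
 * (code plus arguments), msg[2] = extended message code. Returns the
 * number of argument bytes still to be read, or -1 to reject. */
static int extended_msg_payload_len(const unsigned char *msg, size_t bufsize)
{
        if (msg[0] != EXTENDED_MESSAGE)
                return -1;
        if (msg[1] == 0 || msg[1] > bufsize - 2)
                return -1;              /* zero length or too big for the buffer */
        return msg[1] - 1;
}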
- */ - default: - if (!tmp) { - shost_printk(KERN_ERR, instance, "rejecting message "); - spi_print_msg(extended_msg); - printk("\n"); - } else if (tmp != EXTENDED_MESSAGE) - scmd_printk(KERN_INFO, cmd, - "rejecting unknown message %02x\n", - tmp); - else - scmd_printk(KERN_INFO, cmd, - "rejecting unknown extended message code %02x, length %d\n", - extended_msg[1], extended_msg[0]); - - msgout = MESSAGE_REJECT; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - break; - } /* switch (tmp) */ - break; - case PHASE_MSGOUT: - len = 1; - data = &msgout; - hostdata->last_message = msgout; - NCR5380_transfer_pio(instance, &phase, &len, &data); - if (msgout == ABORT) { - hostdata->connected = NULL; - cmd->result = DID_ERROR << 16; - complete_cmd(instance, cmd); - maybe_release_dma_irq(instance); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return; - } - msgout = NOP; - break; - case PHASE_CMDOUT: - len = cmd->cmd_len; - data = cmd->cmnd; - /* - * XXX for performance reasons, on machines with a - * PSEUDO-DMA architecture we should probably - * use the dma transfer function. - */ - NCR5380_transfer_pio(instance, &phase, &len, &data); - break; - case PHASE_STATIN: - len = 1; - data = &tmp; - NCR5380_transfer_pio(instance, &phase, &len, &data); - cmd->SCp.Status = tmp; - break; - default: - shost_printk(KERN_ERR, instance, "unknown phase\n"); - NCR5380_dprint(NDEBUG_ANY, instance); - } /* switch(phase) */ - } else { - spin_unlock_irq(&hostdata->lock); - NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ); - spin_lock_irq(&hostdata->lock); - } - } -} - -/* - * Function : void NCR5380_reselect (struct Scsi_Host *instance) - * - * Purpose : does reselection, initializing the instance->connected - * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q - * nexus has been reestablished, - * - * Inputs : instance - this instance of the NCR5380. - */ - - -/* it might eventually prove necessary to do a dma setup on - reselection, but it doesn't seem to be needed now -- sam */ - -static void NCR5380_reselect(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned char target_mask; - unsigned char lun; -#ifdef SUPPORT_TAGS - unsigned char tag; -#endif - unsigned char msg[3]; - int __maybe_unused len; - unsigned char __maybe_unused *data, __maybe_unused phase; - struct NCR5380_cmd *ncmd; - struct scsi_cmnd *tmp; - - /* - * Disable arbitration, etc. since the host adapter obviously - * lost, and tell an interrupted NCR5380_select() to restart. - */ - - NCR5380_write(MODE_REG, MR_BASE); - - target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - - dsprintk(NDEBUG_RESELECTION, instance, "reselect\n"); - - /* - * At this point, we have detected that our SCSI ID is on the bus, - * SEL is true and BSY was false for at least one bus settle delay - * (400 ns). - * - * We must assert BSY ourselves, until the target drops the SEL - * signal. - */ - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); - if (NCR5380_poll_politely(instance, - STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - return; - } - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - /* - * Wait for target to go into MSGIN. 
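During reselection the winning target puts both its own ID bit and the initiator's on the data bus; masking off the host's bit leaves the target's. A small illustration in plain C, not the register-level code:

#include <stdio.h>

static int reselecting_target(unsigned char bus_data, unsigned char own_id_mask)
{
        unsigned char target_mask = bus_data & ~own_id_mask;
        int id;

        for (id = 7; id >= 0; id--)
                if (target_mask & (1 << id))
                        return id;
        return -1;      /* no other ID bit set: not a valid reselection */
}

int main(void)
{
        /* host ID 7, target ID 2 => bus data 0x84 */
        printf("target %d\n", reselecting_target(0x84, 0x80));
        return 0;
}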
- */ - - if (NCR5380_poll_politely(instance, - STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) { - do_abort(instance); - return; - } - -#if defined(CONFIG_SUN3) && defined(REAL_DMA) - /* acknowledge toggle to MSGIN */ - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); - - /* peek at the byte without really hitting the bus */ - msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); -#else - len = 1; - data = msg; - phase = PHASE_MSGIN; - NCR5380_transfer_pio(instance, &phase, &len, &data); - - if (len) { - do_abort(instance); - return; - } -#endif - - if (!(msg[0] & 0x80)) { - shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got "); - spi_print_msg(msg); - printk("\n"); - do_abort(instance); - return; - } - lun = msg[0] & 0x07; - -#if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3) - /* If the phase is still MSGIN, the target wants to send some more - * messages. In case it supports tagged queuing, this is probably a - * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. - */ - tag = TAG_NONE; - if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) { - /* Accept previous IDENTIFY message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - len = 2; - data = msg + 1; - if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && - msg[1] == SIMPLE_QUEUE_TAG) - tag = msg[2]; - dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n", - target_mask, lun, tag); - } -#endif - - /* - * Find the command corresponding to the I_T_L or I_T_L_Q nexus we - * just reestablished, and remove it from the disconnected queue. - */ - - tmp = NULL; - list_for_each_entry(ncmd, &hostdata->disconnected, list) { - struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); - - if (target_mask == (1 << scmd_id(cmd)) && - lun == (u8)cmd->device->lun -#ifdef SUPPORT_TAGS - && (tag == cmd->tag) -#endif - ) { - list_del(&ncmd->list); - tmp = cmd; - break; - } - } - - if (tmp) { - dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, - "reselect: removed %p from disconnected queue\n", tmp); - } else { - -#ifdef SUPPORT_TAGS - shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d tag %d not in disconnected queue.\n", - target_mask, lun, tag); -#else - shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", - target_mask, lun); -#endif - /* - * Since we have an established nexus that we can't do anything - * with, we must abort it. - */ - do_abort(instance); - return; - } - -#if defined(CONFIG_SUN3) && defined(REAL_DMA) - /* engage dma setup for the command we just saw */ - { - void *d; - unsigned long count; - - if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { - count = tmp->SCp.buffer->length; - d = sg_virt(tmp->SCp.buffer); - } else { - count = tmp->SCp.this_residual; - d = tmp->SCp.ptr; - } - /* setup this command for dma if not already */ - if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) { - sun3scsi_dma_setup(instance, d, count, - rq_data_dir(tmp->request)); - sun3_dma_setup_done = tmp; - } - } - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); -#endif - - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - -#if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3) - /* If the phase is still MSGIN, the target wants to send some more - * messages. In case it supports tagged queuing, this is probably a - * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 
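The reselection path above insists on an IDENTIFY message first: bit 7 must be set and the low three bits carry the LUN of the resumed nexus. As a one-function sketch:

/* Returns the LUN encoded in an IDENTIFY message, or -1 if the byte is
 * not an IDENTIFY message at all (in which case the driver aborts). */
static int identify_to_lun(unsigned char msg)
{
        if (!(msg & 0x80))
                return -1;
        return msg & 0x07;
}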
- */ - tag = TAG_NONE; - if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { - /* Accept previous IDENTIFY message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - len = 2; - data = msg + 1; - if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && - msg[1] == SIMPLE_QUEUE_TAG) - tag = msg[2]; - dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n" - target_mask, lun, tag); - } -#endif - - hostdata->connected = tmp; - dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n", - scmd_id(tmp), tmp->device->lun, tmp->tag); -} - - -/** - * list_find_cmd - test for presence of a command in a linked list - * @haystack: list of commands - * @needle: command to search for - */ - -static bool list_find_cmd(struct list_head *haystack, - struct scsi_cmnd *needle) -{ - struct NCR5380_cmd *ncmd; - - list_for_each_entry(ncmd, haystack, list) - if (NCR5380_to_scmd(ncmd) == needle) - return true; - return false; -} - -/** - * list_remove_cmd - remove a command from linked list - * @haystack: list of commands - * @needle: command to remove - */ - -static bool list_del_cmd(struct list_head *haystack, - struct scsi_cmnd *needle) -{ - if (list_find_cmd(haystack, needle)) { - struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle); - - list_del(&ncmd->list); - return true; - } - return false; -} - -/** - * NCR5380_abort - scsi host eh_abort_handler() method - * @cmd: the command to be aborted - * - * Try to abort a given command by removing it from queues and/or sending - * the target an abort message. This may not succeed in causing a target - * to abort the command. Nonetheless, the low-level driver must forget about - * the command because the mid-layer reclaims it and it may be re-issued. - * - * The normal path taken by a command is as follows. For EH we trace this - * same path to locate and abort the command. - * - * unissued -> selecting -> [unissued -> selecting ->]... connected -> - * [disconnected -> connected ->]... - * [autosense -> connected ->] done - * - * If cmd was not found at all then presumably it has already been completed, - * in which case return SUCCESS to try to avoid further EH measures. - * - * If the command has not completed yet, we must not fail to find it. - * We have no option but to forget the aborted command (even if it still - * lacks sense data). The mid-layer may re-issue a command that is in error - * recovery (see scsi_send_eh_cmnd), but the logic and data structures in - * this driver are such that a command can appear on one queue only. - * - * The lock protects driver data structures, but EH handlers also use it - * to serialize their own execution and prevent their own re-entry. 
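The EH comment above describes the path a command takes through the driver's queues; the abort handler below checks those places in exactly that order and stops at the first hit. A simplified sketch of the search order, with boolean stand-ins instead of the real lists:

#include <stdbool.h>

enum cmd_location {
        CMD_UNISSUED, CMD_SELECTING, CMD_CONNECTED,
        CMD_DISCONNECTED, CMD_AUTOSENSE, CMD_NOT_FOUND
};

static enum cmd_location locate_cmd(bool on_unissued, bool is_selecting,
                                    bool is_connected, bool on_disconnected,
                                    bool on_autosense)
{
        if (on_unissued)
                return CMD_UNISSUED;     /* never started: just complete it */
        if (is_selecting)
                return CMD_SELECTING;    /* selection still in progress */
        if (is_connected)
                return CMD_CONNECTED;    /* must send ABORT on the bus */
        if (on_disconnected)
                return CMD_DISCONNECTED; /* cannot abort without a reset */
        if (on_autosense)
                return CMD_AUTOSENSE;    /* sense data not fetched yet */
        return CMD_NOT_FOUND;            /* presumably already completed */
}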
- */ - -static int NCR5380_abort(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *instance = cmd->device->host; - struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned long flags; - int result = SUCCESS; - - spin_lock_irqsave(&hostdata->lock, flags); - -#if (NDEBUG & NDEBUG_ANY) - scmd_printk(KERN_INFO, cmd, __func__); -#endif - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_dprint_phase(NDEBUG_ANY, instance); - - if (list_del_cmd(&hostdata->unissued, cmd)) { - dsprintk(NDEBUG_ABORT, instance, - "abort: removed %p from issue queue\n", cmd); - cmd->result = DID_ABORT << 16; - cmd->scsi_done(cmd); /* No tag or busy flag to worry about */ - goto out; - } - - if (hostdata->selecting == cmd) { - dsprintk(NDEBUG_ABORT, instance, - "abort: cmd %p == selecting\n", cmd); - hostdata->selecting = NULL; - cmd->result = DID_ABORT << 16; - complete_cmd(instance, cmd); - goto out; - } - - if (list_del_cmd(&hostdata->disconnected, cmd)) { - dsprintk(NDEBUG_ABORT, instance, - "abort: removed %p from disconnected list\n", cmd); - /* Can't call NCR5380_select() and send ABORT because that - * means releasing the lock. Need a bus reset. - */ - set_host_byte(cmd, DID_ERROR); - complete_cmd(instance, cmd); - result = FAILED; - goto out; - } - - if (hostdata->connected == cmd) { - dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd); - hostdata->connected = NULL; -#ifdef REAL_DMA - hostdata->dma_len = 0; -#endif - if (do_abort(instance)) { - set_host_byte(cmd, DID_ERROR); - complete_cmd(instance, cmd); - result = FAILED; - goto out; - } - set_host_byte(cmd, DID_ABORT); - complete_cmd(instance, cmd); - goto out; - } - - if (list_del_cmd(&hostdata->autosense, cmd)) { - dsprintk(NDEBUG_ABORT, instance, - "abort: removed %p from sense queue\n", cmd); - set_host_byte(cmd, DID_ERROR); - complete_cmd(instance, cmd); - } - -out: - if (result == FAILED) - dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); - else - dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); - - queue_work(hostdata->work_q, &hostdata->main_task); - maybe_release_dma_irq(instance); - spin_unlock_irqrestore(&hostdata->lock, flags); - - return result; -} - - -/** - * NCR5380_bus_reset - reset the SCSI bus - * @cmd: SCSI command undergoing EH - * - * Returns SUCCESS - */ - -static int NCR5380_bus_reset(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *instance = cmd->device->host; - struct NCR5380_hostdata *hostdata = shost_priv(instance); - int i; - unsigned long flags; - struct NCR5380_cmd *ncmd; - - spin_lock_irqsave(&hostdata->lock, flags); - -#if (NDEBUG & NDEBUG_ANY) - scmd_printk(KERN_INFO, cmd, __func__); -#endif - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_dprint_phase(NDEBUG_ANY, instance); - - do_reset(instance); - - /* reset NCR registers */ - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(TARGET_COMMAND_REG, 0); - NCR5380_write(SELECT_ENABLE_REG, 0); - - /* After the reset, there are no more connected or disconnected commands - * and no busy units; so clear the low-level status here to avoid - * conflicts when the mid-level code tries to wake up the affected - * commands! 
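The reset handler that follows drains every queue and completes each command with the same host status. A generic, self-contained sketch of that drain pattern (toy singly-linked list, not the kernel's list_head API):

#include <stddef.h>

struct fake_cmd {
        struct fake_cmd *next;
        int result;
};

static void drain_queue(struct fake_cmd **head, int reset_status)
{
        struct fake_cmd *cmd = *head;

        while (cmd) {
                struct fake_cmd *next = cmd->next;

                cmd->result = reset_status << 16;       /* e.g. DID_RESET */
                cmd->next = NULL;                       /* command handed back */
                cmd = next;
        }
        *head = NULL;                                   /* queue is now empty */
}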
- */ - - if (list_del_cmd(&hostdata->unissued, cmd)) { - cmd->result = DID_RESET << 16; - cmd->scsi_done(cmd); - } - - if (hostdata->selecting) { - hostdata->selecting->result = DID_RESET << 16; - complete_cmd(instance, hostdata->selecting); - hostdata->selecting = NULL; - } - - list_for_each_entry(ncmd, &hostdata->disconnected, list) { - struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); - - set_host_byte(cmd, DID_RESET); - cmd->scsi_done(cmd); - } - INIT_LIST_HEAD(&hostdata->disconnected); - - list_for_each_entry(ncmd, &hostdata->autosense, list) { - struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); - - set_host_byte(cmd, DID_RESET); - cmd->scsi_done(cmd); - } - INIT_LIST_HEAD(&hostdata->autosense); - - if (hostdata->connected) { - set_host_byte(hostdata->connected, DID_RESET); - complete_cmd(instance, hostdata->connected); - hostdata->connected = NULL; - } - -#ifdef SUPPORT_TAGS - free_all_tags(hostdata); -#endif - for (i = 0; i < 8; ++i) - hostdata->busy[i] = 0; -#ifdef REAL_DMA - hostdata->dma_len = 0; -#endif - - queue_work(hostdata->work_q, &hostdata->main_task); - maybe_release_dma_irq(instance); - spin_unlock_irqrestore(&hostdata->lock, flags); - - return SUCCESS; -} diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 78d1b2963f2c..a59ad94ea52b 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -14,55 +14,23 @@ * */ - -/**************************************************************************/ -/* */ -/* Notes for Falcon SCSI: */ -/* ---------------------- */ -/* */ -/* Since the Falcon SCSI uses the ST-DMA chip, that is shared among */ -/* several device drivers, locking and unlocking the access to this */ -/* chip is required. But locking is not possible from an interrupt, */ -/* since it puts the process to sleep if the lock is not available. */ -/* This prevents "late" locking of the DMA chip, i.e. locking it just */ -/* before using it, since in case of disconnection-reconnection */ -/* commands, the DMA is started from the reselection interrupt. */ -/* */ -/* Two possible schemes for ST-DMA-locking would be: */ -/* 1) The lock is taken for each command separately and disconnecting */ -/* is forbidden (i.e. can_queue = 1). */ -/* 2) The DMA chip is locked when the first command comes in and */ -/* released when the last command is finished and all queues are */ -/* empty. */ -/* The first alternative would result in bad performance, since the */ -/* interleaving of commands would not be used. The second is unfair to */ -/* other drivers using the ST-DMA, because the queues will seldom be */ -/* totally empty if there is a lot of disk traffic. */ -/* */ -/* For this reasons I decided to employ a more elaborate scheme: */ -/* - First, we give up the lock every time we can (for fairness), this */ -/* means every time a command finishes and there are no other commands */ -/* on the disconnected queue. */ -/* - If there are others waiting to lock the DMA chip, we stop */ -/* issuing commands, i.e. moving them onto the issue queue. */ -/* Because of that, the disconnected queue will run empty in a */ -/* while. Instead we go to sleep on a 'fairness_queue'. */ -/* - If the lock is released, all processes waiting on the fairness */ -/* queue will be woken. The first of them tries to re-lock the DMA, */ -/* the others wait for the first to finish this task. After that, */ -/* they can all run on and do their commands... 
*/ -/* This sounds complicated (and it is it :-(), but it seems to be a */ -/* good compromise between fairness and performance: As long as no one */ -/* else wants to work with the ST-DMA chip, SCSI can go along as */ -/* usual. If now someone else comes, this behaviour is changed to a */ -/* "fairness mode": just already initiated commands are finished and */ -/* then the lock is released. The other one waiting will probably win */ -/* the race for locking the DMA, since it was waiting for longer. And */ -/* after it has finished, SCSI can go ahead again. Finally: I hope I */ -/* have not produced any deadlock possibilities! */ -/* */ -/**************************************************************************/ - +/* + * Notes for Falcon SCSI DMA + * + * The 5380 device is one of several that all share the DMA chip. Hence + * "locking" and "unlocking" access to this chip is required. + * + * Two possible schemes for ST DMA acquisition by atari_scsi are: + * 1) The lock is taken for each command separately (i.e. can_queue == 1). + * 2) The lock is taken when the first command arrives and released + * when the last command is finished (i.e. can_queue > 1). + * + * The first alternative limits SCSI bus utilization, since interleaving + * commands is not possible. The second gives better performance but is + * unfair to other drivers needing to use the ST DMA chip. In order to + * allow the IDE and floppy drivers equal access to the ST DMA chip + * the default is can_queue == 1. + */ #include <linux/module.h> #include <linux/types.h> @@ -83,13 +51,10 @@ #include <scsi/scsi_host.h> -/* Definitions for the core NCR5380 driver. */ - -#define REAL_DMA -#define SUPPORT_TAGS -#define MAX_TAGS 32 #define DMA_MIN_SIZE 32 +/* Definitions for the core NCR5380 driver. 
*/ + #define NCR5380_implementation_fields /* none */ #define NCR5380_read(reg) atari_scsi_reg_read(reg) @@ -99,9 +64,9 @@ #define NCR5380_abort atari_scsi_abort #define NCR5380_info atari_scsi_info -#define NCR5380_dma_read_setup(instance, data, count) \ +#define NCR5380_dma_recv_setup(instance, data, count) \ atari_scsi_dma_setup(instance, data, count, 0) -#define NCR5380_dma_write_setup(instance, data, count) \ +#define NCR5380_dma_send_setup(instance, data, count) \ atari_scsi_dma_setup(instance, data, count, 1) #define NCR5380_dma_residual(instance) \ atari_scsi_dma_residual(instance) @@ -159,14 +124,11 @@ static inline unsigned long SCSI_DMA_GETADR(void) return adr; } -#ifdef REAL_DMA static void atari_scsi_fetch_restbytes(void); -#endif static unsigned char (*atari_scsi_reg_read)(unsigned char reg); static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); -#ifdef REAL_DMA static unsigned long atari_dma_residual, atari_dma_startaddr; static short atari_dma_active; /* pointer to the dribble buffer */ @@ -185,7 +147,6 @@ static char *atari_dma_orig_addr; /* mask for address bits that can't be used with the ST-DMA */ static unsigned long atari_dma_stram_mask; #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) -#endif static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); @@ -193,16 +154,12 @@ static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); -static int setup_use_tagged_queuing = -1; -module_param(setup_use_tagged_queuing, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); static int setup_toshiba_delay = -1; module_param(setup_toshiba_delay, int, 0); -#if defined(REAL_DMA) - static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) { int i; @@ -255,12 +212,9 @@ static void scsi_dma_buserr(int irq, void *dummy) } #endif -#endif - static irqreturn_t scsi_tt_intr(int irq, void *dev) { -#ifdef REAL_DMA struct Scsi_Host *instance = dev; struct NCR5380_hostdata *hostdata = shost_priv(instance); int dma_stat; @@ -342,8 +296,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dev) tt_scsi_dma.dma_ctrl = 0; } -#endif /* REAL_DMA */ - NCR5380_intr(irq, dev); return IRQ_HANDLED; @@ -352,7 +304,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dev) static irqreturn_t scsi_falcon_intr(int irq, void *dev) { -#ifdef REAL_DMA struct Scsi_Host *instance = dev; struct NCR5380_hostdata *hostdata = shost_priv(instance); int dma_stat; @@ -405,15 +356,12 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dev) atari_dma_orig_addr = NULL; } -#endif /* REAL_DMA */ - NCR5380_intr(irq, dev); return IRQ_HANDLED; } -#ifdef REAL_DMA static void atari_scsi_fetch_restbytes(void) { int nr; @@ -436,7 +384,6 @@ static void atari_scsi_fetch_restbytes(void) *dst++ = *src++; } } -#endif /* REAL_DMA */ /* This function releases the lock on the DMA chip if there is no @@ -464,6 +411,10 @@ static int falcon_get_lock(struct Scsi_Host *instance) if (IS_A_TT()) return 1; + if (stdma_is_locked_by(scsi_falcon_intr) && + instance->hostt->can_queue > 1) + return 1; + if (in_interrupt()) return stdma_try_lock(scsi_falcon_intr, instance); @@ -495,8 +446,7 @@ static int __init atari_scsi_setup(char *str) setup_sg_tablesize = ints[3]; if (ints[0] >= 4) setup_hostid = ints[4]; - if (ints[0] >= 5) - setup_use_tagged_queuing = ints[5]; + /* ints[5] (use_tagged_queuing) is ignored */ /* ints[6] (use_pdma) is ignored */ if (ints[0] >= 7) setup_toshiba_delay = 
ints[7]; @@ -508,8 +458,6 @@ __setup("atascsi=", atari_scsi_setup); #endif /* !MODULE */ -#if defined(REAL_DMA) - static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, void *data, unsigned long count, int dir) @@ -545,9 +493,6 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, */ dma_cache_maintenance(addr, count, dir); - if (count == 0) - printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n"); - if (IS_A_TT()) { tt_scsi_dma.dma_ctrl = dir; SCSI_DMA_WRITE_P(dma_addr, addr); @@ -624,6 +569,9 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, { unsigned long possible_len, limit; + if (wanted_len < DMA_MIN_SIZE) + return 0; + if (IS_A_TT()) /* TT SCSI DMA can transfer arbitrary #bytes */ return wanted_len; @@ -703,9 +651,6 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, } -#endif /* REAL_DMA */ - - /* NCR5380 register access functions * * There are separate functions for TT and Falcon, because the access @@ -736,7 +681,7 @@ static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value) } -#include "atari_NCR5380.c" +#include "NCR5380.c" static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) { @@ -745,7 +690,6 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) local_irq_save(flags); -#ifdef REAL_DMA /* Abort a maybe active DMA transfer */ if (IS_A_TT()) { tt_scsi_dma.dma_ctrl = 0; @@ -754,7 +698,6 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) atari_dma_active = 0; atari_dma_orig_addr = NULL; } -#endif rv = NCR5380_bus_reset(cmd); @@ -781,6 +724,7 @@ static struct scsi_host_template atari_scsi_template = { .eh_abort_handler = atari_scsi_abort, .eh_bus_reset_handler = atari_scsi_bus_reset, .this_id = 7, + .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, .cmd_size = NCR5380_CMD_SIZE, }; @@ -804,24 +748,11 @@ static int __init atari_scsi_probe(struct platform_device *pdev) atari_scsi_reg_write = atari_scsi_falcon_reg_write; } - /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. - * Higher values should work, too; try it! - * (But cmd_per_lun costs memory!) - * - * But there seems to be a bug somewhere that requires CAN_QUEUE to be - * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since - * changed CMD_PER_LUN... - * - * Note: The Falcon currently uses 8/1 setting due to unsolved problems - * with cmd_per_lun != 1 - */ if (ATARIHW_PRESENT(TT_SCSI)) { atari_scsi_template.can_queue = 16; - atari_scsi_template.cmd_per_lun = 8; atari_scsi_template.sg_tablesize = SG_ALL; } else { - atari_scsi_template.can_queue = 8; - atari_scsi_template.cmd_per_lun = 1; + atari_scsi_template.can_queue = 1; atari_scsi_template.sg_tablesize = SG_NONE; } @@ -850,8 +781,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev) } } - -#ifdef REAL_DMA /* If running on a Falcon and if there's TT-Ram (i.e., more than one * memory block, since there's always ST-Ram in a Falcon), then * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers @@ -867,7 +796,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev) atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); atari_dma_orig_addr = 0; } -#endif instance = scsi_host_alloc(&atari_scsi_template, sizeof(struct NCR5380_hostdata)); @@ -879,9 +807,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev) instance->irq = irq->start; host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP; -#ifdef SUPPORT_TAGS - host_flags |= setup_use_tagged_queuing > 0 ? 
FLAG_TAGGED_QUEUING : 0; -#endif host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; error = NCR5380_init(instance, host_flags); @@ -897,7 +822,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev) goto fail_irq; } tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ -#ifdef REAL_DMA + tt_scsi_dma.dma_ctrl = 0; atari_dma_residual = 0; @@ -919,17 +844,14 @@ static int __init atari_scsi_probe(struct platform_device *pdev) hostdata->read_overruns = 4; } -#endif } else { /* Nothing to do for the interrupt: the ST-DMA is initialized * already. */ -#ifdef REAL_DMA atari_dma_residual = 0; atari_dma_active = 0; atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000); -#endif } NCR5380_maybe_reset_bus(instance); diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 06dc215ea050..0f797a55d504 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -874,8 +874,8 @@ bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, /* * itnim callbacks */ -void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, - struct bfad_itnim_s **itnim_drv); +int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, + struct bfad_itnim_s **itnim_drv); void bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv); void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv); diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c index 4f089d76afb1..2e3b19e7e079 100644 --- a/drivers/scsi/bfa/bfa_fcs_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c @@ -588,12 +588,13 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) struct bfa_fcs_lport_s *port = rport->port; struct bfa_fcs_itnim_s *itnim; struct bfad_itnim_s *itnim_drv; + int ret; /* * call bfad to allocate the itnim */ - bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); - if (itnim == NULL) { + ret = bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); + if (ret) { bfa_trc(port->fcs, rport->pwwn); return NULL; } diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 6c805e13f8dd..02d806012fa1 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -440,13 +440,13 @@ bfad_im_slave_destroy(struct scsi_device *sdev) * BFA FCS itnim alloc callback, after successful PRLI * Context: Interrupt */ -void +int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, struct bfad_itnim_s **itnim_drv) { *itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC); if (*itnim_drv == NULL) - return; + return -ENOMEM; (*itnim_drv)->im = bfad->im; *itnim = &(*itnim_drv)->fcs_itnim; @@ -457,6 +457,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, */ INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler); bfad->bfad_flags |= BFAD_RPORT_ONLINE; + return 0; } /* diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 499e369eabf0..fdd4eb4e41b2 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -65,7 +65,7 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "2.9.6" +#define BNX2FC_VERSION "2.10.3" #define PFX "bnx2fc: " @@ -261,6 +261,7 @@ struct bnx2fc_interface { u8 vlan_enabled; int vlan_id; bool enabled; + u8 tm_timeout; }; #define bnx2fc_from_ctlr(x) \ diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index d7029ea5d319..a1881993982c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ 
b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -107,6 +107,26 @@ MODULE_PARM_DESC(debug_logging, "\t\t0x10 - fcoe L2 fame related logs.\n" "\t\t0xff - LOG all messages."); +uint bnx2fc_devloss_tmo; +module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO); +MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports " + "attached via bnx2fc."); + +uint bnx2fc_max_luns = BNX2FC_MAX_LUN; +module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO); +MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default " + "0xffff."); + +uint bnx2fc_queue_depth; +module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO); +MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices " + "attached via bnx2fc."); + +uint bnx2fc_log_fka; +module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is " + "initiating a FIP keep alive when debug logging is enabled."); + static int bnx2fc_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu); /* notification function for CPU hotplug events */ @@ -692,7 +712,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) int rc = 0; shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; - shost->max_lun = BNX2FC_MAX_LUN; + shost->max_lun = bnx2fc_max_luns; shost->max_id = BNX2FC_MAX_FCP_TGT; shost->max_channel = 0; if (lport->vport) @@ -1061,6 +1081,20 @@ static u8 *bnx2fc_get_src_mac(struct fc_lport *lport) */ static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { + struct fip_header *fiph; + struct ethhdr *eth_hdr; + u16 op; + u8 sub; + + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka) + BNX2FC_MISC_DBG("Sending FKA from %pM to %pM.\n", + eth_hdr->h_source, eth_hdr->h_dest); + skb->dev = bnx2fc_from_ctlr(fip)->netdev; dev_queue_xmit(skb); } @@ -1102,6 +1136,9 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) return -EIO; } + if (bnx2fc_devloss_tmo) + fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo; + if (disabled) { fc_vport_set_state(vport, FC_VPORT_DISABLED); } else { @@ -1495,6 +1532,9 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, } fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + if (bnx2fc_devloss_tmo) + fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo; + /* Allocate exchange manager */ if (!npiv) rc = bnx2fc_em_config(lport, hba); @@ -1999,6 +2039,8 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) return; } + pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name); + /* Add HBA to the adapter list */ mutex_lock(&bnx2fc_dev_lock); list_add_tail(&hba->list, &adapter_list); @@ -2293,6 +2335,7 @@ static int _bnx2fc_create(struct net_device *netdev, ctlr = bnx2fc_to_ctlr(interface); cdev = fcoe_ctlr_to_ctlr_dev(ctlr); interface->vlan_id = vlan_id; + interface->tm_timeout = BNX2FC_TM_TIMEOUT; interface->timer_work_queue = create_singlethread_workqueue("bnx2fc_timer_wq"); @@ -2612,6 +2655,15 @@ static int bnx2fc_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } +static int bnx2fc_slave_configure(struct scsi_device *sdev) +{ + if (!bnx2fc_queue_depth) + return 0; + + scsi_change_queue_depth(sdev, bnx2fc_queue_depth); + return 0; +} + /** * bnx2fc_mod_init - module init entry point * @@ -2858,6 +2910,50 @@ 
static struct fc_function_template bnx2fc_vport_xport_function = { .bsg_request = fc_lport_bsg_request, }; +/* + * Additional scsi_host attributes. + */ +static ssize_t +bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + + sprintf(buf, "%u\n", interface->tm_timeout); + return strlen(buf); +} + +static ssize_t +bnx2fc_tm_timeout_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + int rval, val; + + rval = kstrtouint(buf, 10, &val); + if (rval) + return rval; + if (val > 255) + return -ERANGE; + + interface->tm_timeout = (u8)val; + return strlen(buf); +} + +static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show, + bnx2fc_tm_timeout_store); + +static struct device_attribute *bnx2fc_host_attrs[] = { + &dev_attr_tm_timeout, + NULL, +}; + /** * scsi_host_template structure used while registering with SCSI-ml */ @@ -2877,6 +2973,8 @@ static struct scsi_host_template bnx2fc_shost_template = { .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, .max_sectors = 1024, .track_queue_depth = 1, + .slave_configure = bnx2fc_slave_configure, + .shost_attrs = bnx2fc_host_attrs, }; static struct libfc_function_template bnx2fc_libfc_fcn_templ = { diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 2230dab67ca5..026f394a3851 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -179,12 +179,24 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) bnx2fc_unmap_sg_list(io_req); io_req->sc_cmd = NULL; + + /* Sanity checks before returning command to mid-layer */ if (!sc_cmd) { printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. 
" "IO(0x%x) already cleaned up\n", io_req->xid); return; } + if (!sc_cmd->device) { + pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid); + return; + } + if (!sc_cmd->device->host) { + pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n", + io_req->xid); + return; + } + sc_cmd->result = err_code << 16; BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", @@ -770,7 +782,7 @@ retry_tmf: spin_unlock_bh(&tgt->tgt_lock); rc = wait_for_completion_timeout(&io_req->tm_done, - BNX2FC_TM_TIMEOUT * HZ); + interface->tm_timeout * HZ); spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_comp = 0; diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 72894378ffcf..133901fd3e35 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -675,7 +675,7 @@ bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid) { struct list_head *list; struct list_head *tmp; - struct bnx2i_endpoint *ep; + struct bnx2i_endpoint *ep = NULL; read_lock_bh(&hba->ep_rdwr_lock); list_for_each_safe(list, tmp, &hba->ep_ofld_list) { @@ -703,7 +703,7 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) { struct list_head *list; struct list_head *tmp; - struct bnx2i_endpoint *ep; + struct bnx2i_endpoint *ep = NULL; read_lock_bh(&hba->ep_rdwr_lock); list_for_each_safe(list, tmp, &hba->ep_destroy_list) { diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index fa09d4be2b53..83458f7a2824 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -292,850 +292,30 @@ bool scsi_opcode_sa_name(int opcode, int service_action, struct error_info { unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */ - const char * text; + unsigned short size; }; /* - * The canonical list of T10 Additional Sense Codes is available at: - * http://www.t10.org/lists/asc-num.txt [most recent: 20141221] + * There are 700+ entries in this table. To save space, we don't store + * (code, pointer) pairs, which would make sizeof(struct + * error_info)==16 on 64 bits. Rather, the second element just stores + * the size (including \0) of the corresponding string, and we use the + * sum of these to get the appropriate offset into additional_text + * defined below. This approach saves 12 bytes per entry. 
*/ - static const struct error_info additional[] = { - {0x0000, "No additional sense information"}, - {0x0001, "Filemark detected"}, - {0x0002, "End-of-partition/medium detected"}, - {0x0003, "Setmark detected"}, - {0x0004, "Beginning-of-partition/medium detected"}, - {0x0005, "End-of-data detected"}, - {0x0006, "I/O process terminated"}, - {0x0007, "Programmable early warning detected"}, - {0x0011, "Audio play operation in progress"}, - {0x0012, "Audio play operation paused"}, - {0x0013, "Audio play operation successfully completed"}, - {0x0014, "Audio play operation stopped due to error"}, - {0x0015, "No current audio status to return"}, - {0x0016, "Operation in progress"}, - {0x0017, "Cleaning requested"}, - {0x0018, "Erase operation in progress"}, - {0x0019, "Locate operation in progress"}, - {0x001A, "Rewind operation in progress"}, - {0x001B, "Set capacity operation in progress"}, - {0x001C, "Verify operation in progress"}, - {0x001D, "ATA pass through information available"}, - {0x001E, "Conflicting SA creation request"}, - {0x001F, "Logical unit transitioning to another power condition"}, - {0x0020, "Extended copy information available"}, - {0x0021, "Atomic command aborted due to ACA"}, - - {0x0100, "No index/sector signal"}, - - {0x0200, "No seek complete"}, - - {0x0300, "Peripheral device write fault"}, - {0x0301, "No write current"}, - {0x0302, "Excessive write errors"}, - - {0x0400, "Logical unit not ready, cause not reportable"}, - {0x0401, "Logical unit is in process of becoming ready"}, - {0x0402, "Logical unit not ready, initializing command required"}, - {0x0403, "Logical unit not ready, manual intervention required"}, - {0x0404, "Logical unit not ready, format in progress"}, - {0x0405, "Logical unit not ready, rebuild in progress"}, - {0x0406, "Logical unit not ready, recalculation in progress"}, - {0x0407, "Logical unit not ready, operation in progress"}, - {0x0408, "Logical unit not ready, long write in progress"}, - {0x0409, "Logical unit not ready, self-test in progress"}, - {0x040A, "Logical unit not accessible, asymmetric access state " - "transition"}, - {0x040B, "Logical unit not accessible, target port in standby state"}, - {0x040C, "Logical unit not accessible, target port in unavailable " - "state"}, - {0x040D, "Logical unit not ready, structure check required"}, - {0x040E, "Logical unit not ready, security session in progress"}, - {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, - {0x0411, "Logical unit not ready, notify (enable spinup) required"}, - {0x0412, "Logical unit not ready, offline"}, - {0x0413, "Logical unit not ready, SA creation in progress"}, - {0x0414, "Logical unit not ready, space allocation in progress"}, - {0x0415, "Logical unit not ready, robotics disabled"}, - {0x0416, "Logical unit not ready, configuration required"}, - {0x0417, "Logical unit not ready, calibration required"}, - {0x0418, "Logical unit not ready, a door is open"}, - {0x0419, "Logical unit not ready, operating in sequential mode"}, - {0x041A, "Logical unit not ready, start stop unit command in " - "progress"}, - {0x041B, "Logical unit not ready, sanitize in progress"}, - {0x041C, "Logical unit not ready, additional power use not yet " - "granted"}, - {0x041D, "Logical unit not ready, configuration in progress"}, - {0x041E, "Logical unit not ready, microcode activation required"}, - {0x041F, "Logical unit not ready, microcode download required"}, - {0x0420, "Logical unit not ready, logical unit reset required"}, - {0x0421, "Logical unit not ready, hard 
reset required"}, - {0x0422, "Logical unit not ready, power cycle required"}, - - {0x0500, "Logical unit does not respond to selection"}, - - {0x0600, "No reference position found"}, - - {0x0700, "Multiple peripheral devices selected"}, - - {0x0800, "Logical unit communication failure"}, - {0x0801, "Logical unit communication time-out"}, - {0x0802, "Logical unit communication parity error"}, - {0x0803, "Logical unit communication CRC error (Ultra-DMA/32)"}, - {0x0804, "Unreachable copy target"}, - - {0x0900, "Track following error"}, - {0x0901, "Tracking servo failure"}, - {0x0902, "Focus servo failure"}, - {0x0903, "Spindle servo failure"}, - {0x0904, "Head select fault"}, - {0x0905, "Vibration induced tracking error"}, - - {0x0A00, "Error log overflow"}, - - {0x0B00, "Warning"}, - {0x0B01, "Warning - specified temperature exceeded"}, - {0x0B02, "Warning - enclosure degraded"}, - {0x0B03, "Warning - background self-test failed"}, - {0x0B04, "Warning - background pre-scan detected medium error"}, - {0x0B05, "Warning - background medium scan detected medium error"}, - {0x0B06, "Warning - non-volatile cache now volatile"}, - {0x0B07, "Warning - degraded power to non-volatile cache"}, - {0x0B08, "Warning - power loss expected"}, - {0x0B09, "Warning - device statistics notification active"}, - - {0x0C00, "Write error"}, - {0x0C01, "Write error - recovered with auto reallocation"}, - {0x0C02, "Write error - auto reallocation failed"}, - {0x0C03, "Write error - recommend reassignment"}, - {0x0C04, "Compression check miscompare error"}, - {0x0C05, "Data expansion occurred during compression"}, - {0x0C06, "Block not compressible"}, - {0x0C07, "Write error - recovery needed"}, - {0x0C08, "Write error - recovery failed"}, - {0x0C09, "Write error - loss of streaming"}, - {0x0C0A, "Write error - padding blocks added"}, - {0x0C0B, "Auxiliary memory write error"}, - {0x0C0C, "Write error - unexpected unsolicited data"}, - {0x0C0D, "Write error - not enough unsolicited data"}, - {0x0C0E, "Multiple write errors"}, - {0x0C0F, "Defects in error window"}, - {0x0C10, "Incomplete multiple atomic write operations"}, - - {0x0D00, "Error detected by third party temporary initiator"}, - {0x0D01, "Third party device failure"}, - {0x0D02, "Copy target device not reachable"}, - {0x0D03, "Incorrect copy target device type"}, - {0x0D04, "Copy target device data underrun"}, - {0x0D05, "Copy target device data overrun"}, - - {0x0E00, "Invalid information unit"}, - {0x0E01, "Information unit too short"}, - {0x0E02, "Information unit too long"}, - {0x0E03, "Invalid field in command information unit"}, - - {0x1000, "Id CRC or ECC error"}, - {0x1001, "Logical block guard check failed"}, - {0x1002, "Logical block application tag check failed"}, - {0x1003, "Logical block reference tag check failed"}, - {0x1004, "Logical block protection error on recover buffered data"}, - {0x1005, "Logical block protection method error"}, - - {0x1100, "Unrecovered read error"}, - {0x1101, "Read retries exhausted"}, - {0x1102, "Error too long to correct"}, - {0x1103, "Multiple read errors"}, - {0x1104, "Unrecovered read error - auto reallocate failed"}, - {0x1105, "L-EC uncorrectable error"}, - {0x1106, "CIRC unrecovered error"}, - {0x1107, "Data re-synchronization error"}, - {0x1108, "Incomplete block read"}, - {0x1109, "No gap found"}, - {0x110A, "Miscorrected error"}, - {0x110B, "Unrecovered read error - recommend reassignment"}, - {0x110C, "Unrecovered read error - recommend rewrite the data"}, - {0x110D, "De-compression CRC error"}, - 
{0x110E, "Cannot decompress using declared algorithm"}, - {0x110F, "Error reading UPC/EAN number"}, - {0x1110, "Error reading ISRC number"}, - {0x1111, "Read error - loss of streaming"}, - {0x1112, "Auxiliary memory read error"}, - {0x1113, "Read error - failed retransmission request"}, - {0x1114, "Read error - lba marked bad by application client"}, - {0x1115, "Write after sanitize required"}, - - {0x1200, "Address mark not found for id field"}, - - {0x1300, "Address mark not found for data field"}, - - {0x1400, "Recorded entity not found"}, - {0x1401, "Record not found"}, - {0x1402, "Filemark or setmark not found"}, - {0x1403, "End-of-data not found"}, - {0x1404, "Block sequence error"}, - {0x1405, "Record not found - recommend reassignment"}, - {0x1406, "Record not found - data auto-reallocated"}, - {0x1407, "Locate operation failure"}, - - {0x1500, "Random positioning error"}, - {0x1501, "Mechanical positioning error"}, - {0x1502, "Positioning error detected by read of medium"}, - - {0x1600, "Data synchronization mark error"}, - {0x1601, "Data sync error - data rewritten"}, - {0x1602, "Data sync error - recommend rewrite"}, - {0x1603, "Data sync error - data auto-reallocated"}, - {0x1604, "Data sync error - recommend reassignment"}, - - {0x1700, "Recovered data with no error correction applied"}, - {0x1701, "Recovered data with retries"}, - {0x1702, "Recovered data with positive head offset"}, - {0x1703, "Recovered data with negative head offset"}, - {0x1704, "Recovered data with retries and/or circ applied"}, - {0x1705, "Recovered data using previous sector id"}, - {0x1706, "Recovered data without ECC - data auto-reallocated"}, - {0x1707, "Recovered data without ECC - recommend reassignment"}, - {0x1708, "Recovered data without ECC - recommend rewrite"}, - {0x1709, "Recovered data without ECC - data rewritten"}, - - {0x1800, "Recovered data with error correction applied"}, - {0x1801, "Recovered data with error corr. 
& retries applied"}, - {0x1802, "Recovered data - data auto-reallocated"}, - {0x1803, "Recovered data with CIRC"}, - {0x1804, "Recovered data with L-EC"}, - {0x1805, "Recovered data - recommend reassignment"}, - {0x1806, "Recovered data - recommend rewrite"}, - {0x1807, "Recovered data with ECC - data rewritten"}, - {0x1808, "Recovered data with linking"}, - - {0x1900, "Defect list error"}, - {0x1901, "Defect list not available"}, - {0x1902, "Defect list error in primary list"}, - {0x1903, "Defect list error in grown list"}, - - {0x1A00, "Parameter list length error"}, - - {0x1B00, "Synchronous data transfer error"}, - - {0x1C00, "Defect list not found"}, - {0x1C01, "Primary defect list not found"}, - {0x1C02, "Grown defect list not found"}, - - {0x1D00, "Miscompare during verify operation"}, - {0x1D01, "Miscompare verify of unmapped LBA"}, - - {0x1E00, "Recovered id with ECC correction"}, - - {0x1F00, "Partial defect list transfer"}, - - {0x2000, "Invalid command operation code"}, - {0x2001, "Access denied - initiator pending-enrolled"}, - {0x2002, "Access denied - no access rights"}, - {0x2003, "Access denied - invalid mgmt id key"}, - {0x2004, "Illegal command while in write capable state"}, - {0x2005, "Obsolete"}, - {0x2006, "Illegal command while in explicit address mode"}, - {0x2007, "Illegal command while in implicit address mode"}, - {0x2008, "Access denied - enrollment conflict"}, - {0x2009, "Access denied - invalid LU identifier"}, - {0x200A, "Access denied - invalid proxy token"}, - {0x200B, "Access denied - ACL LUN conflict"}, - {0x200C, "Illegal command when not in append-only mode"}, - - {0x2100, "Logical block address out of range"}, - {0x2101, "Invalid element address"}, - {0x2102, "Invalid address for write"}, - {0x2103, "Invalid write crossing layer jump"}, - {0x2104, "Unaligned write command"}, - {0x2105, "Write boundary violation"}, - {0x2106, "Attempt to read invalid data"}, - {0x2107, "Read boundary violation"}, - - {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, - - {0x2300, "Invalid token operation, cause not reportable"}, - {0x2301, "Invalid token operation, unsupported token type"}, - {0x2302, "Invalid token operation, remote token usage not supported"}, - {0x2303, "Invalid token operation, remote rod token creation not " - "supported"}, - {0x2304, "Invalid token operation, token unknown"}, - {0x2305, "Invalid token operation, token corrupt"}, - {0x2306, "Invalid token operation, token revoked"}, - {0x2307, "Invalid token operation, token expired"}, - {0x2308, "Invalid token operation, token cancelled"}, - {0x2309, "Invalid token operation, token deleted"}, - {0x230A, "Invalid token operation, invalid token length"}, - - {0x2400, "Invalid field in cdb"}, - {0x2401, "CDB decryption error"}, - {0x2402, "Obsolete"}, - {0x2403, "Obsolete"}, - {0x2404, "Security audit value frozen"}, - {0x2405, "Security working key frozen"}, - {0x2406, "Nonce not unique"}, - {0x2407, "Nonce timestamp out of range"}, - {0x2408, "Invalid XCDB"}, - - {0x2500, "Logical unit not supported"}, - - {0x2600, "Invalid field in parameter list"}, - {0x2601, "Parameter not supported"}, - {0x2602, "Parameter value invalid"}, - {0x2603, "Threshold parameters not supported"}, - {0x2604, "Invalid release of persistent reservation"}, - {0x2605, "Data decryption error"}, - {0x2606, "Too many target descriptors"}, - {0x2607, "Unsupported target descriptor type code"}, - {0x2608, "Too many segment descriptors"}, - {0x2609, "Unsupported segment descriptor type code"}, - {0x260A, "Unexpected 
inexact segment"}, - {0x260B, "Inline data length exceeded"}, - {0x260C, "Invalid operation for copy source or destination"}, - {0x260D, "Copy segment granularity violation"}, - {0x260E, "Invalid parameter while port is enabled"}, - {0x260F, "Invalid data-out buffer integrity check value"}, - {0x2610, "Data decryption key fail limit reached"}, - {0x2611, "Incomplete key-associated data set"}, - {0x2612, "Vendor specific key reference not found"}, - - {0x2700, "Write protected"}, - {0x2701, "Hardware write protected"}, - {0x2702, "Logical unit software write protected"}, - {0x2703, "Associated write protect"}, - {0x2704, "Persistent write protect"}, - {0x2705, "Permanent write protect"}, - {0x2706, "Conditional write protect"}, - {0x2707, "Space allocation failed write protect"}, - {0x2708, "Zone is read only"}, - - {0x2800, "Not ready to ready change, medium may have changed"}, - {0x2801, "Import or export element accessed"}, - {0x2802, "Format-layer may have changed"}, - {0x2803, "Import/export element accessed, medium changed"}, - - {0x2900, "Power on, reset, or bus device reset occurred"}, - {0x2901, "Power on occurred"}, - {0x2902, "Scsi bus reset occurred"}, - {0x2903, "Bus device reset function occurred"}, - {0x2904, "Device internal reset"}, - {0x2905, "Transceiver mode changed to single-ended"}, - {0x2906, "Transceiver mode changed to lvd"}, - {0x2907, "I_T nexus loss occurred"}, - - {0x2A00, "Parameters changed"}, - {0x2A01, "Mode parameters changed"}, - {0x2A02, "Log parameters changed"}, - {0x2A03, "Reservations preempted"}, - {0x2A04, "Reservations released"}, - {0x2A05, "Registrations preempted"}, - {0x2A06, "Asymmetric access state changed"}, - {0x2A07, "Implicit asymmetric access state transition failed"}, - {0x2A08, "Priority changed"}, - {0x2A09, "Capacity data has changed"}, - {0x2A0A, "Error history I_T nexus cleared"}, - {0x2A0B, "Error history snapshot released"}, - {0x2A0C, "Error recovery attributes have changed"}, - {0x2A0D, "Data encryption capabilities changed"}, - {0x2A10, "Timestamp changed"}, - {0x2A11, "Data encryption parameters changed by another i_t nexus"}, - {0x2A12, "Data encryption parameters changed by vendor specific " - "event"}, - {0x2A13, "Data encryption key instance counter has changed"}, - {0x2A14, "SA creation capabilities data has changed"}, - {0x2A15, "Medium removal prevention preempted"}, - - {0x2B00, "Copy cannot execute since host cannot disconnect"}, - - {0x2C00, "Command sequence error"}, - {0x2C01, "Too many windows specified"}, - {0x2C02, "Invalid combination of windows specified"}, - {0x2C03, "Current program area is not empty"}, - {0x2C04, "Current program area is empty"}, - {0x2C05, "Illegal power condition request"}, - {0x2C06, "Persistent prevent conflict"}, - {0x2C07, "Previous busy status"}, - {0x2C08, "Previous task set full status"}, - {0x2C09, "Previous reservation conflict status"}, - {0x2C0A, "Partition or collection contains user objects"}, - {0x2C0B, "Not reserved"}, - {0x2C0C, "Orwrite generation does not match"}, - {0x2C0D, "Reset write pointer not allowed"}, - {0x2C0E, "Zone is offline"}, - - {0x2D00, "Overwrite error on update in place"}, - - {0x2E00, "Insufficient time for operation"}, - {0x2E01, "Command timeout before processing"}, - {0x2E02, "Command timeout during processing"}, - {0x2E03, "Command timeout during processing due to error recovery"}, - - {0x2F00, "Commands cleared by another initiator"}, - {0x2F01, "Commands cleared by power loss notification"}, - {0x2F02, "Commands cleared by device server"}, - 
{0x2F03, "Some commands cleared by queuing layer event"}, - - {0x3000, "Incompatible medium installed"}, - {0x3001, "Cannot read medium - unknown format"}, - {0x3002, "Cannot read medium - incompatible format"}, - {0x3003, "Cleaning cartridge installed"}, - {0x3004, "Cannot write medium - unknown format"}, - {0x3005, "Cannot write medium - incompatible format"}, - {0x3006, "Cannot format medium - incompatible medium"}, - {0x3007, "Cleaning failure"}, - {0x3008, "Cannot write - application code mismatch"}, - {0x3009, "Current session not fixated for append"}, - {0x300A, "Cleaning request rejected"}, - {0x300C, "WORM medium - overwrite attempted"}, - {0x300D, "WORM medium - integrity check"}, - {0x3010, "Medium not formatted"}, - {0x3011, "Incompatible volume type"}, - {0x3012, "Incompatible volume qualifier"}, - {0x3013, "Cleaning volume expired"}, - - {0x3100, "Medium format corrupted"}, - {0x3101, "Format command failed"}, - {0x3102, "Zoned formatting failed due to spare linking"}, - {0x3103, "Sanitize command failed"}, - - {0x3200, "No defect spare location available"}, - {0x3201, "Defect list update failure"}, - - {0x3300, "Tape length error"}, - - {0x3400, "Enclosure failure"}, - - {0x3500, "Enclosure services failure"}, - {0x3501, "Unsupported enclosure function"}, - {0x3502, "Enclosure services unavailable"}, - {0x3503, "Enclosure services transfer failure"}, - {0x3504, "Enclosure services transfer refused"}, - {0x3505, "Enclosure services checksum error"}, - - {0x3600, "Ribbon, ink, or toner failure"}, - - {0x3700, "Rounded parameter"}, - - {0x3800, "Event status notification"}, - {0x3802, "Esn - power management class event"}, - {0x3804, "Esn - media class event"}, - {0x3806, "Esn - device busy class event"}, - {0x3807, "Thin Provisioning soft threshold reached"}, - - {0x3900, "Saving parameters not supported"}, - - {0x3A00, "Medium not present"}, - {0x3A01, "Medium not present - tray closed"}, - {0x3A02, "Medium not present - tray open"}, - {0x3A03, "Medium not present - loadable"}, - {0x3A04, "Medium not present - medium auxiliary memory accessible"}, - - {0x3B00, "Sequential positioning error"}, - {0x3B01, "Tape position error at beginning-of-medium"}, - {0x3B02, "Tape position error at end-of-medium"}, - {0x3B03, "Tape or electronic vertical forms unit not ready"}, - {0x3B04, "Slew failure"}, - {0x3B05, "Paper jam"}, - {0x3B06, "Failed to sense top-of-form"}, - {0x3B07, "Failed to sense bottom-of-form"}, - {0x3B08, "Reposition error"}, - {0x3B09, "Read past end of medium"}, - {0x3B0A, "Read past beginning of medium"}, - {0x3B0B, "Position past end of medium"}, - {0x3B0C, "Position past beginning of medium"}, - {0x3B0D, "Medium destination element full"}, - {0x3B0E, "Medium source element empty"}, - {0x3B0F, "End of medium reached"}, - {0x3B11, "Medium magazine not accessible"}, - {0x3B12, "Medium magazine removed"}, - {0x3B13, "Medium magazine inserted"}, - {0x3B14, "Medium magazine locked"}, - {0x3B15, "Medium magazine unlocked"}, - {0x3B16, "Mechanical positioning or changer error"}, - {0x3B17, "Read past end of user object"}, - {0x3B18, "Element disabled"}, - {0x3B19, "Element enabled"}, - {0x3B1A, "Data transfer device removed"}, - {0x3B1B, "Data transfer device inserted"}, - {0x3B1C, "Too many logical objects on partition to support " - "operation"}, - - {0x3D00, "Invalid bits in identify message"}, - - {0x3E00, "Logical unit has not self-configured yet"}, - {0x3E01, "Logical unit failure"}, - {0x3E02, "Timeout on logical unit"}, - {0x3E03, "Logical unit failed 
self-test"}, - {0x3E04, "Logical unit unable to update self-test log"}, - - {0x3F00, "Target operating conditions have changed"}, - {0x3F01, "Microcode has been changed"}, - {0x3F02, "Changed operating definition"}, - {0x3F03, "Inquiry data has changed"}, - {0x3F04, "Component device attached"}, - {0x3F05, "Device identifier changed"}, - {0x3F06, "Redundancy group created or modified"}, - {0x3F07, "Redundancy group deleted"}, - {0x3F08, "Spare created or modified"}, - {0x3F09, "Spare deleted"}, - {0x3F0A, "Volume set created or modified"}, - {0x3F0B, "Volume set deleted"}, - {0x3F0C, "Volume set deassigned"}, - {0x3F0D, "Volume set reassigned"}, - {0x3F0E, "Reported luns data has changed"}, - {0x3F0F, "Echo buffer overwritten"}, - {0x3F10, "Medium loadable"}, - {0x3F11, "Medium auxiliary memory accessible"}, - {0x3F12, "iSCSI IP address added"}, - {0x3F13, "iSCSI IP address removed"}, - {0x3F14, "iSCSI IP address changed"}, - {0x3F15, "Inspect referrals sense descriptors"}, - {0x3F16, "Microcode has been changed without reset"}, -/* - * {0x40NN, "Ram failure"}, - * {0x40NN, "Diagnostic failure on component nn"}, - * {0x41NN, "Data path failure"}, - * {0x42NN, "Power-on or self-test failure"}, - */ - {0x4300, "Message error"}, - - {0x4400, "Internal target failure"}, - {0x4401, "Persistent reservation information lost"}, - {0x4471, "ATA device failed set features"}, - - {0x4500, "Select or reselect failure"}, - - {0x4600, "Unsuccessful soft reset"}, - - {0x4700, "Scsi parity error"}, - {0x4701, "Data phase CRC error detected"}, - {0x4702, "Scsi parity error detected during st data phase"}, - {0x4703, "Information unit iuCRC error detected"}, - {0x4704, "Asynchronous information protection error detected"}, - {0x4705, "Protocol service CRC error"}, - {0x4706, "Phy test function in progress"}, - {0x477f, "Some commands cleared by iSCSI Protocol event"}, - - {0x4800, "Initiator detected error message received"}, - - {0x4900, "Invalid message error"}, - - {0x4A00, "Command phase error"}, - - {0x4B00, "Data phase error"}, - {0x4B01, "Invalid target port transfer tag received"}, - {0x4B02, "Too much write data"}, - {0x4B03, "Ack/nak timeout"}, - {0x4B04, "Nak received"}, - {0x4B05, "Data offset error"}, - {0x4B06, "Initiator response timeout"}, - {0x4B07, "Connection lost"}, - {0x4B08, "Data-in buffer overflow - data buffer size"}, - {0x4B09, "Data-in buffer overflow - data buffer descriptor area"}, - {0x4B0A, "Data-in buffer error"}, - {0x4B0B, "Data-out buffer overflow - data buffer size"}, - {0x4B0C, "Data-out buffer overflow - data buffer descriptor area"}, - {0x4B0D, "Data-out buffer error"}, - {0x4B0E, "PCIe fabric error"}, - {0x4B0F, "PCIe completion timeout"}, - {0x4B10, "PCIe completer abort"}, - {0x4B11, "PCIe poisoned tlp received"}, - {0x4B12, "PCIe eCRC check failed"}, - {0x4B13, "PCIe unsupported request"}, - {0x4B14, "PCIe acs violation"}, - {0x4B15, "PCIe tlp prefix blocked"}, - - {0x4C00, "Logical unit failed self-configuration"}, -/* - * {0x4DNN, "Tagged overlapped commands (nn = queue tag)"}, - */ - {0x4E00, "Overlapped commands attempted"}, - - {0x5000, "Write append error"}, - {0x5001, "Write append position error"}, - {0x5002, "Position error related to timing"}, - - {0x5100, "Erase failure"}, - {0x5101, "Erase failure - incomplete erase operation detected"}, - - {0x5200, "Cartridge fault"}, - - {0x5300, "Media load or eject failed"}, - {0x5301, "Unload tape failure"}, - {0x5302, "Medium removal prevented"}, - {0x5303, "Medium removal prevented by data transfer element"}, - 
{0x5304, "Medium thread or unthread failure"}, - {0x5305, "Volume identifier invalid"}, - {0x5306, "Volume identifier missing"}, - {0x5307, "Duplicate volume identifier"}, - {0x5308, "Element status unknown"}, - {0x5309, "Data transfer device error - load failed"}, - {0x530a, "Data transfer device error - unload failed"}, - {0x530b, "Data transfer device error - unload missing"}, - {0x530c, "Data transfer device error - eject failed"}, - {0x530d, "Data transfer device error - library communication failed"}, - - {0x5400, "Scsi to host system interface failure"}, - - {0x5500, "System resource failure"}, - {0x5501, "System buffer full"}, - {0x5502, "Insufficient reservation resources"}, - {0x5503, "Insufficient resources"}, - {0x5504, "Insufficient registration resources"}, - {0x5505, "Insufficient access control resources"}, - {0x5506, "Auxiliary memory out of space"}, - {0x5507, "Quota error"}, - {0x5508, "Maximum number of supplemental decryption keys exceeded"}, - {0x5509, "Medium auxiliary memory not accessible"}, - {0x550A, "Data currently unavailable"}, - {0x550B, "Insufficient power for operation"}, - {0x550C, "Insufficient resources to create rod"}, - {0x550D, "Insufficient resources to create rod token"}, - {0x550E, "Insufficient zone resources"}, - - {0x5700, "Unable to recover table-of-contents"}, - - {0x5800, "Generation does not exist"}, - - {0x5900, "Updated block read"}, - - {0x5A00, "Operator request or state change input"}, - {0x5A01, "Operator medium removal request"}, - {0x5A02, "Operator selected write protect"}, - {0x5A03, "Operator selected write permit"}, - - {0x5B00, "Log exception"}, - {0x5B01, "Threshold condition met"}, - {0x5B02, "Log counter at maximum"}, - {0x5B03, "Log list codes exhausted"}, - - {0x5C00, "Rpl status change"}, - {0x5C01, "Spindles synchronized"}, - {0x5C02, "Spindles not synchronized"}, - - {0x5D00, "Failure prediction threshold exceeded"}, - {0x5D01, "Media failure prediction threshold exceeded"}, - {0x5D02, "Logical unit failure prediction threshold exceeded"}, - {0x5D03, "Spare area exhaustion prediction threshold exceeded"}, - {0x5D10, "Hardware impending failure general hard drive failure"}, - {0x5D11, "Hardware impending failure drive error rate too high"}, - {0x5D12, "Hardware impending failure data error rate too high"}, - {0x5D13, "Hardware impending failure seek error rate too high"}, - {0x5D14, "Hardware impending failure too many block reassigns"}, - {0x5D15, "Hardware impending failure access times too high"}, - {0x5D16, "Hardware impending failure start unit times too high"}, - {0x5D17, "Hardware impending failure channel parametrics"}, - {0x5D18, "Hardware impending failure controller detected"}, - {0x5D19, "Hardware impending failure throughput performance"}, - {0x5D1A, "Hardware impending failure seek time performance"}, - {0x5D1B, "Hardware impending failure spin-up retry count"}, - {0x5D1C, "Hardware impending failure drive calibration retry count"}, - {0x5D20, "Controller impending failure general hard drive failure"}, - {0x5D21, "Controller impending failure drive error rate too high"}, - {0x5D22, "Controller impending failure data error rate too high"}, - {0x5D23, "Controller impending failure seek error rate too high"}, - {0x5D24, "Controller impending failure too many block reassigns"}, - {0x5D25, "Controller impending failure access times too high"}, - {0x5D26, "Controller impending failure start unit times too high"}, - {0x5D27, "Controller impending failure channel parametrics"}, - {0x5D28, "Controller impending 
failure controller detected"}, - {0x5D29, "Controller impending failure throughput performance"}, - {0x5D2A, "Controller impending failure seek time performance"}, - {0x5D2B, "Controller impending failure spin-up retry count"}, - {0x5D2C, "Controller impending failure drive calibration retry count"}, - {0x5D30, "Data channel impending failure general hard drive failure"}, - {0x5D31, "Data channel impending failure drive error rate too high"}, - {0x5D32, "Data channel impending failure data error rate too high"}, - {0x5D33, "Data channel impending failure seek error rate too high"}, - {0x5D34, "Data channel impending failure too many block reassigns"}, - {0x5D35, "Data channel impending failure access times too high"}, - {0x5D36, "Data channel impending failure start unit times too high"}, - {0x5D37, "Data channel impending failure channel parametrics"}, - {0x5D38, "Data channel impending failure controller detected"}, - {0x5D39, "Data channel impending failure throughput performance"}, - {0x5D3A, "Data channel impending failure seek time performance"}, - {0x5D3B, "Data channel impending failure spin-up retry count"}, - {0x5D3C, "Data channel impending failure drive calibration retry " - "count"}, - {0x5D40, "Servo impending failure general hard drive failure"}, - {0x5D41, "Servo impending failure drive error rate too high"}, - {0x5D42, "Servo impending failure data error rate too high"}, - {0x5D43, "Servo impending failure seek error rate too high"}, - {0x5D44, "Servo impending failure too many block reassigns"}, - {0x5D45, "Servo impending failure access times too high"}, - {0x5D46, "Servo impending failure start unit times too high"}, - {0x5D47, "Servo impending failure channel parametrics"}, - {0x5D48, "Servo impending failure controller detected"}, - {0x5D49, "Servo impending failure throughput performance"}, - {0x5D4A, "Servo impending failure seek time performance"}, - {0x5D4B, "Servo impending failure spin-up retry count"}, - {0x5D4C, "Servo impending failure drive calibration retry count"}, - {0x5D50, "Spindle impending failure general hard drive failure"}, - {0x5D51, "Spindle impending failure drive error rate too high"}, - {0x5D52, "Spindle impending failure data error rate too high"}, - {0x5D53, "Spindle impending failure seek error rate too high"}, - {0x5D54, "Spindle impending failure too many block reassigns"}, - {0x5D55, "Spindle impending failure access times too high"}, - {0x5D56, "Spindle impending failure start unit times too high"}, - {0x5D57, "Spindle impending failure channel parametrics"}, - {0x5D58, "Spindle impending failure controller detected"}, - {0x5D59, "Spindle impending failure throughput performance"}, - {0x5D5A, "Spindle impending failure seek time performance"}, - {0x5D5B, "Spindle impending failure spin-up retry count"}, - {0x5D5C, "Spindle impending failure drive calibration retry count"}, - {0x5D60, "Firmware impending failure general hard drive failure"}, - {0x5D61, "Firmware impending failure drive error rate too high"}, - {0x5D62, "Firmware impending failure data error rate too high"}, - {0x5D63, "Firmware impending failure seek error rate too high"}, - {0x5D64, "Firmware impending failure too many block reassigns"}, - {0x5D65, "Firmware impending failure access times too high"}, - {0x5D66, "Firmware impending failure start unit times too high"}, - {0x5D67, "Firmware impending failure channel parametrics"}, - {0x5D68, "Firmware impending failure controller detected"}, - {0x5D69, "Firmware impending failure throughput performance"}, - {0x5D6A, 
"Firmware impending failure seek time performance"}, - {0x5D6B, "Firmware impending failure spin-up retry count"}, - {0x5D6C, "Firmware impending failure drive calibration retry count"}, - {0x5DFF, "Failure prediction threshold exceeded (false)"}, - - {0x5E00, "Low power condition on"}, - {0x5E01, "Idle condition activated by timer"}, - {0x5E02, "Standby condition activated by timer"}, - {0x5E03, "Idle condition activated by command"}, - {0x5E04, "Standby condition activated by command"}, - {0x5E05, "Idle_b condition activated by timer"}, - {0x5E06, "Idle_b condition activated by command"}, - {0x5E07, "Idle_c condition activated by timer"}, - {0x5E08, "Idle_c condition activated by command"}, - {0x5E09, "Standby_y condition activated by timer"}, - {0x5E0A, "Standby_y condition activated by command"}, - {0x5E41, "Power state change to active"}, - {0x5E42, "Power state change to idle"}, - {0x5E43, "Power state change to standby"}, - {0x5E45, "Power state change to sleep"}, - {0x5E47, "Power state change to device control"}, - - {0x6000, "Lamp failure"}, - - {0x6100, "Video acquisition error"}, - {0x6101, "Unable to acquire video"}, - {0x6102, "Out of focus"}, - - {0x6200, "Scan head positioning error"}, - - {0x6300, "End of user area encountered on this track"}, - {0x6301, "Packet does not fit in available space"}, - - {0x6400, "Illegal mode for this track"}, - {0x6401, "Invalid packet size"}, - - {0x6500, "Voltage fault"}, - - {0x6600, "Automatic document feeder cover up"}, - {0x6601, "Automatic document feeder lift up"}, - {0x6602, "Document jam in automatic document feeder"}, - {0x6603, "Document miss feed automatic in document feeder"}, - - {0x6700, "Configuration failure"}, - {0x6701, "Configuration of incapable logical units failed"}, - {0x6702, "Add logical unit failed"}, - {0x6703, "Modification of logical unit failed"}, - {0x6704, "Exchange of logical unit failed"}, - {0x6705, "Remove of logical unit failed"}, - {0x6706, "Attachment of logical unit failed"}, - {0x6707, "Creation of logical unit failed"}, - {0x6708, "Assign failure occurred"}, - {0x6709, "Multiply assigned logical unit"}, - {0x670A, "Set target port groups command failed"}, - {0x670B, "ATA device feature not enabled"}, - - {0x6800, "Logical unit not configured"}, - {0x6801, "Subsidiary logical unit not configured"}, - - {0x6900, "Data loss on logical unit"}, - {0x6901, "Multiple logical unit failures"}, - {0x6902, "Parity/data mismatch"}, - - {0x6A00, "Informational, refer to log"}, - - {0x6B00, "State change has occurred"}, - {0x6B01, "Redundancy level got better"}, - {0x6B02, "Redundancy level got worse"}, - - {0x6C00, "Rebuild failure occurred"}, - - {0x6D00, "Recalculate failure occurred"}, - - {0x6E00, "Command to logical unit failed"}, - - {0x6F00, "Copy protection key exchange failure - authentication " - "failure"}, - {0x6F01, "Copy protection key exchange failure - key not present"}, - {0x6F02, "Copy protection key exchange failure - key not established"}, - {0x6F03, "Read of scrambled sector without authentication"}, - {0x6F04, "Media region code is mismatched to logical unit region"}, - {0x6F05, "Drive region must be permanent/region reset count error"}, - {0x6F06, "Insufficient block count for binding nonce recording"}, - {0x6F07, "Conflict in binding nonce recording"}, -/* - * {0x70NN, "Decompression exception short algorithm id of nn"}, - */ - {0x7100, "Decompression exception long algorithm id"}, - - {0x7200, "Session fixation error"}, - {0x7201, "Session fixation error writing lead-in"}, - {0x7202, 
"Session fixation error writing lead-out"}, - {0x7203, "Session fixation error - incomplete track in session"}, - {0x7204, "Empty or partially written reserved track"}, - {0x7205, "No more track reservations allowed"}, - {0x7206, "RMZ extension is not allowed"}, - {0x7207, "No more test zone extensions are allowed"}, - - {0x7300, "Cd control error"}, - {0x7301, "Power calibration area almost full"}, - {0x7302, "Power calibration area is full"}, - {0x7303, "Power calibration area error"}, - {0x7304, "Program memory area update failure"}, - {0x7305, "Program memory area is full"}, - {0x7306, "RMA/PMA is almost full"}, - {0x7310, "Current power calibration area almost full"}, - {0x7311, "Current power calibration area is full"}, - {0x7317, "RDZ is full"}, - - {0x7400, "Security error"}, - {0x7401, "Unable to decrypt data"}, - {0x7402, "Unencrypted data encountered while decrypting"}, - {0x7403, "Incorrect data encryption key"}, - {0x7404, "Cryptographic integrity validation failed"}, - {0x7405, "Error decrypting data"}, - {0x7406, "Unknown signature verification key"}, - {0x7407, "Encryption parameters not useable"}, - {0x7408, "Digital signature validation failure"}, - {0x7409, "Encryption mode mismatch on read"}, - {0x740A, "Encrypted block not raw read enabled"}, - {0x740B, "Incorrect Encryption parameters"}, - {0x740C, "Unable to decrypt parameter list"}, - {0x740D, "Encryption algorithm disabled"}, - {0x7410, "SA creation parameter value invalid"}, - {0x7411, "SA creation parameter value rejected"}, - {0x7412, "Invalid SA usage"}, - {0x7421, "Data Encryption configuration prevented"}, - {0x7430, "SA creation parameter not supported"}, - {0x7440, "Authentication failed"}, - {0x7461, "External data encryption key manager access error"}, - {0x7462, "External data encryption key manager error"}, - {0x7463, "External data encryption key not found"}, - {0x7464, "External data encryption request not authorized"}, - {0x746E, "External data encryption control timeout"}, - {0x746F, "External data encryption control error"}, - {0x7471, "Logical unit access not authorized"}, - {0x7479, "Security conflict in translated device"}, - - {0, NULL} +#define SENSE_CODE(c, s) {c, sizeof(s)}, +#include "sense_codes.h" +#undef SENSE_CODE }; +static const char *additional_text = +#define SENSE_CODE(c, s) s "\0" +#include "sense_codes.h" +#undef SENSE_CODE + ; + struct error_info2 { unsigned char code1, code2_min, code2_max; const char * str; @@ -1197,11 +377,14 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) { int i; unsigned short code = ((asc << 8) | ascq); + unsigned offset = 0; *fmt = NULL; - for (i = 0; additional[i].text; i++) + for (i = 0; i < ARRAY_SIZE(additional); i++) { if (additional[i].code12 == code) - return additional[i].text; + return additional_text + offset; + offset += additional[i].size; + } for (i = 0; additional2[i].fmt; i++) { if (additional2[i].code1 == asc && ascq >= additional2[i].code2_min && diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index d8a5cb3cd2bd..ce1507023132 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -1615,6 +1615,13 @@ err1: * place at the same time and the failure was due to CXL services being * unable to keep up. * + * As this routine is called on ioctl context, it holds the ioctl r/w + * semaphore that is used to drain ioctls in recovery scenarios. 
The + * implementation to achieve the pacing described above (a local mutex) + * requires that the ioctl r/w semaphore be dropped and reacquired to + * avoid a 3-way deadlock when multiple process recoveries operate in + * parallel. + * * Because a user can detect an error condition before the kernel, it is * quite possible for this routine to act as the kernel's EEH detection * source (MMIO read of mbox_r). Because of this, there is a window of @@ -1642,9 +1649,17 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, int rc = 0; atomic_inc(&cfg->recovery_threads); + up_read(&cfg->ioctl_rwsem); rc = mutex_lock_interruptible(mutex); + down_read(&cfg->ioctl_rwsem); if (rc) goto out; + rc = check_state(cfg); + if (rc) { + dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc); + rc = -ENODEV; + goto out; + } dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n", __func__, recover->reason, rctxid); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index a655cf29c16f..752b5c9d1ab2 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -190,15 +190,18 @@ static int submit_stpg(struct scsi_device *sdev, int group_id, ALUA_FAILOVER_RETRIES, NULL, req_flags); } -struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, - int group_id) +static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, + int group_id) { struct alua_port_group *pg; + if (!id_str || !id_size || !strlen(id_str)) + return NULL; + list_for_each_entry(pg, &port_group_list, node) { if (pg->group_id != group_id) continue; - if (pg->device_id_len != id_size) + if (!pg->device_id_len || pg->device_id_len != id_size) continue; if (strncmp(pg->device_id_str, id_str, id_size)) continue; @@ -219,8 +222,8 @@ struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, * Allocate a new port_group structure for a given * device. */ -struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, - int group_id, int tpgs) +static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, + int group_id, int tpgs) { struct alua_port_group *pg, *tmp_pg; @@ -232,14 +235,14 @@ struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, sizeof(pg->device_id_str)); if (pg->device_id_len <= 0) { /* - * Internal error: TPGS supported but no device - * identifcation found. Disable ALUA support. + * TPGS supported but no device identification found. + * Generate private device identification. 
*/ - kfree(pg); sdev_printk(KERN_INFO, sdev, "%s: No device descriptors found\n", ALUA_DH_NAME); - return ERR_PTR(-ENXIO); + pg->device_id_str[0] = '\0'; + pg->device_id_len = 0; } pg->group_id = group_id; pg->tpgs = tpgs; @@ -354,9 +357,15 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, return SCSI_DH_NOMEM; return SCSI_DH_DEV_UNSUPP; } - sdev_printk(KERN_INFO, sdev, - "%s: device %s port group %x rel port %x\n", - ALUA_DH_NAME, pg->device_id_str, group_id, rel_port); + if (pg->device_id_len) + sdev_printk(KERN_INFO, sdev, + "%s: device %s port group %x rel port %x\n", + ALUA_DH_NAME, pg->device_id_str, + group_id, rel_port); + else + sdev_printk(KERN_INFO, sdev, + "%s: port group %x rel port %x\n", + ALUA_DH_NAME, group_id, rel_port); /* Check for existing port group references */ spin_lock(&h->pg_lock); diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c index 6c14e68b9e1a..9b5a457d4bca 100644 --- a/drivers/scsi/dmx3191d.c +++ b/drivers/scsi/dmx3191d.c @@ -34,11 +34,14 @@ * Definitions for the generic 5380 driver. */ -#define DONT_USE_INTR - #define NCR5380_read(reg) inb(instance->io_port + reg) #define NCR5380_write(reg, value) outb(value, instance->io_port + reg) +#define NCR5380_dma_xfer_len(instance, cmd, phase) (0) +#define NCR5380_dma_recv_setup(instance, dst, len) (0) +#define NCR5380_dma_send_setup(instance, src, len) (0) +#define NCR5380_dma_residual(instance) (0) + #define NCR5380_implementation_fields /* none */ #include "NCR5380.h" @@ -62,7 +65,6 @@ static struct scsi_host_template dmx3191d_driver_template = { .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, .cmd_size = NCR5380_CMD_SIZE, - .max_sectors = 128, }; static int dmx3191d_probe_one(struct pci_dev *pdev, @@ -93,7 +95,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev, */ shost->irq = NO_IRQ; - error = NCR5380_init(shost, FLAG_NO_PSEUDO_DMA); + error = NCR5380_init(shost, 0); if (error) goto out_host_put; diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index 6c736b071cf4..459863f94e46 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -1,6 +1,3 @@ -#define PSEUDO_DMA -#define DONT_USE_INTR - /* * DTC 3180/3280 driver, by * Ray Van Tassle rayvt@comm.mot.com @@ -54,7 +51,6 @@ #include <scsi/scsi_host.h> #include "dtc.h" -#define AUTOPROBE_IRQ #include "NCR5380.h" /* @@ -229,7 +225,7 @@ found: instance->base = addr; ((struct NCR5380_hostdata *)(instance)->hostdata)->base = base; - if (NCR5380_init(instance, FLAG_NO_DMA_FIXUP)) + if (NCR5380_init(instance, FLAG_LATE_DMA_SETUP)) goto out_unregister; NCR5380_maybe_reset_bus(instance); @@ -244,9 +240,10 @@ found: if (instance->irq == 255) instance->irq = NO_IRQ; -#ifndef DONT_USE_INTR /* With interrupts enabled, it will sometimes hang when doing heavy * reads. So better not enable them until I finger it out. */ + instance->irq = NO_IRQ; + if (instance->irq != NO_IRQ) if (request_irq(instance->irq, dtc_intr, 0, "dtc", instance)) { @@ -258,11 +255,7 @@ found: printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } -#else - if (instance->irq != NO_IRQ) - printk(KERN_WARNING "scsi%d : interrupts not used. 
Might as well not jumper it.\n", instance->host_no); - instance->irq = NO_IRQ; -#endif + dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n", instance->host_no, instance->irq); @@ -323,7 +316,8 @@ static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev, * timeout. */ -static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) +static inline int dtc_pread(struct Scsi_Host *instance, + unsigned char *dst, int len) { unsigned char *d = dst; int i; /* For counting time spent in the poll-loop */ @@ -352,8 +346,6 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS)) ++i; rtrc(0); - if (i > hostdata->spin_max_r) - hostdata->spin_max_r = i; return (0); } @@ -370,7 +362,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, * timeout. */ -static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) +static inline int dtc_pwrite(struct Scsi_Host *instance, + unsigned char *src, int len) { int i; struct NCR5380_hostdata *hostdata = shost_priv(instance); @@ -400,8 +393,6 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, rtrc(7); /* Check for parity error here. fixme. */ rtrc(0); - if (i > hostdata->spin_max_w) - hostdata->spin_max_w = i; return (0); } @@ -440,8 +431,6 @@ static struct scsi_host_template driver_template = { .detect = dtc_detect, .release = dtc_release, .proc_name = "dtc3x80", - .show_info = dtc_show_info, - .write_info = dtc_write_info, .info = dtc_info, .queuecommand = dtc_queue_command, .eh_abort_handler = dtc_abort, diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h index 56732cba8aba..fcb0a8ea7bda 100644 --- a/drivers/scsi/dtc.h +++ b/drivers/scsi/dtc.h @@ -21,14 +21,17 @@ #define NCR5380_dma_xfer_len(instance, cmd, phase) \ dtc_dma_xfer_len(cmd) +#define NCR5380_dma_recv_setup dtc_pread +#define NCR5380_dma_send_setup dtc_pwrite +#define NCR5380_dma_residual(instance) (0) #define NCR5380_intr dtc_intr #define NCR5380_queue_command dtc_queue_command #define NCR5380_abort dtc_abort #define NCR5380_bus_reset dtc_bus_reset #define NCR5380_info dtc_info -#define NCR5380_show_info dtc_show_info -#define NCR5380_write_info dtc_write_info + +#define NCR5380_io_delay(x) udelay(x) /* 15 12 11 10 1001 1100 0000 0000 */ diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index ca8003f0d8a3..4299fa485622 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c @@ -729,6 +729,7 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev break; case 0x24: SD(sh)->EATA_revision = 'z'; + break; default: SD(sh)->EATA_revision = '?'; } diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 33581ba4386e..2aca4d16f39e 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -246,7 +246,7 @@ static struct scsi_host_template driver_template = { .eh_target_reset_handler = esas2r_target_reset, .can_queue = 128, .this_id = -1, - .sg_tablesize = SCSI_MAX_SG_SEGMENTS, + .sg_tablesize = SG_CHUNK_SIZE, .cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN, .present = 0, @@ -271,7 +271,7 @@ module_param(num_sg_lists, int, 0); MODULE_PARM_DESC(num_sg_lists, "Number of scatter/gather lists. 
Default 1024."); -int sg_tablesize = SCSI_MAX_SG_SEGMENTS; +int sg_tablesize = SG_CHUNK_SIZE; module_param(sg_tablesize, int, 0); MODULE_PARM_DESC(sg_tablesize, "Maximum number of entries in a scatter/gather table."); diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index ce129e595b55..9ddc9200e0a4 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -39,7 +39,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.17a" +#define DRV_VERSION "1.6.0.21" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index f3032ca5051b..d9fd2f841585 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -439,7 +439,6 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ int sg_count = 0; unsigned long flags = 0; unsigned long ptr; - struct fc_rport_priv *rdata; spinlock_t *io_lock = NULL; int io_lock_acquired = 0; @@ -455,14 +454,17 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ return 0; } - rdata = lp->tt.rport_lookup(lp, rport->port_id); - if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "returning IO as rport is removed\n"); - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); - sc->result = DID_NO_CONNECT; - done(sc); - return 0; + if (rport) { + struct fc_rport_libfc_priv *rp = rport->dd_data; + + if (!rp || rp->rp_state != RPORT_ST_READY) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "returning DID_NO_CONNECT for IO as rport is removed\n"); + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + sc->result = DID_NO_CONNECT<<16; + done(sc); + return 0; + } } if (lp->state != LPORT_ST_READY || !(lp->link_up)) @@ -1091,6 +1093,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, atomic64_inc( &term_stats->terminate_fw_timeouts); break; + case FCPIO_ITMF_REJECTED: + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "abort reject recd. id %d\n", + (int)(id & FNIC_TAG_MASK)); + break; case FCPIO_IO_NOT_FOUND: if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) atomic64_inc(&abts_stats->abort_io_not_found); @@ -1111,9 +1118,15 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, spin_unlock_irqrestore(io_lock, flags); return; } - CMD_ABTS_STATUS(sc) = hdr_status; + CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; + /* If the status is IO not found consider it as success */ + if (hdr_status == FCPIO_IO_NOT_FOUND) + CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS; + else + CMD_ABTS_STATUS(sc) = hdr_status; + atomic64_dec(&fnic_stats->io_stats.active_ios); if (atomic64_read(&fnic->io_cmpl_skip)) atomic64_dec(&fnic->io_cmpl_skip); @@ -1926,21 +1939,31 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + start_time = io_req->start_time; /* * firmware completed the abort, check the status, - * free the io_req irrespective of failure or success + * free the io_req if successful. If abort fails, + * Device reset will clean the I/O. 
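The fnic hunk above changes the early-return path to store DID_NO_CONNECT<<16 rather than the bare DID_NO_CONNECT, and the abort/reset paths that follow complete commands with DID_ABORT<<16 and DID_RESET<<16. All of these rely on the midlayer keeping the host status in bits 16..23 of scsi_cmnd->result. A small stand-alone illustration of that encoding; the DID_* values and the host_byte() shift mirror include/scsi/scsi.h, everything else is just demonstration code.

#include <stdio.h>

/* Host-status values and the host_byte() shift as in include/scsi/scsi.h. */
#define DID_OK         0x00
#define DID_NO_CONNECT 0x01
#define DID_ABORT      0x05
#define DID_RESET      0x08

#define host_byte(result) (((result) >> 16) & 0xff)

int main(void)
{
	/* Unshifted, DID_NO_CONNECT lands in the low byte and the host
	 * byte reads back as DID_OK, which is why the hunk adds the shift. */
	printf("host_byte(DID_NO_CONNECT)       = 0x%02x\n",
	       host_byte(DID_NO_CONNECT));
	printf("host_byte(DID_NO_CONNECT << 16) = 0x%02x\n",
	       host_byte(DID_NO_CONNECT << 16));
	printf("host_byte(DID_ABORT << 16)      = 0x%02x\n",
	       host_byte(DID_ABORT << 16));
	return 0;
}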
*/ - if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS) + if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS) + CMD_SP(sc) = NULL; + else { ret = FAILED; - - CMD_SP(sc) = NULL; + spin_unlock_irqrestore(io_lock, flags); + goto fnic_abort_cmd_end; + } spin_unlock_irqrestore(io_lock, flags); - start_time = io_req->start_time; fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); + if (sc->scsi_done) { + /* Call SCSI completion function to complete the IO */ + sc->result = (DID_ABORT << 16); + sc->scsi_done(sc); + } + fnic_abort_cmd_end: FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, sc->request->tag, sc, @@ -2018,7 +2041,9 @@ lr_io_req_end: * successfully aborted, 1 otherwise */ static int fnic_clean_pending_aborts(struct fnic *fnic, - struct scsi_cmnd *lr_sc) + struct scsi_cmnd *lr_sc, + bool new_sc) + { int tag, abt_tag; struct fnic_io_req *io_req; @@ -2036,10 +2061,10 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, tag); /* - * ignore this lun reset cmd or cmds that do not belong to - * this lun + * ignore this lun reset cmd if issued using new SC + * or cmds that do not belong to this lun */ - if (!sc || sc == lr_sc || sc->device != lun_dev) { + if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) { spin_unlock_irqrestore(io_lock, flags); continue; } @@ -2145,11 +2170,27 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, goto clean_pending_aborts_end; } CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; - CMD_SP(sc) = NULL; + + /* original sc used for lr is handled by dev reset code */ + if (sc != lr_sc) + CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); - fnic_release_ioreq_buf(fnic, io_req, sc); - mempool_free(io_req, fnic->io_req_pool); + /* original sc used for lr is handled by dev reset code */ + if (sc != lr_sc) { + fnic_release_ioreq_buf(fnic, io_req, sc); + mempool_free(io_req, fnic->io_req_pool); + } + + /* + * Any IO is returned during reset, it needs to call scsi_done + * to return the scsi_cmnd to upper layer. + */ + if (sc->scsi_done) { + /* Set result to let upper SCSI layer retry */ + sc->result = DID_RESET << 16; + sc->scsi_done(sc); + } } schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); @@ -2243,6 +2284,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) int tag = 0; DECLARE_COMPLETION_ONSTACK(tm_done); int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ + bool new_sc = 0; /* Wait for rport to unblock */ fc_block_scsi_eh(sc); @@ -2288,13 +2330,12 @@ int fnic_device_reset(struct scsi_cmnd *sc) * fix the way the EH ioctls work for real, but until * that happens we fail these explicit requests here. */ - if (shost_use_blk_mq(sc->device->host)) - goto fnic_device_reset_end; tag = fnic_scsi_host_start_tag(fnic, sc); if (unlikely(tag == SCSI_NO_TAG)) goto fnic_device_reset_end; tag_gen_flag = 1; + new_sc = 1; } io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); @@ -2429,7 +2470,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) * the lun reset cmd. 
If all cmds get cleaned, the lun reset * succeeds */ - if (fnic_clean_pending_aborts(fnic, sc)) { + if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 90091e693020..516bd6c4f442 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -18,50 +18,10 @@ * * Added ISAPNP support for DTC436 adapters, * Thomas Sailer, sailer@ife.ee.ethz.ch - */ - -/* - * TODO : flesh out DMA support, find some one actually using this (I have - * a memory mapped Trantor board that works fine) - */ - -/* - * The card is detected and initialized in one of several ways : - * 1. With command line overrides - NCR5380=port,irq may be - * used on the LILO command line to override the defaults. - * - * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is - * specified as an array of address, irq, dma, board tuples. Ie, for - * one board at 0x350, IRQ5, no dma, I could say - * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}} - * - * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an - * IRQ line if overridden on the command line. * - * 3. When included as a module, with arguments passed on the command line: - * ncr_irq=xx the interrupt - * ncr_addr=xx the port or base address (for port or memory - * mapped, resp.) - * ncr_dma=xx the DMA - * ncr_5380=1 to set up for a NCR5380 board - * ncr_53c400=1 to set up for a NCR53C400 board - * e.g. - * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1 - * for a port mapped NCR5380 board or - * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1 - * for a memory mapped NCR53C400 board with interrupts disabled. - * - * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an - * IRQ line if overridden on the command line. - * + * See Documentation/scsi/g_NCR5380.txt for more info. 
*/ -#define AUTOPROBE_IRQ - -#ifdef CONFIG_SCSI_GENERIC_NCR53C400 -#define PSEUDO_DMA -#endif - #include <asm/io.h> #include <linux/blkdev.h> #include <linux/module.h> @@ -270,7 +230,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) #ifndef SCSI_G_NCR5380_MEM int i; int port_idx = -1; - unsigned long region_size = 16; + unsigned long region_size; #endif static unsigned int __initdata ncr_53c400a_ports[] = { 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 @@ -290,6 +250,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) #ifdef SCSI_G_NCR5380_MEM unsigned long base; void __iomem *iomem; + resource_size_t iomem_size; #endif if (ncr_irq) @@ -350,25 +311,17 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) flags = 0; switch (overrides[current_override].board) { case BOARD_NCR5380: - flags = FLAG_NO_PSEUDO_DMA; - break; - case BOARD_NCR53C400: -#ifdef PSEUDO_DMA - flags = FLAG_NO_DMA_FIXUP; -#endif + flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; break; case BOARD_NCR53C400A: - flags = FLAG_NO_DMA_FIXUP; ports = ncr_53c400a_ports; magic = ncr_53c400a_magic; break; case BOARD_HP_C2502: - flags = FLAG_NO_DMA_FIXUP; ports = ncr_53c400a_ports; magic = hp_c2502_magic; break; case BOARD_DTC3181E: - flags = FLAG_NO_DMA_FIXUP; ports = dtc_3181e_ports; magic = ncr_53c400a_magic; break; @@ -381,20 +334,22 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) /* Disable the adapter and look for a free io port */ magic_configure(-1, 0, magic); + region_size = 16; + if (overrides[current_override].NCR5380_map_name != PORT_AUTO) for (i = 0; ports[i]; i++) { - if (!request_region(ports[i], 16, "ncr53c80")) + if (!request_region(ports[i], region_size, "ncr53c80")) continue; if (overrides[current_override].NCR5380_map_name == ports[i]) break; - release_region(ports[i], 16); + release_region(ports[i], region_size); } else for (i = 0; ports[i]; i++) { - if (!request_region(ports[i], 16, "ncr53c80")) + if (!request_region(ports[i], region_size, "ncr53c80")) continue; if (inb(ports[i]) == 0xff) break; - release_region(ports[i], 16); + release_region(ports[i], region_size); } if (ports[i]) { /* At this point we have our region reserved */ @@ -410,17 +365,19 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) else { /* Not a 53C400A style setup - just grab */ - if(!(request_region(overrides[current_override].NCR5380_map_name, NCR5380_region_size, "ncr5380"))) + region_size = 8; + if (!request_region(overrides[current_override].NCR5380_map_name, + region_size, "ncr5380")) continue; - region_size = NCR5380_region_size; } #else base = overrides[current_override].NCR5380_map_name; - if (!request_mem_region(base, NCR5380_region_size, "ncr5380")) + iomem_size = NCR53C400_region_size; + if (!request_mem_region(base, iomem_size, "ncr5380")) continue; - iomem = ioremap(base, NCR5380_region_size); + iomem = ioremap(base, iomem_size); if (!iomem) { - release_mem_region(base, NCR5380_region_size); + release_mem_region(base, iomem_size); continue; } #endif @@ -458,6 +415,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) #else instance->base = overrides[current_override].NCR5380_map_name; hostdata->iomem = iomem; + hostdata->iomem_size = iomem_size; switch (overrides[current_override].board) { case BOARD_NCR53C400: hostdata->c400_ctl_status = 0x100; @@ -472,7 +430,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) } #endif - if 
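The comment above gives the two constraints behind generic_NCR5380_dma_xfer_len(): the 53C400-family pseudo-DMA engine moves data in 128-byte blocks, and transfers are capped at 32K. A toy clamp enforcing just those two rules might look like the sketch below; the function name is invented and the real routine also looks at request state that is not visible in this hunk.

#include <stdio.h>

/* Illustrative clamp only: cap a pseudo-DMA transfer at 32 KiB and round it
 * down to the 128-byte block size the 53C400-style chips move per burst. */
static int clamp_pdma_len(int transfersize)
{
	if (transfersize > 32 * 1024)
		transfersize = 32 * 1024;
	return transfersize - (transfersize % 128);	/* whole blocks only */
}

int main(void)
{
	printf("%d\n", clamp_pdma_len(40000));	/* -> 32768 */
	printf("%d\n", clamp_pdma_len(1000));	/* -> 896 */
	return 0;
}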
(NCR5380_init(instance, flags)) + if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP)) goto out_unregister; switch (overrides[current_override].board) { @@ -524,7 +482,7 @@ out_release: release_region(overrides[current_override].NCR5380_map_name, region_size); #else iounmap(iomem); - release_mem_region(base, NCR5380_region_size); + release_mem_region(base, iomem_size); #endif return count; } @@ -546,45 +504,18 @@ static int generic_NCR5380_release_resources(struct Scsi_Host *instance) #ifndef SCSI_G_NCR5380_MEM release_region(instance->io_port, instance->n_io_port); #else - iounmap(((struct NCR5380_hostdata *)instance->hostdata)->iomem); - release_mem_region(instance->base, NCR5380_region_size); -#endif - return 0; -} - -#ifdef BIOSPARAM -/** - * generic_NCR5380_biosparam - * @disk: disk to compute geometry for - * @dev: device identifier for this disk - * @ip: sizes to fill in - * - * Generates a BIOS / DOS compatible H-C-S mapping for the specified - * device / size. - * - * XXX Most SCSI boards use this mapping, I could be incorrect. Someone - * using hard disks on a trantor should verify that this mapping - * corresponds to that used by the BIOS / ASPI driver by running the linux - * fdisk program and matching the H_C_S coordinates to what DOS uses. - * - * Locks: none - */ + { + struct NCR5380_hostdata *hostdata = shost_priv(instance); -static int -generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev, - sector_t capacity, int *ip) -{ - ip[0] = 64; - ip[1] = 32; - ip[2] = capacity >> 11; + iounmap(hostdata->iomem); + release_mem_region(instance->base, hostdata->iomem_size); + } +#endif return 0; } -#endif - -#ifdef PSEUDO_DMA /** - * NCR5380_pread - pseudo DMA read + * generic_NCR5380_pread - pseudo DMA read * @instance: adapter to read from * @dst: buffer to read into * @len: buffer length @@ -593,7 +524,8 @@ generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev, * controller */ -static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) +static inline int generic_NCR5380_pread(struct Scsi_Host *instance, + unsigned char *dst, int len) { struct NCR5380_hostdata *hostdata = shost_priv(instance); int blocks = len / 128; @@ -661,7 +593,7 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, } /** - * NCR5380_write - pseudo DMA write + * generic_NCR5380_pwrite - pseudo DMA write * @instance: adapter to read from * @dst: buffer to read into * @len: buffer length @@ -670,7 +602,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, * controller */ -static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) +static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance, + unsigned char *src, int len) { struct NCR5380_hostdata *hostdata = shost_priv(instance); int blocks = len / 128; @@ -738,10 +671,15 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, return 0; } -static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd) +static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); int transfersize = cmd->transfersize; + if (hostdata->flags & FLAG_NO_PSEUDO_DMA) + return 0; + /* Limit transfers to 32K, for xx400 & xx406 * pseudoDMA that transfers in 128 bytes blocks. 
*/ @@ -756,8 +694,6 @@ static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd) return transfersize; } -#endif /* PSEUDO_DMA */ - /* * Include the NCR5380 core code that we build our driver around */ @@ -773,7 +709,6 @@ static struct scsi_host_template driver_template = { .queuecommand = generic_NCR5380_queue_command, .eh_abort_handler = generic_NCR5380_abort, .eh_bus_reset_handler = generic_NCR5380_bus_reset, - .bios_param = NCR5380_BIOSPARAM, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 6f3d2ac4f185..595177428d76 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -14,13 +14,6 @@ #ifndef GENERIC_NCR5380_H #define GENERIC_NCR5380_H -#ifdef CONFIG_SCSI_GENERIC_NCR53C400 -#define BIOSPARAM -#define NCR5380_BIOSPARAM generic_NCR5380_biosparam -#else -#define NCR5380_BIOSPARAM NULL -#endif - #define __STRVAL(x) #x #define STRVAL(x) __STRVAL(x) @@ -30,12 +23,6 @@ #define NCR5380_map_type int #define NCR5380_map_name port -#ifdef CONFIG_SCSI_GENERIC_NCR53C400 -#define NCR5380_region_size 16 -#else -#define NCR5380_region_size 8 -#endif - #define NCR5380_read(reg) \ inb(instance->io_port + (reg)) #define NCR5380_write(reg, value) \ @@ -55,7 +42,7 @@ #define NCR5380_map_name base #define NCR53C400_mem_base 0x3880 #define NCR53C400_host_buffer 0x3900 -#define NCR5380_region_size 0x3a00 +#define NCR53C400_region_size 0x3a00 #define NCR5380_read(reg) \ readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \ @@ -66,6 +53,7 @@ #define NCR5380_implementation_fields \ void __iomem *iomem; \ + resource_size_t iomem_size; \ int c400_ctl_status; \ int c400_blk_cnt; \ int c400_host_buf; @@ -73,16 +61,18 @@ #endif #define NCR5380_dma_xfer_len(instance, cmd, phase) \ - generic_NCR5380_dma_xfer_len(cmd) + generic_NCR5380_dma_xfer_len(instance, cmd) +#define NCR5380_dma_recv_setup generic_NCR5380_pread +#define NCR5380_dma_send_setup generic_NCR5380_pwrite +#define NCR5380_dma_residual(instance) (0) #define NCR5380_intr generic_NCR5380_intr #define NCR5380_queue_command generic_NCR5380_queue_command #define NCR5380_abort generic_NCR5380_abort #define NCR5380_bus_reset generic_NCR5380_bus_reset -#define NCR5380_pread generic_NCR5380_pread -#define NCR5380_pwrite generic_NCR5380_pwrite #define NCR5380_info generic_NCR5380_info -#define NCR5380_show_info generic_NCR5380_show_info + +#define NCR5380_io_delay(x) udelay(x) #define BOARD_NCR5380 0 #define BOARD_NCR53C400 1 diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 29e89f340b64..d7cab724f203 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -23,7 +23,7 @@ #include <scsi/sas_ata.h> #include <scsi/libsas.h> -#define DRV_VERSION "v1.3" +#define DRV_VERSION "v1.4" #define HISI_SAS_MAX_PHYS 9 #define HISI_SAS_MAX_QUEUES 32 @@ -133,6 +133,9 @@ struct hisi_sas_hw { int (*hw_init)(struct hisi_hba *hisi_hba); void (*setup_itct)(struct hisi_hba *hisi_hba, struct hisi_sas_device *device); + int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx, + struct domain_device *device); + struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s); void (*start_delivery)(struct hisi_hba *hisi_hba); @@ -298,7 +301,7 @@ struct hisi_sas_command_table_stp { u8 atapi_cdb[ATAPI_CDB_LEN]; }; -#define HISI_SAS_SGE_PAGE_CNT SCSI_MAX_SG_SEGMENTS +#define 
HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE struct hisi_sas_sge_page { struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; }; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 097ab4f27a6b..18dd5ea2c721 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -227,7 +227,11 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, } else n_elem = task->num_scatter; - rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); + if (hisi_hba->hw->slot_index_alloc) + rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, + device); + else + rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); if (rc) goto err_out; rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue, @@ -417,7 +421,10 @@ static int hisi_sas_dev_found(struct domain_device *device) struct hisi_sas_device *sas_dev; struct device *dev = &hisi_hba->pdev->dev; - sas_dev = hisi_sas_alloc_dev(device); + if (hisi_hba->hw->alloc_dev) + sas_dev = hisi_hba->hw->alloc_dev(device); + else + sas_dev = hisi_sas_alloc_dev(device); if (!sas_dev) { dev_err(dev, "fail alloc dev: max support %d devices\n", HISI_SAS_MAX_DEVICES); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index b7337476454b..bbe98ecea0bc 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -465,6 +465,62 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, return readl(regs); } +/* This function needs to be protected from pre-emption. */ +static int +slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, + struct domain_device *device) +{ + unsigned int index = 0; + void *bitmap = hisi_hba->slot_index_tags; + int sata_dev = dev_is_sata(device); + + while (1) { + index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, + index); + if (index >= hisi_hba->slot_index_count) + return -SAS_QUEUE_FULL; + /* + * SAS IPTT bit0 should be 1 + */ + if (sata_dev || (index & 1)) + break; + index++; + } + + set_bit(index, bitmap); + *slot_idx = index; + return 0; +} + +static struct +hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) +{ + struct hisi_hba *hisi_hba = device->port->ha->lldd_ha; + struct hisi_sas_device *sas_dev = NULL; + int i, sata_dev = dev_is_sata(device); + + spin_lock(&hisi_hba->lock); + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + /* + * SATA device id bit0 should be 0 + */ + if (sata_dev && (i & 1)) + continue; + if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { + hisi_hba->devices[i].device_id = i; + sas_dev = &hisi_hba->devices[i]; + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + sas_dev->dev_type = device->dev_type; + sas_dev->hisi_hba = hisi_hba; + sas_dev->sas_device = device; + break; + } + } + spin_unlock(&hisi_hba->lock); + + return sas_dev; +} + static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); @@ -544,7 +600,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, } qw0 |= ((1 << ITCT_HDR_VALID_OFF) | - (device->max_linkrate << ITCT_HDR_MCR_OFF) | + (device->linkrate << ITCT_HDR_MCR_OFF) | (1 << ITCT_HDR_VLN_OFF) | (port->id << ITCT_HDR_PORT_ID_OFF)); itct->qw0 = cpu_to_le64(qw0); @@ -554,10 +610,11 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, itct->sas_addr = __swab64(itct->sas_addr); /* qw2 */ - itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | - (0xff00ULL << ITCT_HDR_BITLT_OFF) | - (0xff00ULL << 
ITCT_HDR_MCTLT_OFF) | - (0xff00ULL << ITCT_HDR_RTOLT_OFF)); + if (!dev_is_sata(device)) + itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) | + (0x1ULL << ITCT_HDR_BITLT_OFF) | + (0x32ULL << ITCT_HDR_MCTLT_OFF) | + (0x1ULL << ITCT_HDR_RTOLT_OFF)); } static void free_device_v2_hw(struct hisi_hba *hisi_hba, @@ -715,7 +772,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); - hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20); + hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32); hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); @@ -1993,22 +2050,23 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; irqreturn_t res = IRQ_HANDLED; u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; - int phy_no; + int phy_no, offset; phy_no = sas_phy->id; initial_fis = &hisi_hba->initial_fis[phy_no]; fis = &initial_fis->fis; - ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1); - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no); + offset = 4 * (phy_no / 4); + ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, + ent_msk | 1 << ((phy_no % 4) * 8)); - ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1); - ent_tmp = ent_int; + ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset); + ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF * + (phy_no % 4))); ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4); if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) { dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no); - hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp); - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk); res = IRQ_NONE; goto end; } @@ -2056,8 +2114,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) queue_work(hisi_hba->wq, &phy->phyup_ws); end: - hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp); - hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk); + hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); return res; } @@ -2165,6 +2223,8 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) static const struct hisi_sas_hw hisi_sas_v2_hw = { .hw_init = hisi_sas_v2_init, .setup_itct = setup_itct_v2_hw, + .slot_index_alloc = slot_index_alloc_quirk_v2_hw, + .alloc_dev = alloc_dev_quirk_v2_hw, .sl_notify = sl_notify_v2_hw, .get_wideport_bitmap = get_wideport_bitmap_v2_hw, .free_device = free_device_v2_hw, diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 5be944c8b71c..ff8dcd5b0631 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -60,7 +60,7 @@ * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' * with an optional trailing '-' followed by a byte value (0-255). 
*/ -#define HPSA_DRIVER_VERSION "3.4.14-0" +#define HPSA_DRIVER_VERSION "3.4.16-0" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" @@ -294,6 +294,9 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h); static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, struct ReportExtendedLUNdata *buf, int bufsize); static int hpsa_luns_changed(struct ctlr_info *h); +static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, + struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr); static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) { @@ -728,6 +731,29 @@ static ssize_t unique_id_show(struct device *dev, sn[12], sn[13], sn[14], sn[15]); } +static ssize_t sas_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + u64 sas_address; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev || is_logical_device(hdev) || !hdev->expose_device) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + sas_address = hdev->sas_address; + spin_unlock_irqrestore(&h->lock, flags); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address); +} + static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, struct device_attribute *attr, char *buf) { @@ -840,6 +866,7 @@ static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL); static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, host_show_hp_ssd_smart_path_enabled, NULL); static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL); @@ -865,6 +892,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_unique_id, &dev_attr_hp_ssd_smart_path_enabled, &dev_attr_path_info, + &dev_attr_sas_address, NULL, }; @@ -1637,9 +1665,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, for (j = 0; j < ndevices; j++) { if (dev[j] == NULL) continue; - if (dev[j]->devtype != TYPE_DISK) - continue; - if (dev[j]->devtype != TYPE_ZBC) + if (dev[j]->devtype != TYPE_DISK && + dev[j]->devtype != TYPE_ZBC) continue; if (is_logical_device(dev[j])) continue; @@ -1684,9 +1711,8 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, for (i = 0; i < ndevices; i++) { if (dev[i] == NULL) continue; - if (dev[i]->devtype != TYPE_DISK) - continue; - if (dev[i]->devtype != TYPE_ZBC) + if (dev[i]->devtype != TYPE_DISK && + dev[i]->devtype != TYPE_ZBC) continue; if (!is_logical_device(dev[i])) continue; @@ -1720,6 +1746,51 @@ static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) return rc; } +static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev) +{ + int i; + int count = 0; + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + int refcount = atomic_inc_return(&c->refcount); + + if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, + dev->scsi3addr)) { + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); /* Implied MB */ + if (!hpsa_is_cmd_idle(c)) + ++count; + spin_unlock_irqrestore(&h->lock, flags); + } + + cmd_free(h, c); + } + + return count; +} + +static void 
hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, + struct hpsa_scsi_dev_t *device) +{ + int cmds = 0; + int waits = 0; + + while (1) { + cmds = hpsa_find_outstanding_commands_for_dev(h, device); + if (cmds == 0) + break; + if (++waits > 20) + break; + dev_warn(&h->pdev->dev, + "%s: removing device with %d outstanding commands!\n", + __func__, cmds); + msleep(1000); + } +} + static void hpsa_remove_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) { @@ -1743,8 +1814,13 @@ static void hpsa_remove_device(struct ctlr_info *h, hpsa_show_dev_msg(KERN_WARNING, h, device, "didn't find device for removal."); } - } else /* HBA */ + } else { /* HBA */ + + device->removed = 1; + hpsa_wait_for_outstanding_commands_for_dev(h, device); + hpsa_remove_sas_device(device); + } } static void adjust_hpsa_scsi_table(struct ctlr_info *h, @@ -2146,7 +2222,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, static int handle_ioaccel_mode2_error(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, - struct io_accel2_cmd *c2) + struct io_accel2_cmd *c2, + struct hpsa_scsi_dev_t *dev) { int data_len; int retry = 0; @@ -2210,8 +2287,27 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: case IOACCEL2_STATUS_SR_INVALID_DEVICE: case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: - /* We will get an event from ctlr to trigger rescan */ - retry = 1; + /* + * Did an HBA disk disappear? We will eventually + * get a state change event from the controller but + * in the meantime, we need to tell the OS that the + * HBA disk is no longer there and stop I/O + * from going down. This allows the potential re-insert + * of the disk to get the same device node. + */ + if (dev->physical_device && dev->expose_device) { + cmd->result = DID_NO_CONNECT << 16; + dev->removed = 1; + h->drv_req_rescan = 1; + dev_warn(&h->pdev->dev, + "%s: device is gone!\n", __func__); + } else + /* + * Retry by sending down the RAID path. + * We will get an event from ctlr to + * trigger rescan regardless. + */ + retry = 1; break; default: retry = 1; @@ -2335,13 +2431,15 @@ static void process_ioaccel2_completion(struct ctlr_info *h, c2->error_data.serv_response == IOACCEL2_SERV_RESPONSE_FAILURE) { if (c2->error_data.status == - IOACCEL2_STATUS_SR_IOACCEL_DISABLED) + IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { dev->offload_enabled = 0; + dev->offload_to_be_enabled = 0; + } return hpsa_retry_cmd(h, c); } - if (handle_ioaccel_mode2_error(h, c, cmd, c2)) + if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) return hpsa_retry_cmd(h, c); return hpsa_cmd_free_and_done(h, c, cmd); @@ -2806,7 +2904,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, goto out; } rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, NO_TIMEOUT); + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -2832,7 +2930,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, /* fill_cmd can't fail here, no data buffer to map. 
*/ (void) fill_cmd(c, reset_type, h, NULL, 0, 0, scsi3addr, TYPE_MSG); - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); if (rc) { dev_warn(&h->pdev->dev, "Failed to send reset command\n"); goto out; @@ -3080,7 +3178,7 @@ static int hpsa_get_raid_map(struct ctlr_info *h, return -1; } rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, NO_TIMEOUT); + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -3123,7 +3221,7 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, NO_TIMEOUT); + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -3151,7 +3249,7 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h, goto out; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, NO_TIMEOUT); + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -3182,7 +3280,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h, c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, - NO_TIMEOUT); + DEFAULT_TIMEOUT); ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); @@ -3250,7 +3348,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h, c->Request.CDB[5] = 0; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, - NO_TIMEOUT); + DEFAULT_TIMEOUT); if (rc) goto out; @@ -3462,7 +3560,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, if (extended_response) c->Request.CDB[1] = extended_response; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, NO_TIMEOUT); + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -3569,7 +3667,8 @@ static int hpsa_volume_offline(struct ctlr_info *h, c = cmd_alloc(h); (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); - rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + DEFAULT_TIMEOUT); if (rc) { cmd_free(h, c); return 0; @@ -3644,7 +3743,8 @@ static int hpsa_device_supports_aborts(struct ctlr_info *h, c = cmd_alloc(h); (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); - (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + DEFAULT_TIMEOUT); /* no unmap needed here because no data xfer. */ ei = c->err_info; switch (ei->CommandStatus) { @@ -5234,6 +5334,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) dev = cmd->device->hostdata; if (!dev) { + cmd->result = NOT_READY << 16; /* host byte */ + cmd->scsi_done(cmd); + return 0; + } + + if (dev->removed) { cmd->result = DID_NO_CONNECT << 16; cmd->scsi_done(cmd); return 0; @@ -5414,7 +5520,7 @@ static int hpsa_send_test_unit_ready(struct ctlr_info *h, /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); if (rc) return rc; /* no unmap needed here because no data xfer. 
*/ @@ -5638,7 +5744,7 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 0, 0, scsi3addr, TYPE_MSG); if (h->needs_abort_tags_swizzled) swizzle_abort_tag(&c->Request.CDB[4]); - (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); hpsa_get_tag(h, abort, &taglower, &tagupper); dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", __func__, tagupper, taglower); @@ -5803,7 +5909,7 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, c = cmd_alloc(h); setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; - (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); hpsa_get_tag(h, abort, &taglower, &tagupper); dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", @@ -6348,7 +6454,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[0].Len = cpu_to_le32(iocommand.buf_size); c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } - rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + DEFAULT_TIMEOUT); if (iocommand.buf_size > 0) hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); @@ -6480,7 +6587,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) } c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); } - status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + DEFAULT_TIMEOUT); if (sg_used) hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); @@ -8254,8 +8362,10 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h) event_type = "configuration change"; /* Stop sending new RAID offload reqs via the IO accelerator */ scsi_block_requests(h->scsi_host); - for (i = 0; i < h->ndevices; i++) + for (i = 0; i < h->ndevices; i++) { h->dev[i]->offload_enabled = 0; + h->dev[i]->offload_to_be_enabled = 0; + } hpsa_drain_accel_commands(h); /* Set 'accelerator path config change' bit */ dev_warn(&h->pdev->dev, @@ -8541,11 +8651,6 @@ reinit_after_soft_reset: if (rc) goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ - /* hook into SCSI subsystem */ - rc = hpsa_scsi_add_host(h); - if (rc) - goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ - /* create the resubmit workqueue */ h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); if (!h->rescan_ctlr_wq) { @@ -8642,6 +8747,11 @@ reinit_after_soft_reset: dev_info(&h->pdev->dev, "Can't track change to report lun data\n"); + /* hook into SCSI subsystem */ + rc = hpsa_scsi_add_host(h); + if (rc) + goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + /* Monitor the controller for firmware lockups */ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); @@ -8703,7 +8813,7 @@ static void hpsa_flush_cache(struct ctlr_info *h) goto out; } rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_TODEVICE, NO_TIMEOUT); + PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); if (rc) goto out; if (c->err_info->CommandStatus != 0) @@ -8742,7 +8852,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) goto errout; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, NO_TIMEOUT); + PCI_DMA_FROMDEVICE, 
DEFAULT_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; @@ -8754,7 +8864,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) goto errout; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_TODEVICE, NO_TIMEOUT); + PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; @@ -8764,7 +8874,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) goto errout; rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, NO_TIMEOUT); + PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; @@ -9602,6 +9712,7 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy) static int hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { + *identifier = 0; return 0; } diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index d06bb7417e36..a1487e67f7a1 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -63,6 +63,7 @@ struct hpsa_scsi_dev_t { unsigned char scsi3addr[8]; /* as presented to the HW */ u8 physical_device : 1; u8 expose_device; + u8 removed : 1; /* device is marked for death */ #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" unsigned char device_id[16]; /* from inquiry pg. 0x83 */ u64 sas_address; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 6bffd91b973a..c051694bfcb0 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2127,7 +2127,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) struct iscsi_conn *conn; struct iscsi_task *task; struct iscsi_tm *hdr; - int rc, age; + int age; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; @@ -2188,10 +2188,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) hdr = &conn->tmhdr; iscsi_prep_abort_task_pdu(task, hdr); - if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) { - rc = FAILED; + if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) goto failed; - } switch (conn->tmf_state) { case TMF_SUCCESS: @@ -2423,7 +2421,7 @@ static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) * * This will attempt to send a warm target reset. */ -int iscsi_eh_target_reset(struct scsi_cmnd *sc) +static int iscsi_eh_target_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; @@ -2495,7 +2493,6 @@ done: mutex_unlock(&session->eh_mutex); return rc; } -EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); /** * iscsi_eh_recover_target - reset target and possibly the session diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 90a3ca5a4dbd..d5bd420595c1 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -694,6 +694,7 @@ struct lpfc_hba { uint8_t wwnn[8]; uint8_t wwpn[8]; uint32_t RandomData[7]; + uint32_t fcp_embed_io; /* HBA Config Parameters */ uint32_t cfg_ack0; @@ -757,7 +758,6 @@ struct lpfc_hba { uint32_t cfg_fdmi_on; #define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */ #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? 
*/ -#define LPFC_FDMI_SMART_SAN 2 /* SmartSAN supported */ uint32_t cfg_enable_SmartSAN; lpfc_vpd_t vpd; /* vital product data */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 343ae9482891..cfec2eca4dd3 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -4584,15 +4584,14 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); # lpfc_fdmi_on: Controls FDMI support. # 0 No FDMI support (default) # 1 Traditional FDMI support -# 2 Smart SAN support -# If lpfc_enable_SmartSAN is set 1, the driver sets lpfc_fdmi_on to value 2 -# overwriting the current value. If lpfc_enable_SmartSAN is set 0, the -# driver uses the current value of lpfc_fdmi_on provided it has value 0 or 1. -# A value of 2 with lpfc_enable_SmartSAN set to 0 causes the driver to -# set lpfc_fdmi_on back to 1. -# Value range [0,2]. Default value is 0. +# Traditional FDMI support means the driver will assume FDMI-2 support; +# however, if that fails, it will fallback to FDMI-1. +# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. +# If lpfc_enable_SmartSAN is set 0, the driver uses the current value of +# lpfc_fdmi_on. +# Value range [0,1]. Default value is 0. */ -LPFC_ATTR_R(fdmi_on, 0, 0, 2, "Enable FDMI support"); +LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); /* # Specifies the maximum number of ELS cmds we can have outstanding (for @@ -5150,7 +5149,6 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport) sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); } - /* * Dynamic FC Host Attributes Support */ @@ -5857,14 +5855,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) else phba->cfg_poll = lpfc_poll; - /* Ensure fdmi_on and enable_SmartSAN don't conflict */ - if (phba->cfg_enable_SmartSAN) { - phba->cfg_fdmi_on = LPFC_FDMI_SMART_SAN; - } else { - if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) - phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT; - } - phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 79e261d2a0c8..a38816e96654 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -2322,7 +2322,7 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; memset(ae, 0, 256); - strncpy(ae->un.AttrString, "Smart SAN Version 1.0", + strncpy(ae->un.AttrString, "Smart SAN Version 2.0", sizeof(ae->un.AttrString)); len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString)); @@ -2397,7 +2397,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, uint32_t size; ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - ae->un.AttrInt = cpu_to_be32(0); + ae->un.AttrInt = cpu_to_be32(1); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 7f5abb8f52bc..0498f5760d2b 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -690,16 +690,17 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); if (fabric_param_changed) { /* Reset FDMI attribute masks based on config parameter */ - if (phba->cfg_fdmi_on == LPFC_FDMI_NO_SUPPORT) { - vport->fdmi_hba_mask = 0; - vport->fdmi_port_mask = 0; - } else { + if (phba->cfg_enable_SmartSAN || + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { /* Setup appropriate attribute masks */ vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; - if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) + if (phba->cfg_enable_SmartSAN) vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; else vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; + } else { + vport->fdmi_hba_mask = 0; + vport->fdmi_port_mask = 0; } } @@ -1069,7 +1070,10 @@ stop_rr_fcf_flogi: lpfc_sli4_unreg_all_rpis(vport); } } - lpfc_issue_reg_vfi(vport); + + /* Do not register VFI if the driver aborted FLOGI */ + if (!lpfc_error_lost_link(irsp)) + lpfc_issue_reg_vfi(vport); lpfc_nlp_put(ndlp); goto out; } @@ -4705,6 +4709,144 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, desc->length = cpu_to_be32(sizeof(desc->info)); } +void +lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, + struct lpfc_vport *vport) +{ + desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); + + desc->bbc_info.port_bbc = cpu_to_be32( + vport->fc_sparam.cmn.bbCreditMsb | + vport->fc_sparam.cmn.bbCreditlsb << 8); + if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) + desc->bbc_info.attached_port_bbc = cpu_to_be32( + vport->phba->fc_fabparam.cmn.bbCreditMsb | + vport->phba->fc_fabparam.cmn.bbCreditlsb << 8); + else + desc->bbc_info.attached_port_bbc = 0; + + desc->bbc_info.rtt = 0; + desc->length = cpu_to_be32(sizeof(desc->bbc_info)); +} + +void +lpfc_rdp_res_oed_temp_desc(struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) +{ + uint32_t flags; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = + cpu_to_be16(page_a2[SSF_TEMP_HIGH_ALARM]); + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TEMP_LOW_ALARM]); + desc->oed_info.hi_warning = + cpu_to_be16(page_a2[SSF_TEMP_HIGH_WARNING]); + desc->oed_info.lo_warning = + 
cpu_to_be16(page_a2[SSF_TEMP_LOW_WARNING]); + flags = 0xf; /* All four are valid */ + flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); +} + +void +lpfc_rdp_res_oed_voltage_desc(struct fc_rdp_oed_sfp_desc *desc, + uint8_t *page_a2) +{ + uint32_t flags; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = + cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_ALARM]); + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_ALARM]); + desc->oed_info.hi_warning = + cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_WARNING]); + desc->oed_info.lo_warning = + cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_WARNING]); + flags = 0xf; /* All four are valid */ + flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); +} + +void +lpfc_rdp_res_oed_txbias_desc(struct fc_rdp_oed_sfp_desc *desc, + uint8_t *page_a2) +{ + uint32_t flags; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = + cpu_to_be16(page_a2[SSF_BIAS_HIGH_ALARM]); + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_BIAS_LOW_ALARM]); + desc->oed_info.hi_warning = + cpu_to_be16(page_a2[SSF_BIAS_HIGH_WARNING]); + desc->oed_info.lo_warning = + cpu_to_be16(page_a2[SSF_BIAS_LOW_WARNING]); + flags = 0xf; /* All four are valid */ + flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); +} + +void +lpfc_rdp_res_oed_txpower_desc(struct fc_rdp_oed_sfp_desc *desc, + uint8_t *page_a2) +{ + uint32_t flags; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = + cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_ALARM]); + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TXPOWER_LOW_ALARM]); + desc->oed_info.hi_warning = + cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_WARNING]); + desc->oed_info.lo_warning = + cpu_to_be16(page_a2[SSF_TXPOWER_LOW_WARNING]); + flags = 0xf; /* All four are valid */ + flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); +} + + +void +lpfc_rdp_res_oed_rxpower_desc(struct fc_rdp_oed_sfp_desc *desc, + uint8_t *page_a2) +{ + uint32_t flags; + + desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); + + desc->oed_info.hi_alarm = + cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_ALARM]); + desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_RXPOWER_LOW_ALARM]); + desc->oed_info.hi_warning = + cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_WARNING]); + desc->oed_info.lo_warning = + cpu_to_be16(page_a2[SSF_RXPOWER_LOW_WARNING]); + flags = 0xf; /* All four are valid */ + flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); + desc->oed_info.function_flags = cpu_to_be32(flags); + desc->length = cpu_to_be32(sizeof(desc->oed_info)); +} + +void +lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, + uint8_t *page_a0, struct lpfc_vport *vport) +{ + desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); + memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); + memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); + memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); + memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2); + memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); + desc->length = cpu_to_be32(sizeof(desc->opd_info)); +} + int lpfc_rdp_res_fec_desc(struct 
fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) { @@ -4776,6 +4918,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) if (rdp_cap == 0) rdp_cap = RDP_CAP_UNKNOWN; + if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) + rdp_cap |= RDP_CAP_USER_CONFIGURED; desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); desc->length = cpu_to_be32(sizeof(desc->info)); @@ -4875,6 +5019,19 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba); lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc, vport, ndlp); + lpfc_rdp_res_bbc_desc(&rdp_res->bbc_desc, &rdp_context->link_stat, + vport); + lpfc_rdp_res_oed_temp_desc(&rdp_res->oed_temp_desc, + rdp_context->page_a2); + lpfc_rdp_res_oed_voltage_desc(&rdp_res->oed_voltage_desc, + rdp_context->page_a2); + lpfc_rdp_res_oed_txbias_desc(&rdp_res->oed_txbias_desc, + rdp_context->page_a2); + lpfc_rdp_res_oed_txpower_desc(&rdp_res->oed_txpower_desc, + rdp_context->page_a2); + lpfc_rdp_res_oed_rxpower_desc(&rdp_res->oed_rxpower_desc, + rdp_context->page_a2); + lpfc_rdp_res_opd_desc(&rdp_res->opd_desc, rdp_context->page_a0, vport); fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc, &rdp_context->link_stat); rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE); @@ -7849,8 +8006,9 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) return; } - if ((phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) && - (vport->load_flag & FC_ALLOW_FDMI)) + if ((phba->cfg_enable_SmartSAN || + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && + (vport->load_flag & FC_ALLOW_FDMI)) lpfc_start_fdmi(vport); } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 25b5dcd1a5c8..ed223937798a 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -4545,7 +4545,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) (!(vport->load_flag & FC_UNLOADING)) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_IF_TYPE_2)) { + LPFC_SLI_INTF_IF_TYPE_2) && + (atomic_read(&ndlp->kref.refcount) > 0)) { mbox->context1 = lpfc_nlp_get(ndlp); mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index dd20412c7e4c..39f0fd000d2c 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -1134,9 +1134,10 @@ struct fc_rdp_link_error_status_desc { #define RDP_PS_16GB 0x0400 #define RDP_PS_32GB 0x0200 -#define RDP_CAP_UNKNOWN 0x0001 -#define RDP_PS_UNKNOWN 0x0002 -#define RDP_PS_NOT_ESTABLISHED 0x0001 +#define RDP_CAP_USER_CONFIGURED 0x0002 +#define RDP_CAP_UNKNOWN 0x0001 +#define RDP_PS_UNKNOWN 0x0002 +#define RDP_PS_NOT_ESTABLISHED 0x0001 struct fc_rdp_port_speed { uint16_t capabilities; @@ -1192,6 +1193,58 @@ struct fc_rdp_sfp_desc { struct fc_rdp_sfp_info sfp_info; }; +/* Buffer Credit Descriptor */ +struct fc_rdp_bbc_info { + uint32_t port_bbc; /* FC_Port buffer-to-buffer credit */ + uint32_t attached_port_bbc; + uint32_t rtt; /* Round trip time */ +}; +#define RDP_BBC_DESC_TAG 0x00010006 +struct fc_rdp_bbc_desc { + uint32_t tag; + uint32_t length; + struct fc_rdp_bbc_info bbc_info; +}; + +#define RDP_OED_TEMPERATURE 0x1 +#define RDP_OED_VOLTAGE 0x2 +#define RDP_OED_TXBIAS 0x3 +#define RDP_OED_TXPOWER 0x4 +#define RDP_OED_RXPOWER 0x5 + +#define RDP_OED_TYPE_SHIFT 28 +/* Optical Element Data descriptor */ +struct fc_rdp_oed_info { + uint16_t hi_alarm; + uint16_t lo_alarm; + uint16_t hi_warning; + uint16_t lo_warning; + uint32_t function_flags; +}; +#define RDP_OED_DESC_TAG 0x00010007 +struct fc_rdp_oed_sfp_desc { + uint32_t tag; + uint32_t length; + struct fc_rdp_oed_info oed_info; +}; + +/* Optical Product Data descriptor */ +struct fc_rdp_opd_sfp_info { + uint8_t vendor_name[16]; + uint8_t model_number[16]; + uint8_t serial_number[16]; + uint8_t reserved[2]; + uint8_t revision[2]; + uint8_t date[8]; +}; + +#define RDP_OPD_DESC_TAG 0x00010008 +struct fc_rdp_opd_sfp_desc { + uint32_t tag; + uint32_t length; + struct fc_rdp_opd_sfp_info opd_info; +}; + struct fc_rdp_req_frame { uint32_t rdp_command; /* ELS command opcode (0x18)*/ uint32_t rdp_des_length; /* RDP Payload Word 1 */ @@ -1208,7 +1261,14 @@ struct fc_rdp_res_frame { struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ - struct fc_fec_rdp_desc fec_desc; /* FC Word 34 - 37 */ + struct fc_rdp_bbc_desc bbc_desc; /* FC Word 34-38*/ + struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 39-43*/ + struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 44-48*/ + struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 49-53*/ + struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 54-58*/ + struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 59-63*/ + struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 64-80*/ + struct fc_fec_rdp_desc fec_desc; /* FC word 81-84*/ }; @@ -1216,7 +1276,10 @@ struct fc_rdp_res_frame { + sizeof(struct fc_rdp_sfp_desc) \ + sizeof(struct fc_rdp_port_speed_desc) \ + sizeof(struct fc_rdp_link_error_status_desc) \ - + (sizeof(struct fc_rdp_port_name_desc) * 2)) + + (sizeof(struct fc_rdp_port_name_desc) * 2) \ + + sizeof(struct fc_rdp_bbc_desc) \ + + (sizeof(struct fc_rdp_oed_sfp_desc) * 5) \ + + sizeof(struct fc_rdp_opd_sfp_desc)) /******** FDMI ********/ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 608f9415fb08..0c7070bf2813 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2015 Emulex. All rights reserved. * + * Copyright (C) 2009-2016 Emulex. 
All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -2557,7 +2557,26 @@ struct lpfc_mbx_memory_dump_type3 { /* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */ -#define SSF_AW_THRESHOLDS 0 +#define SSF_TEMP_HIGH_ALARM 0 +#define SSF_TEMP_LOW_ALARM 2 +#define SSF_TEMP_HIGH_WARNING 4 +#define SSF_TEMP_LOW_WARNING 6 +#define SSF_VOLTAGE_HIGH_ALARM 8 +#define SSF_VOLTAGE_LOW_ALARM 10 +#define SSF_VOLTAGE_HIGH_WARNING 12 +#define SSF_VOLTAGE_LOW_WARNING 14 +#define SSF_BIAS_HIGH_ALARM 16 +#define SSF_BIAS_LOW_ALARM 18 +#define SSF_BIAS_HIGH_WARNING 20 +#define SSF_BIAS_LOW_WARNING 22 +#define SSF_TXPOWER_HIGH_ALARM 24 +#define SSF_TXPOWER_LOW_ALARM 26 +#define SSF_TXPOWER_HIGH_WARNING 28 +#define SSF_TXPOWER_LOW_WARNING 30 +#define SSF_RXPOWER_HIGH_ALARM 32 +#define SSF_RXPOWER_LOW_ALARM 34 +#define SSF_RXPOWER_HIGH_WARNING 36 +#define SSF_RXPOWER_LOW_WARNING 38 #define SSF_EXT_CAL_CONSTANTS 56 #define SSF_CC_DMI 95 #define SFF_TEMPERATURE_B1 96 @@ -2865,6 +2884,9 @@ struct lpfc_sli4_parameters { uint32_t word17; uint32_t word18; uint32_t word19; +#define cfg_ext_embed_cb_SHIFT 0 +#define cfg_ext_embed_cb_MASK 0x00000001 +#define cfg_ext_embed_cb_WORD word19 }; struct lpfc_mbx_get_sli4_parameters { @@ -3919,6 +3941,9 @@ union lpfc_wqe { union lpfc_wqe128 { uint32_t words[32]; struct lpfc_wqe_generic generic; + struct fcp_icmnd64_wqe fcp_icmd; + struct fcp_iread64_wqe fcp_iread; + struct fcp_iwrite64_wqe fcp_iwrite; struct xmit_seq64_wqe xmit_sequence; struct gen_req64_wqe gen_req; }; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index f57d02c3b6cf..b43f7ac9812c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -6158,11 +6158,12 @@ lpfc_create_shost(struct lpfc_hba *phba) * any initial discovery should be completed. */ vport->load_flag |= FC_ALLOW_FDMI; - if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) { + if (phba->cfg_enable_SmartSAN || + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { /* Setup appropriate attribute masks */ vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; - if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN) + if (phba->cfg_enable_SmartSAN) vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; else vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; @@ -7264,8 +7265,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.fcp_cq[idx] = qdesc; /* Create Fast Path FCP WQs */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, - phba->sli4_hba.wq_ecount); + if (phba->fcp_embed_io) { + qdesc = lpfc_sli4_queue_alloc(phba, + LPFC_WQE128_SIZE, + LPFC_WQE128_DEF_COUNT); + } else { + qdesc = lpfc_sli4_queue_alloc(phba, + phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + } if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0503 Failed allocate fast-path FCP " @@ -9510,6 +9518,15 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; + /* + * Issue IOs with CDB embedded in WQE to minimized the number + * of DMAs the firmware has to do. 
Setting this to 1 also forces + * the driver to use 128 bytes WQEs for FCP IOs. + */ + if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) + phba->fcp_embed_io = 1; + else + phba->fcp_embed_io = 0; return 0; } diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index f87f90e9b7df..12dbe99ccc50 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -2145,10 +2145,12 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); reg_vfi->e_d_tov = phba->fc_edtov; reg_vfi->r_a_tov = phba->fc_ratov; - reg_vfi->bde.addrHigh = putPaddrHigh(phys); - reg_vfi->bde.addrLow = putPaddrLow(phys); - reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); - reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + if (phys) { + reg_vfi->bde.addrHigh = putPaddrHigh(phys); + reg_vfi->bde.addrLow = putPaddrLow(phys); + reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); + reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + } bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); /* Only FC supports upd bit */ diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 4fb3581d4614..3fa65338d3f5 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -231,13 +231,15 @@ lpfc_mem_free(struct lpfc_hba *phba) if (phba->lpfc_hbq_pool) pci_pool_destroy(phba->lpfc_hbq_pool); phba->lpfc_hbq_pool = NULL; - mempool_destroy(phba->rrq_pool); + + if (phba->rrq_pool) + mempool_destroy(phba->rrq_pool); phba->rrq_pool = NULL; /* Free NLP memory pool */ mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; - if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { mempool_destroy(phba->active_rrq_pool); phba->active_rrq_pool = NULL; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 193733e8c823..56a3df4fddb0 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -1512,6 +1512,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; lpfc_nlp_put(ndlp); mb->context2 = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -1527,6 +1528,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, __lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; lpfc_nlp_put(ndlp); list_del(&mb->list); phba->sli.mboxq_cnt--; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2207726b88ee..70edf21ae1b9 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -2000,10 +2000,9 @@ lpfc_sli_hbqbuf_get(struct list_head *rb_list) * @phba: Pointer to HBA context object. * @tag: Tag of the hbq buffer. * - * This function is called with hbalock held. This function searches - * for the hbq buffer associated with the given tag in the hbq buffer - * list. If it finds the hbq buffer, it returns the hbq_buffer other wise - * it returns NULL. + * This function searches for the hbq buffer associated with the given tag in + * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer + * otherwise it returns NULL. 
**/ static struct hbq_dmabuf * lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) @@ -2012,8 +2011,6 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) struct hbq_dmabuf *hbq_buf; uint32_t hbqno; - lockdep_assert_held(&phba->hbalock); - hbqno = tag >> 16; if (hbqno >= LPFC_MAX_HBQS) return NULL; @@ -2211,6 +2208,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) rpi = pmb->u.mb.un.varWords[0]; vpi = pmb->u.mb.un.varRegLogin.vpi; lpfc_unreg_login(phba, vpi, rpi, pmb); + pmb->vport = vport; pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_NOT_FINISHED) @@ -4688,6 +4686,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) break; } + phba->fcp_embed_io = 0; /* SLI4 FC support only */ rc = lpfc_sli_config_port(phba, mode); @@ -6320,10 +6319,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) mqe = &mboxq->u.mqe; phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); - if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) + if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { phba->hba_flag |= HBA_FCOE_MODE; - else + phba->fcp_embed_io = 0; /* SLI4 FC support only */ + } else { phba->hba_flag &= ~HBA_FCOE_MODE; + } if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == LPFC_DCBX_CEE_MODE) @@ -8218,12 +8219,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, else command_type = ELS_COMMAND_NON_FIP; + if (phba->fcp_embed_io) + memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Some of the fields are in the right position already */ memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); - abort_tag = (uint32_t) iocbq->iotag; - xritag = iocbq->sli4_xritag; wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ wqe->generic.wqe_com.word10 = 0; + + abort_tag = (uint32_t) iocbq->iotag; + xritag = iocbq->sli4_xritag; /* words0-2 bpl convert bde */ if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / @@ -8372,11 +8376,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, iocbq->iocb.ulpFCP2Rcvy); bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); /* Always open the exchange */ - bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4); - bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); if (iocbq->iocb_flag & LPFC_IO_OAS) { @@ -8387,6 +8389,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, (phba->cfg_XLanePriority << 1)); } } + /* Note, word 10 is already initialized to 0 */ + + if (phba->fcp_embed_io) { + struct lpfc_scsi_buf *lpfc_cmd; + struct sli4_sge *sgl; + union lpfc_wqe128 *wqe128; + struct fcp_cmnd *fcp_cmnd; + uint32_t *ptr; + + /* 128 byte wqe support here */ + wqe128 = (union lpfc_wqe128 *)wqe; + + lpfc_cmd = iocbq->context1; + sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + fcp_cmnd = lpfc_cmd->fcp_cmnd; + + /* Word 0-2 - FCP_CMND */ + wqe128->generic.bde.tus.f.bdeFlags = + BUFF_TYPE_BDE_IMMED; + wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; + wqe128->generic.bde.addrHigh = 0; + wqe128->generic.bde.addrLow = 88; /* Word 22 */ + + bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1); + + /* Word 22-29 FCP CMND Payload */ + ptr = &wqe128->words[22]; + memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); + } break; case CMD_FCP_IREAD64_CR: /* 
word3 iocb=iotag wqe=payload_offset_len */ @@ -8401,11 +8432,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, iocbq->iocb.ulpFCP2Rcvy); bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); /* Always open the exchange */ - bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4); - bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); if (iocbq->iocb_flag & LPFC_IO_OAS) { @@ -8416,6 +8445,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, (phba->cfg_XLanePriority << 1)); } } + /* Note, word 10 is already initialized to 0 */ + + if (phba->fcp_embed_io) { + struct lpfc_scsi_buf *lpfc_cmd; + struct sli4_sge *sgl; + union lpfc_wqe128 *wqe128; + struct fcp_cmnd *fcp_cmnd; + uint32_t *ptr; + + /* 128 byte wqe support here */ + wqe128 = (union lpfc_wqe128 *)wqe; + + lpfc_cmd = iocbq->context1; + sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + fcp_cmnd = lpfc_cmd->fcp_cmnd; + + /* Word 0-2 - FCP_CMND */ + wqe128->generic.bde.tus.f.bdeFlags = + BUFF_TYPE_BDE_IMMED; + wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; + wqe128->generic.bde.addrHigh = 0; + wqe128->generic.bde.addrLow = 88; /* Word 22 */ + + bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1); + + /* Word 22-29 FCP CMND Payload */ + ptr = &wqe128->words[22]; + memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); + } break; case CMD_FCP_ICMND64_CR: /* word3 iocb=iotag wqe=payload_offset_len */ @@ -8427,13 +8485,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, /* word3 iocb=IO_TAG wqe=reserved */ bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); /* Always open the exchange */ - bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE); - bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, iocbq->iocb.ulpFCP2Rcvy); if (iocbq->iocb_flag & LPFC_IO_OAS) { @@ -8444,6 +8500,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, (phba->cfg_XLanePriority << 1)); } } + /* Note, word 10 is already initialized to 0 */ + + if (phba->fcp_embed_io) { + struct lpfc_scsi_buf *lpfc_cmd; + struct sli4_sge *sgl; + union lpfc_wqe128 *wqe128; + struct fcp_cmnd *fcp_cmnd; + uint32_t *ptr; + + /* 128 byte wqe support here */ + wqe128 = (union lpfc_wqe128 *)wqe; + + lpfc_cmd = iocbq->context1; + sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + fcp_cmnd = lpfc_cmd->fcp_cmnd; + + /* Word 0-2 - FCP_CMND */ + wqe128->generic.bde.tus.f.bdeFlags = + BUFF_TYPE_BDE_IMMED; + wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; + wqe128->generic.bde.addrHigh = 0; + wqe128->generic.bde.addrLow = 88; /* Word 22 */ + + bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1); + + /* Word 22-29 FCP CMND Payload */ + ptr = &wqe128->words[22]; + memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); + } break; case CMD_GEN_REQUEST64_CR: /* For this command calculate the xmit length of the @@ -8675,12 +8760,19 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_sglq *sglq; - union lpfc_wqe wqe; + union lpfc_wqe *wqe; + union lpfc_wqe128 wqe128; struct lpfc_queue *wq; struct lpfc_sli_ring *pring = 
&phba->sli.ring[ring_number]; lockdep_assert_held(&phba->hbalock); + /* + * The WQE can be either 64 or 128 bytes, + * so allocate space on the stack assuming the largest. + */ + wqe = (union lpfc_wqe *)&wqe128; + if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) @@ -8727,7 +8819,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_ERROR; } - if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) + if (lpfc_sli4_iocb2wqe(phba, piocb, wqe)) return IOCB_ERROR; if ((piocb->iocb_flag & LPFC_IO_FCP) || @@ -8737,12 +8829,12 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, } else { wq = phba->sli4_hba.oas_wq; } - if (lpfc_sli4_wq_put(wq, &wqe)) + if (lpfc_sli4_wq_put(wq, wqe)) return IOCB_ERROR; } else { if (unlikely(!phba->sli4_hba.els_wq)) return IOCB_ERROR; - if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) + if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe)) return IOCB_ERROR; } lpfc_sli_ringtxcmpl_put(phba, pring, piocb); @@ -8757,9 +8849,9 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, * pointer from the lpfc_hba struct. * * Return codes: - * IOCB_ERROR - Error - * IOCB_SUCCESS - Success - * IOCB_BUSY - Busy + * IOCB_ERROR - Error + * IOCB_SUCCESS - Success + * IOCB_BUSY - Busy **/ int __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4dc22562aaf1..fa0d531bf351 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2015 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "11.0.0.10." +#define LPFC_DRIVER_VERSION "11.1.0.0." #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -30,4 +30,4 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved." +#define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex. All rights reserved." diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index b3f85def18cc..c27f4b724547 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -395,7 +395,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) /* At this point we are fully registered with SCSI Layer. 
*/ vport->load_flag |= FC_ALLOW_FDMI; - if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) { + if (phba->cfg_enable_SmartSAN || + (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { /* Setup appropriate attribute masks */ vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; vport->fdmi_port_mask = phba->pport->fdmi_port_mask; diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index bb2381314a2b..a590089b9397 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -28,24 +28,23 @@ /* Definitions for the core NCR5380 driver. */ -#define PSEUDO_DMA - -#define NCR5380_implementation_fields unsigned char *pdma_base +#define NCR5380_implementation_fields unsigned char *pdma_base; \ + int pdma_residual #define NCR5380_read(reg) macscsi_read(instance, reg) #define NCR5380_write(reg, value) macscsi_write(instance, reg, value) -#define NCR5380_pread macscsi_pread -#define NCR5380_pwrite macscsi_pwrite -#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) +#define NCR5380_dma_xfer_len(instance, cmd, phase) \ + macscsi_dma_xfer_len(instance, cmd) +#define NCR5380_dma_recv_setup macscsi_pread +#define NCR5380_dma_send_setup macscsi_pwrite +#define NCR5380_dma_residual(instance) (hostdata->pdma_residual) #define NCR5380_intr macscsi_intr #define NCR5380_queue_command macscsi_queue_command #define NCR5380_abort macscsi_abort #define NCR5380_bus_reset macscsi_bus_reset #define NCR5380_info macscsi_info -#define NCR5380_show_info macscsi_show_info -#define NCR5380_write_info macscsi_write_info #include "NCR5380.h" @@ -57,8 +56,6 @@ static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); static int setup_use_pdma = -1; module_param(setup_use_pdma, int, 0); -static int setup_use_tagged_queuing = -1; -module_param(setup_use_tagged_queuing, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); static int setup_toshiba_delay = -1; @@ -97,8 +94,7 @@ static int __init mac_scsi_setup(char *str) setup_sg_tablesize = ints[3]; if (ints[0] >= 4) setup_hostid = ints[4]; - if (ints[0] >= 5) - setup_use_tagged_queuing = ints[5]; + /* ints[5] (use_tagged_queuing) is ignored */ if (ints[0] >= 6) setup_use_pdma = ints[6]; if (ints[0] >= 7) @@ -109,19 +105,9 @@ static int __init mac_scsi_setup(char *str) __setup("mac5380=", mac_scsi_setup); #endif /* !MODULE */ -#ifdef PSEUDO_DMA -/* - Pseudo-DMA: (Ove Edlund) - The code attempts to catch bus errors that occur if one for example - "trips over the cable". - XXX: Since bus errors in the PDMA routines never happen on my - computer, the bus error code is untested. - If the code works as intended, a bus error results in Pseudo-DMA - being disabled, meaning that the driver switches to slow handshake. - If bus errors are NOT extremely rare, this has to be changed. 
-*/ - -#define CP_IO_TO_MEM(s,d,len) \ +/* Pseudo DMA asm originally by Ove Edlund */ + +#define CP_IO_TO_MEM(s,d,n) \ __asm__ __volatile__ \ (" cmp.w #4,%2\n" \ " bls 8f\n" \ @@ -158,61 +144,73 @@ __asm__ __volatile__ \ " 9: \n" \ ".section .fixup,\"ax\"\n" \ " .even\n" \ - "90: moveq.l #1, %2\n" \ + "91: moveq.l #1, %2\n" \ + " jra 9b\n" \ + "94: moveq.l #4, %2\n" \ " jra 9b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ - " .long 1b,90b\n" \ - " .long 3b,90b\n" \ - " .long 31b,90b\n" \ - " .long 32b,90b\n" \ - " .long 33b,90b\n" \ - " .long 34b,90b\n" \ - " .long 35b,90b\n" \ - " .long 36b,90b\n" \ - " .long 37b,90b\n" \ - " .long 5b,90b\n" \ - " .long 7b,90b\n" \ + " .long 1b,91b\n" \ + " .long 3b,94b\n" \ + " .long 31b,94b\n" \ + " .long 32b,94b\n" \ + " .long 33b,94b\n" \ + " .long 34b,94b\n" \ + " .long 35b,94b\n" \ + " .long 36b,94b\n" \ + " .long 37b,94b\n" \ + " .long 5b,94b\n" \ + " .long 7b,91b\n" \ ".previous" \ - : "=a"(s), "=a"(d), "=d"(len) \ - : "0"(s), "1"(d), "2"(len) \ + : "=a"(s), "=a"(d), "=d"(n) \ + : "0"(s), "1"(d), "2"(n) \ : "d0") static int macscsi_pread(struct Scsi_Host *instance, unsigned char *dst, int len) { struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned char *d; - unsigned char *s; - - s = hostdata->pdma_base + (INPUT_DATA_REG << 4); - d = dst; - - /* These conditions are derived from MacOS */ - - while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && - !(NCR5380_read(STATUS_REG) & SR_REQ)) - ; - - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && - (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { - pr_err("Error in macscsi_pread\n"); - return -1; + unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4); + unsigned char *d = dst; + int n = len; + int transferred; + + while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, + BASR_DRQ | BASR_PHASE_MATCH, + BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { + CP_IO_TO_MEM(s, d, n); + + transferred = d - dst - n; + hostdata->pdma_residual = len - transferred; + + /* No bus error. */ + if (n == 0) + return 0; + + /* Target changed phase early? 
*/ + if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, + BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) + scmd_printk(KERN_ERR, hostdata->connected, + "%s: !REQ and !ACK\n", __func__); + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) + return 0; + + dsprintk(NDEBUG_PSEUDO_DMA, instance, + "%s: bus error (%d/%d)\n", __func__, transferred, len); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); + d = dst + transferred; + n = len - transferred; } - CP_IO_TO_MEM(s, d, len); - - if (len != 0) { - pr_notice("Bus error in macscsi_pread\n"); - return -1; - } - - return 0; + scmd_printk(KERN_ERR, hostdata->connected, + "%s: phase mismatch or !DRQ\n", __func__); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); + return -1; } -#define CP_MEM_TO_IO(s,d,len) \ +#define CP_MEM_TO_IO(s,d,n) \ __asm__ __volatile__ \ (" cmp.w #4,%2\n" \ " bls 8f\n" \ @@ -249,59 +247,89 @@ __asm__ __volatile__ \ " 9: \n" \ ".section .fixup,\"ax\"\n" \ " .even\n" \ - "90: moveq.l #1, %2\n" \ + "91: moveq.l #1, %2\n" \ + " jra 9b\n" \ + "94: moveq.l #4, %2\n" \ " jra 9b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ - " .long 1b,90b\n" \ - " .long 3b,90b\n" \ - " .long 31b,90b\n" \ - " .long 32b,90b\n" \ - " .long 33b,90b\n" \ - " .long 34b,90b\n" \ - " .long 35b,90b\n" \ - " .long 36b,90b\n" \ - " .long 37b,90b\n" \ - " .long 5b,90b\n" \ - " .long 7b,90b\n" \ + " .long 1b,91b\n" \ + " .long 3b,94b\n" \ + " .long 31b,94b\n" \ + " .long 32b,94b\n" \ + " .long 33b,94b\n" \ + " .long 34b,94b\n" \ + " .long 35b,94b\n" \ + " .long 36b,94b\n" \ + " .long 37b,94b\n" \ + " .long 5b,94b\n" \ + " .long 7b,91b\n" \ ".previous" \ - : "=a"(s), "=a"(d), "=d"(len) \ - : "0"(s), "1"(d), "2"(len) \ + : "=a"(s), "=a"(d), "=d"(n) \ + : "0"(s), "1"(d), "2"(n) \ : "d0") static int macscsi_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) { struct NCR5380_hostdata *hostdata = shost_priv(instance); - unsigned char *s; - unsigned char *d; - - s = src; - d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); - - /* These conditions are derived from MacOS */ + unsigned char *s = src; + unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); + int n = len; + int transferred; + + while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, + BASR_DRQ | BASR_PHASE_MATCH, + BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) { + CP_MEM_TO_IO(s, d, n); + + transferred = s - src - n; + hostdata->pdma_residual = len - transferred; + + /* Target changed phase early? */ + if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ, + BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0) + scmd_printk(KERN_ERR, hostdata->connected, + "%s: !REQ and !ACK\n", __func__); + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) + return 0; + + /* No bus error. 
*/ + if (n == 0) { + if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG, + TCR_LAST_BYTE_SENT, + TCR_LAST_BYTE_SENT, HZ / 64) < 0) + scmd_printk(KERN_ERR, hostdata->connected, + "%s: Last Byte Sent timeout\n", __func__); + return 0; + } + + dsprintk(NDEBUG_PSEUDO_DMA, instance, + "%s: bus error (%d/%d)\n", __func__, transferred, len); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); + s = src + transferred; + n = len - transferred; + } - while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && - (!(NCR5380_read(STATUS_REG) & SR_REQ) || - (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) - ; + scmd_printk(KERN_ERR, hostdata->connected, + "%s: phase mismatch or !DRQ\n", __func__); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { - pr_err("Error in macscsi_pwrite\n"); - return -1; - } + return -1; +} - CP_MEM_TO_IO(s, d, len); +static int macscsi_dma_xfer_len(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); - if (len != 0) { - pr_notice("Bus error in macscsi_pwrite\n"); - return -1; - } + if (hostdata->flags & FLAG_NO_PSEUDO_DMA || + cmd->SCp.this_residual < 16) + return 0; - return 0; + return cmd->SCp.this_residual; } -#endif #include "NCR5380.c" @@ -311,8 +339,6 @@ static int macscsi_pwrite(struct Scsi_Host *instance, static struct scsi_host_template mac_scsi_template = { .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, - .show_info = macscsi_show_info, - .write_info = macscsi_write_info, .name = "Macintosh NCR5380 SCSI", .info = macscsi_info, .queuecommand = macscsi_queue_command, @@ -320,7 +346,7 @@ static struct scsi_host_template mac_scsi_template = { .eh_bus_reset_handler = macscsi_bus_reset, .can_queue = 16, .this_id = 7, - .sg_tablesize = SG_ALL, + .sg_tablesize = 1, .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, .cmd_size = NCR5380_CMD_SIZE, @@ -338,9 +364,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev) if (!pio_mem) return -ENODEV; -#ifdef PSEUDO_DMA pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); -#endif irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); @@ -358,8 +382,6 @@ static int __init mac_scsi_probe(struct platform_device *pdev) mac_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) mac_scsi_template.this_id = setup_hostid & 7; - if (setup_use_pdma < 0) - setup_use_pdma = 0; instance = scsi_host_alloc(&mac_scsi_template, sizeof(struct NCR5380_hostdata)); @@ -379,12 +401,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev) } else host_flags |= FLAG_NO_PSEUDO_DMA; -#ifdef SUPPORT_TAGS - host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; -#endif host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0; - error = NCR5380_init(instance, host_flags); + error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP); if (error) goto fail_init; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index fce414a2cd76..ca86c885dfaa 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.810.09.00-rc1" -#define MEGASAS_RELDATE "Jan. 
28, 2016" +#define MEGASAS_VERSION "06.811.02.00-rc1" +#define MEGASAS_RELDATE "April 12, 2016" /* * Device IDs @@ -1344,6 +1344,8 @@ struct megasas_ctrl_info { #define SCAN_PD_CHANNEL 0x1 #define SCAN_VD_CHANNEL 0x2 +#define MEGASAS_KDUMP_QUEUE_DEPTH 100 + enum MR_SCSI_CMD_TYPE { READ_WRITE_LDIO = 0, NON_READ_WRITE_LDIO = 1, diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e6ebc7ae2df1..f4b0690450d2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2670,17 +2670,6 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) } /** - * megasas_reset_device - Device reset handler entry point - */ -static int megasas_reset_device(struct scsi_cmnd *scmd) -{ - /* - * First wait for all commands to complete - */ - return megasas_generic_reset(scmd); -} - -/** * megasas_reset_bus_host - Bus & host reset handler entry point */ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) @@ -2702,6 +2691,50 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) } /** + * megasas_task_abort - Issues task abort request to firmware + * (supported only for fusion adapters) + * @scmd: SCSI command pointer + */ +static int megasas_task_abort(struct scsi_cmnd *scmd) +{ + int ret; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + if (instance->ctrl_context) + ret = megasas_task_abort_fusion(scmd); + else { + sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); + ret = FAILED; + } + + return ret; +} + +/** + * megasas_reset_target: Issues target reset request to firmware + * (supported only for fusion adapters) + * @scmd: SCSI command pointer + */ +static int megasas_reset_target(struct scsi_cmnd *scmd) +{ + int ret; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + if (instance->ctrl_context) + ret = megasas_reset_target_fusion(scmd); + else { + sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); + ret = FAILED; + } + + return ret; +} + +/** * megasas_bios_param - Returns disk geometry for a disk * @sdev: device handle * @bdev: block device @@ -2969,8 +3002,8 @@ static struct scsi_host_template megasas_template = { .slave_alloc = megasas_slave_alloc, .slave_destroy = megasas_slave_destroy, .queuecommand = megasas_queue_command, - .eh_device_reset_handler = megasas_reset_device, - .eh_bus_reset_handler = megasas_reset_bus_host, + .eh_target_reset_handler = megasas_reset_target, + .eh_abort_handler = megasas_task_abort, .eh_host_reset_handler = megasas_reset_bus_host, .eh_timed_out = megasas_reset_timer, .shost_attrs = megaraid_host_attrs, @@ -5152,7 +5185,7 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->instancet->enable_intr(instance); - dev_err(&instance->pdev->dev, "INIT adapter done\n"); + dev_info(&instance->pdev->dev, "INIT adapter done\n"); megasas_setup_jbod_map(instance); @@ -5598,14 +5631,6 @@ static int megasas_io_attach(struct megasas_instance *instance) host->max_lun = MEGASAS_MAX_LUN; host->max_cmd_len = 16; - /* Fusion only supports host reset */ - if (instance->ctrl_context) { - host->hostt->eh_device_reset_handler = NULL; - host->hostt->eh_bus_reset_handler = NULL; - host->hostt->eh_target_reset_handler = megasas_reset_target_fusion; - host->hostt->eh_abort_handler = megasas_task_abort_fusion; - } - /* * Notify the mid-layer about the new controller */ @@ -5761,13 +5786,6 @@ static 
int megasas_probe_one(struct pci_dev *pdev, break; } - instance->system_info_buf = pci_zalloc_consistent(pdev, - sizeof(struct MR_DRV_SYSTEM_INFO), - &instance->system_info_h); - - if (!instance->system_info_buf) - dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); - /* Crash dump feature related initialisation*/ instance->drv_buf_index = 0; instance->drv_buf_alloc = 0; @@ -5777,14 +5795,6 @@ static int megasas_probe_one(struct pci_dev *pdev, spin_lock_init(&instance->crashdump_lock); instance->crash_dump_buf = NULL; - if (!reset_devices) - instance->crash_dump_buf = pci_alloc_consistent(pdev, - CRASH_DMA_BUF_SIZE, - &instance->crash_dump_h); - if (!instance->crash_dump_buf) - dev_err(&pdev->dev, "Can't allocate Firmware " - "crash dump DMA buffer\n"); - megasas_poll_wait_aen = 0; instance->flag_ieee = 0; instance->ev = NULL; @@ -5803,11 +5813,26 @@ static int megasas_probe_one(struct pci_dev *pdev, goto fail_alloc_dma_buf; } - instance->pd_info = pci_alloc_consistent(pdev, - sizeof(struct MR_PD_INFO), &instance->pd_info_h); + if (!reset_devices) { + instance->system_info_buf = pci_zalloc_consistent(pdev, + sizeof(struct MR_DRV_SYSTEM_INFO), + &instance->system_info_h); + if (!instance->system_info_buf) + dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); + + instance->pd_info = pci_alloc_consistent(pdev, + sizeof(struct MR_PD_INFO), &instance->pd_info_h); - if (!instance->pd_info) - dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); + if (!instance->pd_info) + dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); + + instance->crash_dump_buf = pci_alloc_consistent(pdev, + CRASH_DMA_BUF_SIZE, + &instance->crash_dump_h); + if (!instance->crash_dump_buf) + dev_err(&pdev->dev, "Can't allocate Firmware " + "crash dump DMA buffer\n"); + } /* * Initialize locks and queues @@ -7174,6 +7199,16 @@ static int __init megasas_init(void) int rval; /* + * Booted in kdump kernel, minimize memory footprints by + * disabling few features + */ + if (reset_devices) { + msix_vectors = 1; + rdpq_enable = 0; + dual_qdepth_disable = 1; + } + + /* * Announce driver version and other information */ pr_info("megasas: %s\n", MEGASAS_VERSION); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 98a848bdfdc2..ec837544f784 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -257,6 +257,9 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c if (!instance->is_rdpq) instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024); + if (reset_devices) + instance->max_fw_cmds = min(instance->max_fw_cmds, + (u16)MEGASAS_KDUMP_QUEUE_DEPTH); /* * Reduce the max supported cmds by 1. 
This is to ensure that the * reply_q_sz (1 more than the max cmd that driver may send) @@ -851,7 +854,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) ret = 1; goto fail_fw_init; } - dev_err(&instance->pdev->dev, "Init cmd success\n"); + dev_info(&instance->pdev->dev, "Init cmd success\n"); ret = 0; @@ -2759,6 +2762,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, dev_warn(&instance->pdev->dev, "Found FW in FAULT state," " will reset adapter scsi%d.\n", instance->host->host_no); + megasas_complete_cmd_dpc_fusion((unsigned long)instance); retval = 1; goto out; } @@ -2766,6 +2770,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, if (reason == MFI_IO_TIMEOUT_OCR) { dev_info(&instance->pdev->dev, "MFI IO is timed out, initiating OCR\n"); + megasas_complete_cmd_dpc_fusion((unsigned long)instance); retval = 1; goto out; } diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index dfad5b8c1890..a9a659fc2812 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.39 + * mpi2.h Version: 02.00.42 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -100,6 +100,9 @@ * Added MPI2_DIAG_SBR_RELOAD. * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT. * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT + * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT * -------------------------------------------------------------------------- */ @@ -139,7 +142,7 @@ #define MPI2_VERSION_02_06 (0x0206) /*Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x27) +#define MPI2_HEADER_VERSION_UNIT (0x2A) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 9cf09bf7c4a8..95356a82ee99 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -6,7 +6,7 @@ * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.33 + * mpi2_cnfg.h Version: 02.00.35 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -183,9 +183,12 @@ * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG. * Added AdapterOrderAux fields to BIOS Page 3. * 03-16-15 02.00.31 Updated for MPI v2.6. + * Added Flags field to IO Unit Page 7. * Added new SAS Phy Event codes * 05-25-15 02.00.33 Added more defines for the BiosOptions field of * MPI2_CONFIG_PAGE_BIOS_1. + * 08-25-15 02.00.34 Bumped Header Version. + * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4. 
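Both megaraid_sas hunks above shrink resources when the kernel was booted for crash dumping: megasas_init() forces single-MSIx, non-RDPQ operation, and megasas_fusion_update_can_queue() caps the firmware queue depth at MEGASAS_KDUMP_QUEUE_DEPTH. A small standalone sketch of that clamp follows; tune_fw_cmds() is a hypothetical helper, assuming the same 100-command kdump depth.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KDUMP_QUEUE_DEPTH 100    /* mirrors MEGASAS_KDUMP_QUEUE_DEPTH */

/* Hypothetical helper: reduce the firmware command depth when running in
 * a memory-constrained kdump kernel (reset_devices set). */
static uint16_t tune_fw_cmds(uint16_t fw_max, bool kdump)
{
    if (kdump && fw_max > KDUMP_QUEUE_DEPTH)
        fw_max = KDUMP_QUEUE_DEPTH;
    return fw_max;
}

int main(void)
{
    printf("normal boot: %u commands\n", (unsigned)tune_fw_cmds(1008, false));
    printf("kdump boot:  %u commands\n", (unsigned)tune_fw_cmds(1008, true));
    return 0;
}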
* -------------------------------------------------------------------------- */ @@ -958,13 +961,16 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { U8 Reserved3; /*0x17 */ U32 BoardPowerRequirement; /*0x18 */ U32 PCISlotPowerAllocation; /*0x1C */ - U32 Reserved6; /* 0x20 */ - U32 Reserved7; /* 0x24 */ +/* reserved prior to MPI v2.6 */ + U8 Flags; /* 0x20 */ + U8 Reserved6; /* 0x21 */ + U16 Reserved7; /* 0x22 */ + U32 Reserved8; /* 0x24 */ } MPI2_CONFIG_PAGE_IO_UNIT_7, *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; -#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04) +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x05) /*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ #define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) @@ -1045,6 +1051,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) +/* defines for IO Unit Page 7 Flags field */ +#define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01) /*IO Unit Page 8 */ @@ -2271,7 +2279,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 { U8 BootDeviceWaitTime; /*0x24 */ U8 - Reserved4; /*0x25 */ + SATADeviceWaitTime; /*0x25 */ U16 Reserved5; /*0x26 */ U8 diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h index c38f624b859d..bba56b61d36c 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h @@ -6,7 +6,7 @@ * Title: MPI SCSI initiator mode messages and structures * Creation Date: June 23, 2006 * - * mpi2_init.h Version: 02.00.17 + * mpi2_init.h Version: 02.00.20 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -51,6 +51,9 @@ * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH. * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF. + * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. + * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. + * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. * -------------------------------------------------------------------------- */ @@ -359,8 +362,14 @@ typedef struct _MPI2_SCSI_IO_REPLY { U16 TaskTag; /*0x20 */ U16 SCSIStatusQualifier; /* 0x22 */ U32 BidirectionalTransferCount; /*0x24 */ - U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/ - U32 Reserved6; /*0x2C */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U32 EEDPErrorOffset; /* 0x28 */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U16 EEDPObservedAppTag; /* 0x2C */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U16 EEDPObservedGuard; /* 0x2E */ + /* MPI 2.5+ only; Reserved in MPI 2.0 */ + U32 EEDPObservedRefTag; /* 0x30 */ } MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index cf510ed91924..8bae305bc156 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -6,7 +6,7 @@ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: October 11, 2006 * - * mpi2_ioc.h Version: 02.00.26 + * mpi2_ioc.h Version: 02.00.27 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -134,9 +134,13 @@ * Added Encrypted Hash Extended Image. * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. 
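The mpi2_init.h hunk earlier in this chunk replaces a reserved word in the SCSI IO reply with three observed-EEDP fields and annotates their byte offsets (0x2C, 0x2E, 0x30). The standalone check below confirms that a plain C layout of that tail fragment lands on those offsets; struct eedp_tail is an invented cut-down mirror, not the MPI structure itself.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented cut-down mirror of the reply tail added by the hunk; offsets
 * are relative to 0x28, where EEDPErrorOffset sits in the full reply. */
struct eedp_tail {
    uint32_t EEDPErrorOffset;      /* 0x28 */
    uint16_t EEDPObservedAppTag;   /* 0x2C */
    uint16_t EEDPObservedGuard;    /* 0x2E */
    uint32_t EEDPObservedRefTag;   /* 0x30 */
};

int main(void)
{
    printf("AppTag at 0x%zx\n", 0x28 + offsetof(struct eedp_tail, EEDPObservedAppTag));
    printf("Guard  at 0x%zx\n", 0x28 + offsetof(struct eedp_tail, EEDPObservedGuard));
    printf("RefTag at 0x%zx\n", 0x28 + offsetof(struct eedp_tail, EEDPObservedRefTag));
    return 0;
}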
* 11-18-14 02.00.25 Updated copyright information. - * 03-16-15 02.00.26 Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and + * 03-16-15 02.00.26 Updated for MPI v2.6. + * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and + * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT. + * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. * Added MPI26_CTRL_OP_SHUTDOWN. + * 08-25-15 02.00.27 Added IC ARCH Class based signature defines * -------------------------------------------------------------------------- */ @@ -168,7 +172,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST { U16 MsgVersion; /*0x0C */ U16 HeaderVersion; /*0x0E */ U32 Reserved5; /*0x10 */ - U16 Reserved6; /*0x14 */ + U16 ConfigurationFlags; /* 0x14 */ U8 HostPageSize; /*0x16 */ U8 HostMSIxVectors; /*0x17 */ U16 Reserved8; /*0x18 */ @@ -516,6 +520,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { #define MPI2_EVENT_TEMP_THRESHOLD (0x0027) #define MPI2_EVENT_HOST_MESSAGE (0x0028) #define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) +#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) #define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) #define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) @@ -580,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE { } MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; -/*Power Performance Change Event */ +/*Power Performance Change Event data */ typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { U8 CurrentPowerMode; /*0x00 */ @@ -605,6 +610,21 @@ typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { #define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) #define MPI2_EVENT_PM_MODE_STANDBY (0x06) +/* Active Cable Exception Event data */ + +typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT { + U32 ActiveCablePowerRequirement; /* 0x00 */ + U8 ReasonCode; /* 0x04 */ + U8 ReceptacleID; /* 0x05 */ + U16 Reserved1; /* 0x06 */ +} MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + *PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, + Mpi26EventDataActiveCableExcept_t, + *pMpi26EventDataActiveCableExcept_t; + +/* defines for ReasonCode field */ +#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) + /*Hard Reset Received Event data */ typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED { @@ -1366,7 +1386,16 @@ typedef struct _MPI2_FW_IMAGE_HEADER { /*Signature0 field */ #define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) #define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) -#define MPI26_FW_HEADER_SIGNATURE0 (0x5AEAA55A) +/* Last byte is defined by architecture */ +#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00) +#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01) +/* legacy (0x5AEAA55A) */ +#define MPI26_FW_HEADER_SIGNATURE0 \ + (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0) +#define MPI26_FW_HEADER_SIGNATURE0_3516 \ + (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1) /*Signature1 field */ #define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) @@ -1778,6 +1807,7 @@ typedef struct _MPI26_IOUNIT_CONTROL_REQUEST { #define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06) #define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07) #define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG (0x09) #define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A) #define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B) #define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 
8c44b9c424af..751f13edece0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -57,6 +57,7 @@ #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/time.h> +#include <linux/ktime.h> #include <linux/kthread.h> #include <linux/aer.h> @@ -654,6 +655,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, case MPI2_EVENT_TEMP_THRESHOLD: desc = "Temperature Threshold"; break; + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: + desc = "Active cable exception"; + break; } if (!desc) @@ -1100,18 +1104,16 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) } /** - * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues + * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts * @ioc: per adapter object - * Context: ISR conext + * Context: non ISR conext * - * Called when a Task Management request has completed. We want - * to flush the other reply queues so all the outstanding IO has been - * completed back to OS before we process the TM completetion. + * Called when a Task Management request has completed. * * Return nothing. */ void -mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc) +mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) { struct adapter_reply_queue *reply_q; @@ -1122,12 +1124,13 @@ mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc) return; list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { - if (ioc->shost_recovery) + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) return; /* TMs are on msix_index == 0 */ if (reply_q->msix_index == 0) continue; - _base_interrupt(reply_q->vector, (void *)reply_q); + synchronize_irq(reply_q->vector); } } @@ -3207,10 +3210,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) sg_tablesize = MPT_MIN_PHYS_SEGMENTS; else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { sg_tablesize = min_t(unsigned short, sg_tablesize, - SCSI_MAX_SG_CHAIN_SEGMENTS); + SG_MAX_SEGMENTS); pr_warn(MPT3SAS_FMT "sg_tablesize(%u) is bigger than kernel" - " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name, + " defined SG_CHUNK_SIZE(%u)\n", ioc->name, sg_tablesize, MPT_MAX_PHYS_SEGMENTS); } ioc->shost->sg_tablesize = sg_tablesize; @@ -4387,7 +4390,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) Mpi2IOCInitRequest_t mpi_request; Mpi2IOCInitReply_t mpi_reply; int i, r = 0; - struct timeval current_time; + ktime_t current_time; u16 ioc_status; u32 reply_post_free_array_sz = 0; Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; @@ -4449,9 +4452,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) /* This time stamp specifies number of milliseconds * since epoch ~ midnight January 1, 1970. 
*/ - do_gettimeofday(&current_time); - mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 + - (current_time.tv_usec / 1000)); + current_time = ktime_get_real(); + mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); if (ioc->logging_level & MPT_DEBUG_INIT) { __le32 *mfp; @@ -5424,6 +5426,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); + if (ioc->hba_mpi_version_belonged == MPI26_VERSION) + _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); r = _base_make_ioc_operational(ioc, CAN_SLEEP); if (r) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 32580b514b18..892c9be008b5 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -73,8 +73,8 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "12.100.00.00" -#define MPT3SAS_MAJOR_VERSION 12 +#define MPT3SAS_DRIVER_VERSION "13.100.00.00" +#define MPT3SAS_MAJOR_VERSION 13 #define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -90,7 +90,7 @@ /* * Set MPT3SAS_SG_DEPTH value based on user input. */ -#define MPT_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS +#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE #define MPT_MIN_PHYS_SEGMENTS 16 #ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE @@ -112,6 +112,8 @@ #define MPT3SAS_SAS_QUEUE_DEPTH 254 #define MPT3SAS_RAID_QUEUE_DEPTH 128 +#define MPT3SAS_RAID_MAX_SECTORS 8192 + #define MPT_NAME_LENGTH 32 /* generic length of strings */ #define MPT_STRING_LENGTH 64 @@ -1234,7 +1236,8 @@ void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid); void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid); -void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc); + +void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc); /* hi-priority queue */ u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index e0e4920d0fa6..6a4df5a315e9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -174,13 +174,13 @@ struct sense_info { * struct fw_event_work - firmware event struct * @list: link list framework * @work: work object (ioc->fault_reset_work_q) - * @cancel_pending_work: flag set during reset handling * @ioc: per adapter object * @device_handle: device handle * @VF_ID: virtual function id * @VP_ID: virtual port id * @ignore: flag meaning this event has been marked to ignore - * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h + * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h + * @refcount: kref for this event * @event_data: reply event data payload follows * * This object stored on ioc->fw_event_list.
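The first mpt3sas_base.c hunk in this chunk drops the struct timeval / do_gettimeofday() pair and builds the IOC init timestamp with ktime_get_real() and ktime_to_ms(). A minimal userspace analogue of the same milliseconds-since-epoch computation, using POSIX clock_gettime() since the kernel ktime helpers are not available outside the kernel:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Userspace analogue of ktime_to_ms(ktime_get_real()): a 64-bit count of
 * milliseconds since the Unix epoch. */
static uint64_t real_time_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    return (uint64_t)ts.tv_sec * 1000u + (uint64_t)ts.tv_nsec / 1000000u;
}

int main(void)
{
    printf("ms since epoch: %" PRIu64 "\n", real_time_ms());
    return 0;
}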
@@ -188,8 +188,6 @@ struct sense_info { struct fw_event_work { struct list_head list; struct work_struct work; - u8 cancel_pending_work; - struct delayed_work delayed_work; struct MPT3SAS_ADAPTER *ioc; u16 device_handle; @@ -1911,6 +1909,14 @@ scsih_slave_configure(struct scsi_device *sdev) (unsigned long long)raid_device->wwid, raid_device->num_pds, ds); + if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { + blk_queue_max_hw_sectors(sdev->request_queue, + MPT3SAS_RAID_MAX_SECTORS); + sdev_printk(KERN_INFO, sdev, + "Set queue's max_sector to: %u\n", + MPT3SAS_RAID_MAX_SECTORS); + } + scsih_change_queue_depth(sdev, qdepth); /* raid transport support */ @@ -2118,7 +2124,6 @@ _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) return 1; if (ioc->tm_cmds.smid != smid) return 1; - mpt3sas_base_flush_reply_queues(ioc); ioc->tm_cmds.status |= MPT3_CMD_COMPLETE; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); if (mpi_reply) { @@ -2303,6 +2308,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, } } + /* sync IRQs in case those were busy during flush. */ + mpt3sas_base_sync_reply_irqs(ioc); + if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); mpi_reply = ioc->tm_cmds.reply; @@ -2804,12 +2812,12 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) /* * Wait on the fw_event to complete. If this returns 1, then * the event was never executed, and we need a put for the - * reference the delayed_work had on the fw_event. + * reference the work had on the fw_event. * * If it did execute, we wait for it to finish, and the put will * happen from _firmware_event_work() */ - if (cancel_delayed_work_sync(&fw_event->delayed_work)) + if (cancel_work_sync(&fw_event->work)) fw_event_work_put(fw_event); fw_event_work_put(fw_event); @@ -3961,7 +3969,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; mpi_request->CDB.EEDP32.PrimaryReferenceTag = - cpu_to_be32(scsi_get_lba(scmd)); + cpu_to_be32(scsi_prot_ref_tag(scmd)); break; case SCSI_PROT_DIF_TYPE3: @@ -7850,6 +7858,7 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, Mpi2EventNotificationReply_t *mpi_reply; u16 event; u16 sz; + Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; /* events turned off due to host reset or driver unloading */ if (ioc->remove_host || ioc->pci_error_recovery) @@ -7962,6 +7971,18 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, (Mpi2EventDataTemperature_t *) mpi_reply->EventData); break; + case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: + ActiveCableEventData = + (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; + if (ActiveCableEventData->ReasonCode == + MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) + pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", + ioc->name, ActiveCableEventData->ReceptacleID); + pr_info("cannot be powered and devices connected to this active cable"); + pr_info("will not be seen. 
This active cable"); + pr_info("requires %d mW of power", + ActiveCableEventData->ActiveCablePowerRequirement); + break; default: /* ignore the rest */ return 1; diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index c7c250519c4b..8280046fd1f0 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -704,24 +704,7 @@ static struct pci_device_id mvs_pci_table[] = { .class_mask = 0, .driver_data = chip_9445, }, - { - .vendor = PCI_VENDOR_ID_MARVELL_EXT, - .device = 0x9485, - .subvendor = PCI_ANY_ID, - .subdevice = 0x9480, - .class = 0, - .class_mask = 0, - .driver_data = chip_9485, - }, - { - .vendor = PCI_VENDOR_ID_MARVELL_EXT, - .device = 0x9485, - .subvendor = PCI_ANY_ID, - .subdevice = 0x9485, - .class = 0, - .class_mask = 0, - .driver_data = chip_9485, - }, + { PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */ { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index 512037e27783..2f689ae7a803 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c @@ -1,5 +1,3 @@ -#define PSEUDO_DMA - /* * This driver adapted from Drew Eckhardt's Trantor T128 driver * @@ -77,7 +75,6 @@ #include <scsi/scsi_host.h> #include "pas16.h" -#define AUTOPROBE_IRQ #include "NCR5380.h" @@ -377,7 +374,7 @@ static int __init pas16_detect(struct scsi_host_template *tpnt) instance->io_port = io_port; - if (NCR5380_init(instance, 0)) + if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP)) goto out_unregister; NCR5380_maybe_reset_bus(instance); @@ -460,7 +457,7 @@ static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, } /* - * Function : int NCR5380_pread (struct Scsi_Host *instance, + * Function : int pas16_pread (struct Scsi_Host *instance, * unsigned char *dst, int len) * * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to @@ -472,14 +469,14 @@ static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, * timeout. */ -static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, - int len) { +static inline int pas16_pread(struct Scsi_Host *instance, + unsigned char *dst, int len) +{ register unsigned char *d = dst; register unsigned short reg = (unsigned short) (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; - struct NCR5380_hostdata *hostdata = shost_priv(instance); while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) ++ii; @@ -492,13 +489,11 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, instance->host_no); return -1; } - if (ii > hostdata->spin_max_r) - hostdata->spin_max_r = ii; return 0; } /* - * Function : int NCR5380_pwrite (struct Scsi_Host *instance, + * Function : int pas16_pwrite (struct Scsi_Host *instance, * unsigned char *src, int len) * * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from @@ -510,13 +505,13 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, * timeout. 
*/ -static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, - int len) { +static inline int pas16_pwrite(struct Scsi_Host *instance, + unsigned char *src, int len) +{ register unsigned char *s = src; register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; - struct NCR5380_hostdata *hostdata = shost_priv(instance); while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) ++ii; @@ -529,8 +524,6 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src instance->host_no); return -1; } - if (ii > hostdata->spin_max_w) - hostdata->spin_max_w = ii; return 0; } @@ -550,8 +543,6 @@ static struct scsi_host_template driver_template = { .detect = pas16_detect, .release = pas16_release, .proc_name = "pas16", - .show_info = pas16_show_info, - .write_info = pas16_write_info, .info = pas16_info, .queuecommand = pas16_queue_command, .eh_abort_handler = pas16_abort, diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index d37527717225..9fe7f33660b4 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h @@ -103,14 +103,15 @@ #define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) ) #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) +#define NCR5380_dma_recv_setup pas16_pread +#define NCR5380_dma_send_setup pas16_pwrite +#define NCR5380_dma_residual(instance) (0) #define NCR5380_intr pas16_intr #define NCR5380_queue_command pas16_queue_command #define NCR5380_abort pas16_abort #define NCR5380_bus_reset pas16_bus_reset #define NCR5380_info pas16_info -#define NCR5380_show_info pas16_show_info -#define NCR5380_write_info pas16_write_info /* 15 14 12 10 7 5 3 1101 0100 1010 1000 */ diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 062ab34b86f8..6bd7bf4f4a81 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -418,8 +418,6 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { pm8001_ha->io_mem[logicalBar].membase = pci_resource_start(pdev, bar); - pm8001_ha->io_mem[logicalBar].membase &= - (u32)PCI_BASE_ADDRESS_MEM_MASK; pm8001_ha->io_mem[logicalBar].memsize = pci_resource_len(pdev, bar); pm8001_ha->io_mem[logicalBar].memvirtaddr = diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index b5029e543b91..15dff7099955 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -6,6 +6,7 @@ */ #include "qla_def.h" #include <linux/delay.h> +#include <linux/ktime.h> #include <linux/pci.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> @@ -1812,7 +1813,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) struct host_system_info *phost_info; struct register_host_info *preg_hsi; struct new_utsname *p_sysid = NULL; - struct timeval tv; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -1886,8 +1886,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) p_sysid->domainname, DOMNAME_LENGTH); strncpy(phost_info->hostdriver, QLA2XXX_VERSION, VERSION_LENGTH); - do_gettimeofday(&tv); - preg_hsi->utc = (uint64_t)tv.tv_sec; + preg_hsi->utc = (uint64_t)ktime_get_real_seconds(); ql_dbg(ql_dbg_init, vha, 0x0149, "ISP%04X: Host registration with firmware\n", ha->pdev->device); diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index b6b4cfdd7620..54380b434b30 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c 
+++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1229,7 +1229,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) if (buf == NULL) { ql_log(ql_log_fatal, vha, 0x010c, "Unable to allocate memory.\n"); - return -1; + return -ENOMEM; } for (i = 0; i < n; i++) { diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index f3d69a98c725..0f9ba41e27d8 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -6,23 +6,15 @@ * anything out of the ordinary is seen. * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * - * This version is more generic, simulating a variable number of disk - * (or disk like devices) sharing a common amount of RAM. To be more - * realistic, the simulated devices have the transport attributes of - * SAS disks. + * Copyright (C) 2001 - 2016 Douglas Gilbert * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. * * For documentation see http://sg.danny.cz/sg/sdebug26.html * - * D. Gilbert (dpg) work for Magneto-Optical device test [20010421] - * dpg: work for devfs large number of disks [20010809] - * forked for lk 2.5 series [20011216, 20020101] - * use vmalloc() more inquiry+mode_sense [20020302] - * add timers for delayed responses [20020721] - * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031] - * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118] - * dpg: change style of boot options to "scsi_debug.num_tgts=2" and - * module options to "modprobe scsi_debug num_tgts=2" [20021221] */ @@ -32,7 +24,7 @@ #include <linux/kernel.h> #include <linux/errno.h> -#include <linux/timer.h> +#include <linux/jiffies.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> @@ -49,6 +41,7 @@ #include <linux/interrupt.h> #include <linux/atomic.h> #include <linux/hrtimer.h> +#include <linux/uuid.h> #include <net/checksum.h> @@ -66,8 +59,9 @@ #include "sd.h" #include "scsi_logging.h" -#define SCSI_DEBUG_VERSION "1.85" -static const char *scsi_debug_version_date = "20141022"; +/* make sure inq_product_rev string corresponds to this version */ +#define SDEBUG_VERSION "1.86" +static const char *sdebug_version_date = "20160430"; #define MY_NAME "scsi_debug" @@ -102,7 +96,6 @@ static const char *scsi_debug_version_date = "20141022"; /* Additional Sense Code Qualifier (ASCQ) */ #define ACK_NAK_TO 0x3 - /* Default values for driver parameters */ #define DEF_NUM_HOST 1 #define DEF_NUM_TGTS 1 @@ -111,7 +104,7 @@ static const char *scsi_debug_version_date = "20141022"; * (id 0) containing 1 logical unit (lun 0). That is 1 device. 
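The qla82xx_pinit_from_rom() fix near the start of this chunk swaps a bare -1 for -ENOMEM so the caller receives a meaningful errno-style code. A tiny hedged illustration of that convention, with an invented alloc_buf() helper:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented helper following the kernel convention the hunk adopts:
 * return 0 on success or a negative errno value, never a bare -1. */
static int alloc_buf(void **out, size_t n)
{
    *out = malloc(n);
    return *out ? 0 : -ENOMEM;
}

int main(void)
{
    void *buf;
    int rc = alloc_buf(&buf, 4096);

    if (rc) {
        fprintf(stderr, "allocation failed: %s\n", strerror(-rc));
        return EXIT_FAILURE;
    }
    free(buf);
    return EXIT_SUCCESS;
}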
*/ #define DEF_ATO 1 -#define DEF_DELAY 1 /* if > 0 unit is a jiffy */ +#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */ #define DEF_DEV_SIZE_MB 8 #define DEF_DIF 0 #define DEF_DIX 0 @@ -131,9 +124,9 @@ static const char *scsi_debug_version_date = "20141022"; #define DEF_OPTS 0 #define DEF_OPT_BLKS 1024 #define DEF_PHYSBLK_EXP 0 -#define DEF_PTYPE 0 +#define DEF_PTYPE TYPE_DISK #define DEF_REMOVABLE false -#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */ +#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */ #define DEF_SECTOR_SIZE 512 #define DEF_UNMAP_ALIGNMENT 0 #define DEF_UNMAP_GRANULARITY 1 @@ -143,43 +136,54 @@ static const char *scsi_debug_version_date = "20141022"; #define DEF_VPD_USE_HOSTNO 1 #define DEF_WRITESAME_LENGTH 0xFFFF #define DEF_STRICT 0 -#define DELAY_OVERRIDDEN -9999 - -/* bit mask values for scsi_debug_opts */ -#define SCSI_DEBUG_OPT_NOISE 1 -#define SCSI_DEBUG_OPT_MEDIUM_ERR 2 -#define SCSI_DEBUG_OPT_TIMEOUT 4 -#define SCSI_DEBUG_OPT_RECOVERED_ERR 8 -#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16 -#define SCSI_DEBUG_OPT_DIF_ERR 32 -#define SCSI_DEBUG_OPT_DIX_ERR 64 -#define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 -#define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100 -#define SCSI_DEBUG_OPT_Q_NOISE 0x200 -#define SCSI_DEBUG_OPT_ALL_TSF 0x400 -#define SCSI_DEBUG_OPT_RARE_TSF 0x800 -#define SCSI_DEBUG_OPT_N_WCE 0x1000 -#define SCSI_DEBUG_OPT_RESET_NOISE 0x2000 -#define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000 -#define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000) +#define DEF_STATISTICS false +#define DEF_SUBMIT_QUEUES 1 +#define DEF_UUID_CTL 0 +#define JDELAY_OVERRIDDEN -9999 + +#define SDEBUG_LUN_0_VAL 0 + +/* bit mask values for sdebug_opts */ +#define SDEBUG_OPT_NOISE 1 +#define SDEBUG_OPT_MEDIUM_ERR 2 +#define SDEBUG_OPT_TIMEOUT 4 +#define SDEBUG_OPT_RECOVERED_ERR 8 +#define SDEBUG_OPT_TRANSPORT_ERR 16 +#define SDEBUG_OPT_DIF_ERR 32 +#define SDEBUG_OPT_DIX_ERR 64 +#define SDEBUG_OPT_MAC_TIMEOUT 128 +#define SDEBUG_OPT_SHORT_TRANSFER 0x100 +#define SDEBUG_OPT_Q_NOISE 0x200 +#define SDEBUG_OPT_ALL_TSF 0x400 +#define SDEBUG_OPT_RARE_TSF 0x800 +#define SDEBUG_OPT_N_WCE 0x1000 +#define SDEBUG_OPT_RESET_NOISE 0x2000 +#define SDEBUG_OPT_NO_CDB_NOISE 0x4000 +#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \ + SDEBUG_OPT_RESET_NOISE) +#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \ + SDEBUG_OPT_TRANSPORT_ERR | \ + SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \ + SDEBUG_OPT_SHORT_TRANSFER) /* When "every_nth" > 0 then modulo "every_nth" commands: - * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set + * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set * - a RECOVERED_ERROR is simulated on successful read and write - * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. + * commands if SDEBUG_OPT_RECOVERED_ERR is set. * - a TRANSPORT_ERROR is simulated on successful read and write - * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. + * commands if SDEBUG_OPT_TRANSPORT_ERR is set. * * When "every_nth" < 0 then after "- every_nth" commands: - * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set + * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set * - a RECOVERED_ERROR is simulated on successful read and write - * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. + * commands if SDEBUG_OPT_RECOVERED_ERR is set. * - a TRANSPORT_ERROR is simulated on successful read and write - * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. - * This will continue until some other action occurs (e.g. 
the user - * writing a new value (other than -1 or 1) to every_nth via sysfs). + * commands if _DEBUG_OPT_TRANSPORT_ERR is set. + * This will continue on every subsequent command until some other action + * occurs (e.g. the user * writing a new value (other than -1 or 1) to + * every_nth via sysfs). */ -/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in +/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in * priority order. In the subset implemented here lower numbers have higher * priority. The UA numbers should be a sequence starting from 0 with * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */ @@ -192,11 +196,7 @@ static const char *scsi_debug_version_date = "20141022"; #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6 #define SDEBUG_NUM_UAS 7 -/* for check_readiness() */ -#define UAS_ONLY 1 /* check for UAs only */ -#define UAS_TUR 0 /* if no UAs then check if media access possible */ - -/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this +/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this * sector on read commands: */ #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */ @@ -205,21 +205,108 @@ static const char *scsi_debug_version_date = "20141022"; * or "peripheral device" addressing (value 0) */ #define SAM2_LUN_ADDRESS_METHOD 0 -/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued - * (for response) at one time. Can be reduced by max_queue option. Command - * responses are not queued when delay=0 and ndelay=0. The per-device - * DEF_CMD_PER_LUN can be changed via sysfs: - * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed - * SCSI_DEBUG_CANQUEUE. */ -#define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */ -#define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG) +/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued + * (for response) per submit queue at one time. Can be reduced by max_queue + * option. Command responses are not queued when jdelay=0 and ndelay=0. The + * per-device DEF_CMD_PER_LUN can be changed via sysfs: + * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth + * but cannot exceed SDEBUG_CANQUEUE . 
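The comment above caps queued responses at SDEBUG_CANQUEUE per submit queue, with max_queue and the per-device queue_depth able to lower, but never exceed, that limit. The sketch below works through the arithmetic and mimics claiming a slot in a fixed-size bitmap in the spirit of in_use_bm[]; the names and the claim_slot() helper are illustrative only, and the 3 * BITS_PER_LONG = 192 figure assumes 64-bit longs.

#include <limits.h>
#include <stdio.h>

#define CANQUEUE_WORDS 3
#define BITS_PER_LONG ((int)(sizeof(unsigned long) * CHAR_BIT))
#define CANQUEUE (CANQUEUE_WORDS * BITS_PER_LONG)

/* Illustrative slot claim over a fixed bitmap: find the first clear bit
 * below max_queue, set it, and return its index. */
static int claim_slot(unsigned long *bm, int max_queue)
{
    for (int i = 0; i < max_queue && i < CANQUEUE; i++) {
        int w = i / BITS_PER_LONG;
        unsigned long mask = 1UL << (i % BITS_PER_LONG);

        if (!(bm[w] & mask)) {
            bm[w] |= mask;
            return i;
        }
    }
    return -1;    /* full: the driver would answer TASK SET FULL */
}

int main(void)
{
    unsigned long bm[CANQUEUE_WORDS] = { 0 };

    printf("per-queue capacity: %d slots\n", CANQUEUE);
    printf("first slot:  %d\n", claim_slot(bm, CANQUEUE));
    printf("second slot: %d\n", claim_slot(bm, CANQUEUE));
    return 0;
}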
+ */ +#define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */ +#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG) #define DEF_CMD_PER_LUN 255 -#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE -#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE" -#endif +#define F_D_IN 1 +#define F_D_OUT 2 +#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */ +#define F_D_UNKN 8 +#define F_RL_WLUN_OK 0x10 +#define F_SKIP_UA 0x20 +#define F_DELAY_OVERR 0x40 +#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */ +#define F_SA_HIGH 0x100 /* as used by variable length cdbs */ +#define F_INV_OP 0x200 +#define F_FAKE_RW 0x400 +#define F_M_ACCESS 0x800 /* media access */ + +#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) +#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW) +#define FF_SA (F_SA_HIGH | F_SA_LOW) + +#define SDEBUG_MAX_PARTS 4 + +#define SDEBUG_MAX_CMD_LEN 32 + + +struct sdebug_dev_info { + struct list_head dev_list; + unsigned int channel; + unsigned int target; + u64 lun; + uuid_be lu_name; + struct sdebug_host_info *sdbg_host; + unsigned long uas_bm[1]; + atomic_t num_in_q; + atomic_t stopped; + bool used; +}; + +struct sdebug_host_info { + struct list_head host_list; + struct Scsi_Host *shost; + struct device dev; + struct list_head dev_info_list; +}; + +#define to_sdebug_host(d) \ + container_of(d, struct sdebug_host_info, dev) + +struct sdebug_defer { + struct hrtimer hrt; + struct execute_work ew; + int sqa_idx; /* index of sdebug_queue array */ + int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */ + int issuing_cpu; +}; + +struct sdebug_queued_cmd { + /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue + * instance indicates this slot is in use. + */ + struct sdebug_defer *sd_dp; + struct scsi_cmnd *a_cmnd; + unsigned int inj_recovered:1; + unsigned int inj_transport:1; + unsigned int inj_dif:1; + unsigned int inj_dix:1; + unsigned int inj_short:1; +}; + +struct sdebug_queue { + struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE]; + unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS]; + spinlock_t qc_lock; + atomic_t blocked; /* to temporarily stop more being queued */ +}; + +static atomic_t sdebug_cmnd_count; /* number of incoming commands */ +static atomic_t sdebug_completions; /* count of deferred completions */ +static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */ +static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */ + +struct opcode_info_t { + u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */ + /* for terminating element */ + u8 opcode; /* if num_attached > 0, preferred */ + u16 sa; /* service action */ + u32 flags; /* OR-ed set of SDEB_F_* */ + int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); + const struct opcode_info_t *arrp; /* num_attached elements or NULL */ + u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... 
*/ + /* ignore cdb bytes after position 15 */ +}; -/* SCSI opcodes (first byte of cdb) mapped onto these indexes */ +/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */ enum sdeb_opcode_index { SDEB_I_INVALID_OPCODE = 0, SDEB_I_INQUIRY = 1, @@ -254,6 +341,7 @@ enum sdeb_opcode_index { SDEB_I_LAST_ELEMENT = 30, /* keep this last */ }; + static const unsigned char opcode_ind_arr[256] = { /* 0x0; 0x0->0x1f: 6 byte cdbs */ SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE, @@ -274,7 +362,7 @@ static const unsigned char opcode_ind_arr[256] = { 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE, 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0, -/* 0x60; 0x60->0x7d are reserved */ +/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, SDEB_I_VARIABLE_LEN, @@ -297,24 +385,6 @@ static const unsigned char opcode_ind_arr[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; -#define F_D_IN 1 -#define F_D_OUT 2 -#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */ -#define F_D_UNKN 8 -#define F_RL_WLUN_OK 0x10 -#define F_SKIP_UA 0x20 -#define F_DELAY_OVERR 0x40 -#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */ -#define F_SA_HIGH 0x100 /* as used by variable length cdbs */ -#define F_INV_OP 0x200 -#define F_FAKE_RW 0x400 -#define F_M_ACCESS 0x800 /* media access */ - -#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) -#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW) -#define FF_SA (F_SA_HIGH | F_SA_LOW) - -struct sdebug_dev_info; static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *); @@ -337,18 +407,6 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); -struct opcode_info_t { - u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff - * for terminating element */ - u8 opcode; /* if num_attached > 0, preferred */ - u16 sa; /* service action */ - u32 flags; /* OR-ed set of SDEB_F_* */ - int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); - const struct opcode_info_t *arrp; /* num_attached elements or NULL */ - u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... 
*/ - /* ignore cdb bytes after position 15 */ -}; - static const struct opcode_info_t msense_iarr[1] = { {0, 0x1a, 0, F_D_IN, NULL, NULL, {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, @@ -509,61 +567,52 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; -struct sdebug_scmd_extra_t { - bool inj_recovered; - bool inj_transport; - bool inj_dif; - bool inj_dix; - bool inj_short; -}; - -static int scsi_debug_add_host = DEF_NUM_HOST; -static int scsi_debug_ato = DEF_ATO; -static int scsi_debug_delay = DEF_DELAY; -static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB; -static int scsi_debug_dif = DEF_DIF; -static int scsi_debug_dix = DEF_DIX; -static int scsi_debug_dsense = DEF_D_SENSE; -static int scsi_debug_every_nth = DEF_EVERY_NTH; -static int scsi_debug_fake_rw = DEF_FAKE_RW; -static unsigned int scsi_debug_guard = DEF_GUARD; -static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; -static int scsi_debug_max_luns = DEF_MAX_LUNS; -static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; +static int sdebug_add_host = DEF_NUM_HOST; +static int sdebug_ato = DEF_ATO; +static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */ +static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB; +static int sdebug_dif = DEF_DIF; +static int sdebug_dix = DEF_DIX; +static int sdebug_dsense = DEF_D_SENSE; +static int sdebug_every_nth = DEF_EVERY_NTH; +static int sdebug_fake_rw = DEF_FAKE_RW; +static unsigned int sdebug_guard = DEF_GUARD; +static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED; +static int sdebug_max_luns = DEF_MAX_LUNS; +static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */ static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */ -static int scsi_debug_ndelay = DEF_NDELAY; -static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; -static int scsi_debug_no_uld = 0; -static int scsi_debug_num_parts = DEF_NUM_PARTS; -static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */ -static int scsi_debug_opt_blks = DEF_OPT_BLKS; -static int scsi_debug_opts = DEF_OPTS; -static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; -static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */ -static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; -static int scsi_debug_sector_size = DEF_SECTOR_SIZE; -static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; -static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; -static unsigned int scsi_debug_lbpu = DEF_LBPU; -static unsigned int scsi_debug_lbpws = DEF_LBPWS; -static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; -static unsigned int scsi_debug_lbprz = DEF_LBPRZ; -static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; -static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; -static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; -static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; -static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; -static bool scsi_debug_removable = DEF_REMOVABLE; -static bool scsi_debug_clustering; -static bool scsi_debug_host_lock = DEF_HOST_LOCK; -static bool scsi_debug_strict = DEF_STRICT; +static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */ +static int sdebug_no_lun_0 = DEF_NO_LUN_0; +static int sdebug_no_uld; +static int sdebug_num_parts = DEF_NUM_PARTS; +static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */ +static int sdebug_opt_blks = DEF_OPT_BLKS; +static int 
sdebug_opts = DEF_OPTS; +static int sdebug_physblk_exp = DEF_PHYSBLK_EXP; +static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */ +static int sdebug_scsi_level = DEF_SCSI_LEVEL; +static int sdebug_sector_size = DEF_SECTOR_SIZE; +static int sdebug_virtual_gb = DEF_VIRTUAL_GB; +static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; +static unsigned int sdebug_lbpu = DEF_LBPU; +static unsigned int sdebug_lbpws = DEF_LBPWS; +static unsigned int sdebug_lbpws10 = DEF_LBPWS10; +static unsigned int sdebug_lbprz = DEF_LBPRZ; +static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT; +static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY; +static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; +static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC; +static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH; +static int sdebug_uuid_ctl = DEF_UUID_CTL; +static bool sdebug_removable = DEF_REMOVABLE; +static bool sdebug_clustering; +static bool sdebug_host_lock = DEF_HOST_LOCK; +static bool sdebug_strict = DEF_STRICT; static bool sdebug_any_injecting_opt; - -static atomic_t sdebug_cmnd_count; -static atomic_t sdebug_completions; -static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */ - -#define DEV_READONLY(TGT) (0) +static bool sdebug_verbose; +static bool have_dif_prot; +static bool sdebug_statistics = DEF_STATISTICS; +static bool sdebug_mq_active; static unsigned int sdebug_store_sectors; static sector_t sdebug_capacity; /* in sectors */ @@ -574,59 +623,10 @@ static int sdebug_heads; /* heads per disk */ static int sdebug_cylinders_per; /* cylinders per surface */ static int sdebug_sectors_per; /* sectors per cylinder */ -#define SDEBUG_MAX_PARTS 4 - -#define SCSI_DEBUG_MAX_CMD_LEN 32 - -static unsigned int scsi_debug_lbp(void) -{ - return ((0 == scsi_debug_fake_rw) && - (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10)); -} - -struct sdebug_dev_info { - struct list_head dev_list; - unsigned int channel; - unsigned int target; - u64 lun; - struct sdebug_host_info *sdbg_host; - unsigned long uas_bm[1]; - atomic_t num_in_q; - char stopped; /* TODO: should be atomic */ - bool used; -}; - -struct sdebug_host_info { - struct list_head host_list; - struct Scsi_Host *shost; - struct device dev; - struct list_head dev_info_list; -}; - -#define to_sdebug_host(d) \ - container_of(d, struct sdebug_host_info, dev) - static LIST_HEAD(sdebug_host_list); static DEFINE_SPINLOCK(sdebug_host_list_lock); - -struct sdebug_hrtimer { /* ... 
is derived from hrtimer */ - struct hrtimer hrt; /* must be first element */ - int qa_indx; -}; - -struct sdebug_queued_cmd { - /* in_use flagged by a bit in queued_in_use_bm[] */ - struct timer_list *cmnd_timerp; - struct tasklet_struct *tletp; - struct sdebug_hrtimer *sd_hrtp; - struct scsi_cmnd * a_cmnd; -}; -static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; -static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS]; - - -static unsigned char * fake_storep; /* ramdisk storage */ +static unsigned char *fake_storep; /* ramdisk storage */ static struct sd_dif_tuple *dif_storep; /* protection info */ static void *map_storep; /* provisioning map */ @@ -640,7 +640,9 @@ static int dix_writes; static int dix_reads; static int dif_errors; -static DEFINE_SPINLOCK(queued_arr_lock); +static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */ +static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */ + static DEFINE_RWLOCK(atomic_rw); static char sdebug_proc_name[] = MY_NAME; @@ -662,19 +664,22 @@ static const int illegal_condition_result = static const int device_qfull_result = (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL; -static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, - 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, - 0, 0, 0, 0}; -static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, - 0, 0, 0x2, 0x4b}; -static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, - 0, 0, 0x0, 0x0}; + +/* Only do the extra work involved in logical block provisioning if one or + * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing + * real reads and writes (i.e. not skipping them for speed). + */ +static inline bool scsi_debug_lbp(void) +{ + return 0 == sdebug_fake_rw && + (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); +} static void *fake_store(unsigned long long lba) { lba = do_div(lba, sdebug_store_sectors); - return fake_storep + lba * scsi_debug_sector_size; + return fake_storep + lba * sdebug_sector_size; } static struct sd_dif_tuple *dif_store(sector_t sector) @@ -684,9 +689,6 @@ static struct sd_dif_tuple *dif_store(sector_t sector) return dif_storep + sector; } -static int sdebug_add_adapter(void); -static void sdebug_remove_adapter(void); - static void sdebug_max_tgts_luns(void) { struct sdebug_host_info *sdbg_host; @@ -696,11 +698,11 @@ static void sdebug_max_tgts_luns(void) list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { hpnt = sdbg_host->shost; if ((hpnt->this_id >= 0) && - (scsi_debug_num_tgts > hpnt->this_id)) - hpnt->max_id = scsi_debug_num_tgts + 1; + (sdebug_num_tgts > hpnt->this_id)) + hpnt->max_id = sdebug_num_tgts + 1; else - hpnt->max_id = scsi_debug_num_tgts; - /* scsi_debug_max_luns; */ + hpnt->max_id = sdebug_num_tgts; + /* sdebug_max_luns; */ hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; } spin_unlock(&sdebug_host_list_lock); @@ -709,9 +711,9 @@ static void sdebug_max_tgts_luns(void) enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1}; /* Set in_bit to -1 to indicate no bit position of invalid field */ -static void -mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, - int in_byte, int in_bit) +static void mk_sense_invalid_fld(struct scsi_cmnd *scp, + enum sdeb_cmd_data c_d, + int in_byte, int in_bit) { unsigned char *sbuff; u8 sks[4]; @@ -725,8 +727,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, } asc = c_d ? 
INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST; memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); - scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST, - asc, 0); + scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0); memset(sks, 0, sizeof(sks)); sks[0] = 0x80; if (c_d) @@ -736,7 +737,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, sks[0] |= 0x7 & in_bit; } put_unaligned_be16(in_byte, sks + 1); - if (scsi_debug_dsense) { + if (sdebug_dsense) { sl = sbuff[7] + 8; sbuff[7] = sl; sbuff[sl] = 0x2; @@ -744,7 +745,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, memcpy(sbuff + sl + 4, sks, 3); } else memcpy(sbuff + 15, sks, 3); - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) + if (sdebug_verbose) sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n", my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit); @@ -762,23 +763,22 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) } memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); - scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq); + scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq); - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) + if (sdebug_verbose) sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n", my_name, key, asc, asq); } -static void -mk_sense_invalid_opcode(struct scsi_cmnd *scp) +static void mk_sense_invalid_opcode(struct scsi_cmnd *scp) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); } static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) { - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { + if (sdebug_verbose) { if (0x1261 == cmd) sdev_printk(KERN_INFO, dev, "%s: BLKFLSBUF [0x1261]\n", __func__); @@ -810,11 +810,9 @@ static void clear_luns_changed_on_target(struct sdebug_dev_info *devip) spin_unlock(&sdebug_host_list_lock); } -static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, - struct sdebug_dev_info * devip) +static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { int k; - bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts); k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS); if (k != SDEBUG_NUM_UAS) { @@ -822,40 +820,41 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, switch (k) { case SDEBUG_UA_POR: - mk_sense_buffer(SCpnt, UNIT_ATTENTION, - UA_RESET_ASC, POWER_ON_RESET_ASCQ); - if (debug) + mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, + POWER_ON_RESET_ASCQ); + if (sdebug_verbose) cp = "power on reset"; break; case SDEBUG_UA_BUS_RESET: - mk_sense_buffer(SCpnt, UNIT_ATTENTION, - UA_RESET_ASC, BUS_RESET_ASCQ); - if (debug) + mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC, + BUS_RESET_ASCQ); + if (sdebug_verbose) cp = "bus reset"; break; case SDEBUG_UA_MODE_CHANGED: - mk_sense_buffer(SCpnt, UNIT_ATTENTION, - UA_CHANGED_ASC, MODE_CHANGED_ASCQ); - if (debug) + mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC, + MODE_CHANGED_ASCQ); + if (sdebug_verbose) cp = "mode parameters changed"; break; case SDEBUG_UA_CAPACITY_CHANGED: - mk_sense_buffer(SCpnt, UNIT_ATTENTION, - UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ); - if (debug) + mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC, + CAPACITY_CHANGED_ASCQ); + if (sdebug_verbose) cp = "capacity data changed"; break; case SDEBUG_UA_MICROCODE_CHANGED: - mk_sense_buffer(SCpnt, UNIT_ATTENTION, - TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ); - if (debug) + mk_sense_buffer(scp, UNIT_ATTENTION, + 
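mk_sense_invalid_fld() reports ILLEGAL REQUEST / INVALID FIELD IN CDB (or IN PARAMETER LIST) together with a SENSE-KEY-SPECIFIC field pointer naming the offending byte and, optionally, bit. A standalone sketch of the fixed-format (non-descriptor) layout built above, for the CDB case (plain C; the byte positions follow the code above):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Build 18 bytes of fixed-format sense: ILLEGAL REQUEST, INVALID FIELD IN CDB,
 * with the sense-key-specific bytes pointing at cdb byte 'in_byte', bit 'in_bit'
 * (in_bit < 0 means "no bit position"). */
static void build_invalid_fld_sense(uint8_t *sb, int in_byte, int in_bit)
{
	memset(sb, 0, 18);
	sb[0] = 0x70;		/* fixed format, current error */
	sb[2] = 0x05;		/* sense key: ILLEGAL REQUEST */
	sb[7] = 10;		/* additional sense length */
	sb[12] = 0x24;		/* asc: INVALID FIELD IN CDB */
	sb[13] = 0x00;		/* ascq */
	sb[15] = 0x80 | 0x40;	/* SKSV=1, C/D=1 (error is in the CDB) */
	if (in_bit >= 0)
		sb[15] |= 0x08 | (in_bit & 0x7);	/* BPV=1 + bit pointer */
	sb[16] = (in_byte >> 8) & 0xff;			/* field pointer, big endian */
	sb[17] = in_byte & 0xff;
}

int main(void)
{
	uint8_t sense[18];

	build_invalid_fld_sense(sense, 4, 7);	/* e.g. byte 4, bit 7 of the cdb */
	for (int i = 0; i < 18; i++)
		printf("%02x ", sense[i]);
	printf("\n");
	return 0;
}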
TARGET_CHANGED_ASC, + MICROCODE_CHANGED_ASCQ); + if (sdebug_verbose) cp = "microcode has been changed"; break; case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET: - mk_sense_buffer(SCpnt, UNIT_ATTENTION, + mk_sense_buffer(scp, UNIT_ATTENTION, TARGET_CHANGED_ASC, MICROCODE_CHANGED_WO_RESET_ASCQ); - if (debug) + if (sdebug_verbose) cp = "microcode has been changed without reset"; break; case SDEBUG_UA_LUNS_CHANGED: @@ -864,40 +863,30 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN * on the target, until a REPORT LUNS command is * received. SPC-4 behavior is to report it only once. - * NOTE: scsi_debug_scsi_level does not use the same + * NOTE: sdebug_scsi_level does not use the same * values as struct scsi_device->scsi_level. */ - if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */ + if (sdebug_scsi_level >= 6) /* SPC-4 and above */ clear_luns_changed_on_target(devip); - mk_sense_buffer(SCpnt, UNIT_ATTENTION, + mk_sense_buffer(scp, UNIT_ATTENTION, TARGET_CHANGED_ASC, LUNS_CHANGED_ASCQ); - if (debug) + if (sdebug_verbose) cp = "reported luns data has changed"; break; default: - pr_warn("%s: unexpected unit attention code=%d\n", - __func__, k); - if (debug) + pr_warn("unexpected unit attention code=%d\n", k); + if (sdebug_verbose) cp = "unknown"; break; } clear_bit(k, devip->uas_bm); - if (debug) - sdev_printk(KERN_INFO, SCpnt->device, + if (sdebug_verbose) + sdev_printk(KERN_INFO, scp->device, "%s reports: Unit attention: %s\n", my_name, cp); return check_condition_result; } - if ((UAS_TUR == uas_only) && devip->stopped) { - mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY, - 0x2); - if (debug) - sdev_printk(KERN_INFO, SCpnt->device, - "%s reports: Not ready: %s\n", my_name, - "initializing command required"); - return check_condition_result; - } return 0; } @@ -911,7 +900,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, if (!sdb->length) return 0; if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) - return (DID_ERROR << 16); + return DID_ERROR << 16; act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, arr, arr_len); @@ -935,13 +924,17 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, static const char * inq_vendor_id = "Linux "; static const char * inq_product_id = "scsi_debug "; -static const char *inq_product_rev = "0184"; /* version less '.' */ +static const char *inq_product_rev = "0186"; /* version less '.' */ +/* Use some locally assigned NAAs for SAS addresses. */ +static const u64 naa3_comp_a = 0x3222222000000000ULL; +static const u64 naa3_comp_b = 0x3333333000000000ULL; +static const u64 naa3_comp_c = 0x3111111000000000ULL; /* Device identification VPD page. 
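make_ua() keeps pending unit attentions in a per-device bitmap: the lowest set bit is turned into sense data and then cleared, so each condition is delivered once and duplicates collapse into a single report. A rough standalone sketch of that queue discipline (plain C with a 32-bit bitmap and the GCC/clang __builtin_ctz(); the condition names are illustrative):

#include <stdio.h>

enum { UA_POR = 0, UA_BUS_RESET, UA_CAPACITY_CHANGED, UA_COUNT };

static unsigned int ua_bm;	/* one pending-UA bit per condition */

static void queue_ua(int ua)
{
	ua_bm |= 1u << ua;
}

/* Report (and clear) the lowest pending unit attention, or -1 if none. */
static int next_ua(void)
{
	if (!ua_bm)
		return -1;
	int k = __builtin_ctz(ua_bm);	/* index of lowest set bit */
	ua_bm &= ~(1u << k);		/* deliver each condition only once */
	return k;
}

int main(void)
{
	queue_ua(UA_CAPACITY_CHANGED);
	queue_ua(UA_POR);
	queue_ua(UA_POR);		/* duplicate collapses into one bit */
	for (int ua; (ua = next_ua()) >= 0; )
		printf("deliver UA %d\n", ua);	/* 0, then 2 */
	return 0;
}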
Returns number of bytes placed in arr */ -static int inquiry_evpd_83(unsigned char * arr, int port_group_id, - int target_dev_id, int dev_id_num, - const char * dev_id_str, - int dev_id_str_len) +static int inquiry_vpd_83(unsigned char *arr, int port_group_id, + int target_dev_id, int dev_id_num, + const char *dev_id_str, int dev_id_str_len, + const uuid_be *lu_name) { int num, port_a; char b[32]; @@ -958,19 +951,25 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id, arr[3] = num; num += 4; if (dev_id_num >= 0) { - /* NAA-5, Logical unit identifier (binary) */ - arr[num++] = 0x1; /* binary (not necessarily sas) */ - arr[num++] = 0x3; /* PIV=0, lu, naa */ - arr[num++] = 0x0; - arr[num++] = 0x8; - arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */ - arr[num++] = 0x33; - arr[num++] = 0x33; - arr[num++] = 0x30; - arr[num++] = (dev_id_num >> 24); - arr[num++] = (dev_id_num >> 16) & 0xff; - arr[num++] = (dev_id_num >> 8) & 0xff; - arr[num++] = dev_id_num & 0xff; + if (sdebug_uuid_ctl) { + /* Locally assigned UUID */ + arr[num++] = 0x1; /* binary (not necessarily sas) */ + arr[num++] = 0xa; /* PIV=0, lu, naa */ + arr[num++] = 0x0; + arr[num++] = 0x12; + arr[num++] = 0x10; /* uuid type=1, locally assigned */ + arr[num++] = 0x0; + memcpy(arr + num, lu_name, 16); + num += 16; + } else { + /* NAA-3, Logical unit identifier (binary) */ + arr[num++] = 0x1; /* binary (not necessarily sas) */ + arr[num++] = 0x3; /* PIV=0, lu, naa */ + arr[num++] = 0x0; + arr[num++] = 0x8; + put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num); + num += 8; + } /* Target relative port number */ arr[num++] = 0x61; /* proto=sas, binary */ arr[num++] = 0x94; /* PIV=1, target port, rel port */ @@ -981,47 +980,35 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id, arr[num++] = 0x0; arr[num++] = 0x1; /* relative port A */ } - /* NAA-5, Target port identifier */ + /* NAA-3, Target port identifier */ arr[num++] = 0x61; /* proto=sas, binary */ arr[num++] = 0x93; /* piv=1, target port, naa */ arr[num++] = 0x0; arr[num++] = 0x8; - arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */ - arr[num++] = 0x22; - arr[num++] = 0x22; - arr[num++] = 0x20; - arr[num++] = (port_a >> 24); - arr[num++] = (port_a >> 16) & 0xff; - arr[num++] = (port_a >> 8) & 0xff; - arr[num++] = port_a & 0xff; - /* NAA-5, Target port group identifier */ + put_unaligned_be64(naa3_comp_a + port_a, arr + num); + num += 8; + /* NAA-3, Target port group identifier */ arr[num++] = 0x61; /* proto=sas, binary */ arr[num++] = 0x95; /* piv=1, target port group id */ arr[num++] = 0x0; arr[num++] = 0x4; arr[num++] = 0; arr[num++] = 0; - arr[num++] = (port_group_id >> 8) & 0xff; - arr[num++] = port_group_id & 0xff; - /* NAA-5, Target device identifier */ + put_unaligned_be16(port_group_id, arr + num); + num += 2; + /* NAA-3, Target device identifier */ arr[num++] = 0x61; /* proto=sas, binary */ arr[num++] = 0xa3; /* piv=1, target device, naa */ arr[num++] = 0x0; arr[num++] = 0x8; - arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */ - arr[num++] = 0x22; - arr[num++] = 0x22; - arr[num++] = 0x20; - arr[num++] = (target_dev_id >> 24); - arr[num++] = (target_dev_id >> 16) & 0xff; - arr[num++] = (target_dev_id >> 8) & 0xff; - arr[num++] = target_dev_id & 0xff; + put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num); + num += 8; /* SCSI name string: Target device identifier */ arr[num++] = 0x63; /* proto=sas, UTF-8 */ arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */ arr[num++] = 0x0; arr[num++] = 24; - 
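inquiry_vpd_83() now builds NAA-3 (locally assigned) identifiers by adding a small per-object number to a fixed 64-bit base whose top nibble is 3 (naa3_comp_a/b/c), stored big-endian in the 8-byte designator, replacing the earlier hand-built fake NAA-5 values. A standalone sketch of that composition (plain C; the base constant mirrors the patch, the port number is an example):

#include <stdint.h>
#include <stdio.h>

/* Store a 64-bit value big-endian, as put_unaligned_be64() does. */
static void put_be64(uint64_t v, uint8_t *p)
{
	for (int i = 7; i >= 0; i--, v >>= 8)
		p[i] = v & 0xff;
}

int main(void)
{
	/* top nibble 0x3 = NAA "locally assigned"; the rest is an arbitrary local value */
	const uint64_t naa3_base = 0x3222222000000000ULL;
	uint8_t desig[8];
	unsigned int port_a = 0x101;		/* per-port number, example only */

	put_be64(naa3_base + port_a, desig);
	printf("naa.");
	for (int i = 0; i < 8; i++)
		printf("%02x", desig[i]);
	printf("\n");				/* naa.3222222000000101 */
	return 0;
}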
memcpy(arr + num, "naa.52222220", 12); + memcpy(arr + num, "naa.32222220", 12); num += 12; snprintf(b, sizeof(b), "%08X", target_dev_id); memcpy(arr + num, b, 8); @@ -1031,7 +1018,6 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id, return num; } - static unsigned char vpd84_data[] = { /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0, 0x22,0x22,0x22,0x0,0xbb,0x1, @@ -1039,14 +1025,14 @@ static unsigned char vpd84_data[] = { }; /* Software interface identification VPD page */ -static int inquiry_evpd_84(unsigned char * arr) +static int inquiry_vpd_84(unsigned char *arr) { memcpy(arr, vpd84_data, sizeof(vpd84_data)); return sizeof(vpd84_data); } /* Management network addresses VPD page */ -static int inquiry_evpd_85(unsigned char * arr) +static int inquiry_vpd_85(unsigned char *arr) { int num = 0; const char * na1 = "https://www.kernel.org/config"; @@ -1081,7 +1067,7 @@ static int inquiry_evpd_85(unsigned char * arr) } /* SCSI ports VPD page */ -static int inquiry_evpd_88(unsigned char * arr, int target_dev_id) +static int inquiry_vpd_88(unsigned char *arr, int target_dev_id) { int num = 0; int port_a, port_b; @@ -1101,15 +1087,8 @@ static int inquiry_evpd_88(unsigned char * arr, int target_dev_id) arr[num++] = 0x93; /* PIV=1, target port, NAA */ arr[num++] = 0x0; /* reserved */ arr[num++] = 0x8; /* length */ - arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */ - arr[num++] = 0x22; - arr[num++] = 0x22; - arr[num++] = 0x20; - arr[num++] = (port_a >> 24); - arr[num++] = (port_a >> 16) & 0xff; - arr[num++] = (port_a >> 8) & 0xff; - arr[num++] = port_a & 0xff; - + put_unaligned_be64(naa3_comp_a + port_a, arr + num); + num += 8; arr[num++] = 0x0; /* reserved */ arr[num++] = 0x0; /* reserved */ arr[num++] = 0x0; @@ -1123,14 +1102,8 @@ static int inquiry_evpd_88(unsigned char * arr, int target_dev_id) arr[num++] = 0x93; /* PIV=1, target port, NAA */ arr[num++] = 0x0; /* reserved */ arr[num++] = 0x8; /* length */ - arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */ - arr[num++] = 0x22; - arr[num++] = 0x22; - arr[num++] = 0x20; - arr[num++] = (port_b >> 24); - arr[num++] = (port_b >> 16) & 0xff; - arr[num++] = (port_b >> 8) & 0xff; - arr[num++] = port_b & 0xff; + put_unaligned_be64(naa3_comp_a + port_b, arr + num); + num += 8; return num; } @@ -1181,7 +1154,7 @@ static unsigned char vpd89_data[] = { }; /* ATA Information VPD page */ -static int inquiry_evpd_89(unsigned char * arr) +static int inquiry_vpd_89(unsigned char *arr) { memcpy(arr, vpd89_data, sizeof(vpd89_data)); return sizeof(vpd89_data); @@ -1196,47 +1169,42 @@ static unsigned char vpdb0_data[] = { }; /* Block limits VPD page (SBC-3) */ -static int inquiry_evpd_b0(unsigned char * arr) +static int inquiry_vpd_b0(unsigned char *arr) { unsigned int gran; memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); /* Optimal transfer length granularity */ - gran = 1 << scsi_debug_physblk_exp; - arr[2] = (gran >> 8) & 0xff; - arr[3] = gran & 0xff; + gran = 1 << sdebug_physblk_exp; + put_unaligned_be16(gran, arr + 2); /* Maximum Transfer Length */ - if (sdebug_store_sectors > 0x400) { - arr[4] = (sdebug_store_sectors >> 24) & 0xff; - arr[5] = (sdebug_store_sectors >> 16) & 0xff; - arr[6] = (sdebug_store_sectors >> 8) & 0xff; - arr[7] = sdebug_store_sectors & 0xff; - } + if (sdebug_store_sectors > 0x400) + put_unaligned_be32(sdebug_store_sectors, arr + 4); /* Optimal Transfer Length */ - put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); + put_unaligned_be32(sdebug_opt_blks, &arr[8]); - if (scsi_debug_lbpu) { + if (sdebug_lbpu) 
{ /* Maximum Unmap LBA Count */ - put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]); + put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]); /* Maximum Unmap Block Descriptor Count */ - put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); + put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]); } /* Unmap Granularity Alignment */ - if (scsi_debug_unmap_alignment) { - put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]); + if (sdebug_unmap_alignment) { + put_unaligned_be32(sdebug_unmap_alignment, &arr[28]); arr[28] |= 0x80; /* UGAVALID */ } /* Optimal Unmap Granularity */ - put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); + put_unaligned_be32(sdebug_unmap_granularity, &arr[24]); /* Maximum WRITE SAME Length */ - put_unaligned_be64(scsi_debug_write_same_length, &arr[32]); + put_unaligned_be64(sdebug_write_same_length, &arr[32]); return 0x3c; /* Mandatory page length for Logical Block Provisioning */ @@ -1244,7 +1212,7 @@ static int inquiry_evpd_b0(unsigned char * arr) } /* Block device characteristics VPD page (SBC-3) */ -static int inquiry_evpd_b1(unsigned char *arr) +static int inquiry_vpd_b1(unsigned char *arr) { memset(arr, 0, 0x3c); arr[0] = 0; @@ -1255,24 +1223,22 @@ static int inquiry_evpd_b1(unsigned char *arr) return 0x3c; } -/* Logical block provisioning VPD page (SBC-3) */ -static int inquiry_evpd_b2(unsigned char *arr) +/* Logical block provisioning VPD page (SBC-4) */ +static int inquiry_vpd_b2(unsigned char *arr) { memset(arr, 0, 0x4); arr[0] = 0; /* threshold exponent */ - - if (scsi_debug_lbpu) + if (sdebug_lbpu) arr[1] = 1 << 7; - - if (scsi_debug_lbpws) + if (sdebug_lbpws) arr[1] |= 1 << 6; - - if (scsi_debug_lbpws10) + if (sdebug_lbpws10) arr[1] |= 1 << 5; - - if (scsi_debug_lbprz) - arr[1] |= 1 << 2; - + if (sdebug_lbprz && scsi_debug_lbp()) + arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */ + /* anc_sup=0; dp=0 (no provisioning group descriptor) */ + /* minimum_percentage=0; provisioning_type=0 (unknown) */ + /* threshold_percentage=0 */ return 0x4; } @@ -1285,19 +1251,20 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) unsigned char * arr; unsigned char *cmd = scp->cmnd; int alloc_len, n, ret; - bool have_wlun; + bool have_wlun, is_disk; - alloc_len = (cmd[3] << 8) + cmd[4]; + alloc_len = get_unaligned_be16(cmd + 3); arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); if (! arr) return DID_REQUEUE << 16; - have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS); + is_disk = (sdebug_ptype == TYPE_DISK); + have_wlun = scsi_is_wlun(scp->device->lun); if (have_wlun) - pq_pdt = 0x1e; /* present, wlun */ - else if (scsi_debug_no_lun_0 && (0 == devip->lun)) - pq_pdt = 0x7f; /* not present, no device type */ + pq_pdt = TYPE_WLUN; /* present, wlun */ + else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) + pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */ else - pq_pdt = (scsi_debug_ptype & 0x1f); + pq_pdt = (sdebug_ptype & 0x1f); arr[0] = pq_pdt; if (0x2 & cmd[1]) { /* CMDDT bit set */ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); @@ -1310,7 +1277,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) port_group_id = (((host_no + 1) & 0x7f) << 8) + (devip->channel & 0x7f); - if (0 == scsi_debug_vpd_use_hostno) + if (sdebug_vpd_use_hostno == 0) host_no = 0; lu_id_num = have_wlun ? 
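A change that recurs throughout the patch: open-coded shift-and-mask byte stores are replaced by put_unaligned_be16/32/64(), with the field offsets (for instance in the Block Limits page above) left unchanged. Standalone stand-ins for those helpers, used to fill a few of the same fields with example values:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Stand-ins for the kernel's put_unaligned_be16/32/64(). */
static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = (uint8_t)v; }
static void put_be32(uint32_t v, uint8_t *p)
{
	put_be16(v >> 16, p);
	put_be16((uint16_t)v, p + 2);
}
static void put_be64(uint64_t v, uint8_t *p)
{
	put_be32(v >> 32, p);
	put_be32((uint32_t)v, p + 4);
}

int main(void)
{
	uint8_t arr[0x3c] = { 0 };

	/* A few Block Limits fields, at the offsets used above (example values). */
	put_be16(1 << 2, arr + 2);	/* optimal transfer length granularity */
	put_be32(0x40000, arr + 4);	/* maximum transfer length, in blocks */
	put_be32(1024, arr + 8);	/* optimal transfer length */
	put_be64(0xffff, arr + 32);	/* maximum WRITE SAME length */

	for (int i = 0; i < 12; i++)
		printf("%02x ", arr[i]);
	printf("\n");
	return 0;
}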
-1 : (((host_no + 1) * 2000) + (devip->target * 1000) + devip->lun); @@ -1328,11 +1295,12 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) arr[n++] = 0x86; /* extended inquiry */ arr[n++] = 0x87; /* mode page policy */ arr[n++] = 0x88; /* SCSI ports */ - arr[n++] = 0x89; /* ATA information */ - arr[n++] = 0xb0; /* Block limits (SBC) */ - arr[n++] = 0xb1; /* Block characteristics (SBC) */ - if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */ - arr[n++] = 0xb2; + if (is_disk) { /* SBC only */ + arr[n++] = 0x89; /* ATA information */ + arr[n++] = 0xb0; /* Block limits */ + arr[n++] = 0xb1; /* Block characteristics */ + arr[n++] = 0xb2; /* Logical Block Prov */ + } arr[3] = n - 4; /* number of supported VPD pages */ } else if (0x80 == cmd[2]) { /* unit serial number */ arr[1] = cmd[2]; /*sanity */ @@ -1340,21 +1308,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) memcpy(&arr[4], lu_id_str, len); } else if (0x83 == cmd[2]) { /* device identification */ arr[1] = cmd[2]; /*sanity */ - arr[3] = inquiry_evpd_83(&arr[4], port_group_id, - target_dev_id, lu_id_num, - lu_id_str, len); + arr[3] = inquiry_vpd_83(&arr[4], port_group_id, + target_dev_id, lu_id_num, + lu_id_str, len, + &devip->lu_name); } else if (0x84 == cmd[2]) { /* Software interface ident. */ arr[1] = cmd[2]; /*sanity */ - arr[3] = inquiry_evpd_84(&arr[4]); + arr[3] = inquiry_vpd_84(&arr[4]); } else if (0x85 == cmd[2]) { /* Management network addresses */ arr[1] = cmd[2]; /*sanity */ - arr[3] = inquiry_evpd_85(&arr[4]); + arr[3] = inquiry_vpd_85(&arr[4]); } else if (0x86 == cmd[2]) { /* extended inquiry */ arr[1] = cmd[2]; /*sanity */ arr[3] = 0x3c; /* number of following entries */ - if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) + if (sdebug_dif == SD_DIF_TYPE3_PROTECTION) arr[4] = 0x4; /* SPT: GRD_CHK:1 */ - else if (scsi_debug_dif) + else if (have_dif_prot) arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */ else arr[4] = 0x0; /* no protection stuff */ @@ -1368,39 +1337,38 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) arr[10] = 0x82; /* mlus, per initiator port */ } else if (0x88 == cmd[2]) { /* SCSI Ports */ arr[1] = cmd[2]; /*sanity */ - arr[3] = inquiry_evpd_88(&arr[4], target_dev_id); - } else if (0x89 == cmd[2]) { /* ATA information */ + arr[3] = inquiry_vpd_88(&arr[4], target_dev_id); + } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */ arr[1] = cmd[2]; /*sanity */ - n = inquiry_evpd_89(&arr[4]); - arr[2] = (n >> 8); - arr[3] = (n & 0xff); - } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */ + n = inquiry_vpd_89(&arr[4]); + put_unaligned_be16(n, arr + 2); + } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */ arr[1] = cmd[2]; /*sanity */ - arr[3] = inquiry_evpd_b0(&arr[4]); - } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ + arr[3] = inquiry_vpd_b0(&arr[4]); + } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */ arr[1] = cmd[2]; /*sanity */ - arr[3] = inquiry_evpd_b1(&arr[4]); - } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */ + arr[3] = inquiry_vpd_b1(&arr[4]); + } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. 
*/ arr[1] = cmd[2]; /*sanity */ - arr[3] = inquiry_evpd_b2(&arr[4]); + arr[3] = inquiry_vpd_b2(&arr[4]); } else { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); kfree(arr); return check_condition_result; } - len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); + len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); ret = fill_from_dev_buffer(scp, arr, min(len, SDEBUG_MAX_INQ_ARR_SZ)); kfree(arr); return ret; } /* drops through here for a standard inquiry */ - arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */ - arr[2] = scsi_debug_scsi_level; + arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */ + arr[2] = sdebug_scsi_level; arr[3] = 2; /* response_data_format==2 */ arr[4] = SDEBUG_LONG_INQ_SZ - 5; - arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */ - if (0 == scsi_debug_vpd_use_hostno) + arr[5] = (int)have_dif_prot; /* PROTECT bit */ + if (sdebug_vpd_use_hostno == 0) arr[5] = 0x10; /* claim: implicit TGPS */ arr[6] = 0x10; /* claim: MultiP */ /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ @@ -1409,21 +1377,26 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) memcpy(&arr[16], inq_product_id, 16); memcpy(&arr[32], inq_product_rev, 4); /* version descriptors (2 bytes each) follow */ - arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */ - arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */ + put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ + put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ n = 62; - if (scsi_debug_ptype == 0) { - arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */ - } else if (scsi_debug_ptype == 1) { - arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */ - } - arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */ + if (is_disk) { /* SBC-4 no version claimed */ + put_unaligned_be16(0x600, arr + n); + n += 2; + } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ + put_unaligned_be16(0x525, arr + n); + n += 2; + } + put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ ret = fill_from_dev_buffer(scp, arr, min(alloc_len, SDEBUG_LONG_INQ_SZ)); kfree(arr); return ret; } +static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, + 0, 0, 0x0, 0x0}; + static int resp_requests(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { @@ -1452,7 +1425,7 @@ static int resp_requests(struct scsi_cmnd * scp, } } else { memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE); - if (arr[0] >= 0x70 && dsense == scsi_debug_dsense) + if (arr[0] >= 0x70 && dsense == sdebug_dsense) ; /* have sense and formats match */ else if (arr[0] <= 0x70) { if (dsense) { @@ -1489,24 +1462,25 @@ static int resp_start_stop(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned char *cmd = scp->cmnd; - int power_cond, start; + int power_cond, stop; power_cond = (cmd[4] & 0xf0) >> 4; if (power_cond) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7); return check_condition_result; } - start = cmd[4] & 1; - if (start == devip->stopped) - devip->stopped = !start; + stop = !(cmd[4] & 1); + atomic_xchg(&devip->stopped, stop); return 0; } static sector_t get_sdebug_capacity(void) { - if (scsi_debug_virtual_gb > 0) - return (sector_t)scsi_debug_virtual_gb * - (1073741824 / scsi_debug_sector_size); + static const unsigned int gibibyte = 1073741824; + + if (sdebug_virtual_gb > 0) + return (sector_t)sdebug_virtual_gb * + (gibibyte / sdebug_sector_size); else return sdebug_store_sectors; } @@ -1523,18 +1497,10 @@ static int resp_readcap(struct scsi_cmnd * scp, memset(arr, 0, SDEBUG_READCAP_ARR_SZ); if 
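The VPD branches above share one pattern: build the full page in a scratch buffer, put its true length into the big-endian length field, then transfer no more than the initiator's allocation length (and never more than the scratch buffer holds). A minimal sketch of that clamp (plain C; buffer sizes and names are illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static unsigned int get_be16(const uint8_t *p) { return (p[0] << 8) | p[1]; }

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* Copy a VPD-style response (4-byte header, big-endian page length at bytes
 * 2..3) into the data-in buffer, honouring the allocation length. */
static unsigned int copy_vpd(uint8_t *din, unsigned int din_sz,
			     const uint8_t *arr, unsigned int arr_sz,
			     unsigned int alloc_len)
{
	unsigned int len = min_u(get_be16(arr + 2) + 4, alloc_len);

	len = min_u(len, min_u(arr_sz, din_sz));
	memcpy(din, arr, len);
	return len;		/* bytes actually transferred */
}

int main(void)
{
	uint8_t arr[64] = { 0, 0xb0, 0x00, 0x3c };	/* page B0h, length 0x3c */
	uint8_t din[32];

	printf("%u\n", copy_vpd(din, sizeof(din), arr, sizeof(arr), 255));
	return 0;
}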
(sdebug_capacity < 0xffffffff) { capac = (unsigned int)sdebug_capacity - 1; - arr[0] = (capac >> 24); - arr[1] = (capac >> 16) & 0xff; - arr[2] = (capac >> 8) & 0xff; - arr[3] = capac & 0xff; - } else { - arr[0] = 0xff; - arr[1] = 0xff; - arr[2] = 0xff; - arr[3] = 0xff; - } - arr[6] = (scsi_debug_sector_size >> 8) & 0xff; - arr[7] = scsi_debug_sector_size & 0xff; + put_unaligned_be32(capac, arr + 0); + } else + put_unaligned_be32(0xffffffff, arr + 0); + put_unaligned_be16(sdebug_sector_size, arr + 6); return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); } @@ -1544,34 +1510,31 @@ static int resp_readcap16(struct scsi_cmnd * scp, { unsigned char *cmd = scp->cmnd; unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; - unsigned long long capac; - int k, alloc_len; + int alloc_len; - alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) - + cmd[13]); + alloc_len = get_unaligned_be32(cmd + 10); /* following just in case virtual_gb changed */ sdebug_capacity = get_sdebug_capacity(); memset(arr, 0, SDEBUG_READCAP16_ARR_SZ); - capac = sdebug_capacity - 1; - for (k = 0; k < 8; ++k, capac >>= 8) - arr[7 - k] = capac & 0xff; - arr[8] = (scsi_debug_sector_size >> 24) & 0xff; - arr[9] = (scsi_debug_sector_size >> 16) & 0xff; - arr[10] = (scsi_debug_sector_size >> 8) & 0xff; - arr[11] = scsi_debug_sector_size & 0xff; - arr[13] = scsi_debug_physblk_exp & 0xf; - arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; + put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0); + put_unaligned_be32(sdebug_sector_size, arr + 8); + arr[13] = sdebug_physblk_exp & 0xf; + arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f; if (scsi_debug_lbp()) { arr[14] |= 0x80; /* LBPME */ - if (scsi_debug_lbprz) - arr[14] |= 0x40; /* LBPRZ */ + /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in + * the LB Provisioning VPD page is 3 bits. Note that lbprz=2 + * in the wider field maps to 0 in this field. + */ + if (sdebug_lbprz & 1) /* precisely what the draft requires */ + arr[14] |= 0x40; } - arr[15] = scsi_debug_lowest_aligned & 0xff; + arr[15] = sdebug_lowest_aligned & 0xff; - if (scsi_debug_dif) { - arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ + if (have_dif_prot) { + arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */ arr[12] |= 1; /* PROT_EN */ } @@ -1590,9 +1553,7 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp, int n, ret, alen, rlen; int port_group_a, port_group_b, port_a, port_b; - alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8) - + cmd[9]); - + alen = get_unaligned_be32(cmd + 6); arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); if (! arr) return DID_REQUEUE << 16; @@ -1605,49 +1566,46 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp, port_a = 0x1; /* relative port A */ port_b = 0x2; /* relative port B */ port_group_a = (((host_no + 1) & 0x7f) << 8) + - (devip->channel & 0x7f); + (devip->channel & 0x7f); port_group_b = (((host_no + 1) & 0x7f) << 8) + - (devip->channel & 0x7f) + 0x80; + (devip->channel & 0x7f) + 0x80; /* * The asymmetric access state is cycled according to the host_id. 
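resp_readcap()/resp_readcap16() follow the standard convention: the returned value is the last LBA (capacity - 1), READ CAPACITY(10) reports 0xffffffff when that does not fit in 32 bits so the initiator falls back to READ CAPACITY(16), and RC16 additionally carries the protection and provisioning bits (only bit 0 of the 3-bit lbprz setting becomes LBPRZ). A standalone sketch of the 8-byte RC10 payload (plain C; example values, with the block length written as a be32 at offset 4):

#include <stdint.h>
#include <stdio.h>

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = (uint8_t)v;
}

/* READ CAPACITY(10): returned LBA = last LBA, or 0xffffffff if too large. */
static void fill_readcap10(uint8_t *arr, uint64_t capacity, uint32_t block_size)
{
	if (capacity - 1 < 0xffffffffULL)
		put_be32((uint32_t)(capacity - 1), arr);
	else
		put_be32(0xffffffff, arr);	/* use READ CAPACITY(16) instead */
	put_be32(block_size, arr + 4);
}

int main(void)
{
	uint8_t arr[8];

	fill_readcap10(arr, 1ULL << 33, 512);	/* 4 TiB of 512-byte blocks */
	for (int i = 0; i < 8; i++)
		printf("%02x ", arr[i]);
	printf("\n");
	return 0;
}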
*/ n = 4; - if (0 == scsi_debug_vpd_use_hostno) { - arr[n++] = host_no % 3; /* Asymm access state */ - arr[n++] = 0x0F; /* claim: all states are supported */ + if (sdebug_vpd_use_hostno == 0) { + arr[n++] = host_no % 3; /* Asymm access state */ + arr[n++] = 0x0F; /* claim: all states are supported */ } else { - arr[n++] = 0x0; /* Active/Optimized path */ - arr[n++] = 0x01; /* claim: only support active/optimized paths */ + arr[n++] = 0x0; /* Active/Optimized path */ + arr[n++] = 0x01; /* only support active/optimized paths */ } - arr[n++] = (port_group_a >> 8) & 0xff; - arr[n++] = port_group_a & 0xff; + put_unaligned_be16(port_group_a, arr + n); + n += 2; arr[n++] = 0; /* Reserved */ arr[n++] = 0; /* Status code */ arr[n++] = 0; /* Vendor unique */ arr[n++] = 0x1; /* One port per group */ arr[n++] = 0; /* Reserved */ arr[n++] = 0; /* Reserved */ - arr[n++] = (port_a >> 8) & 0xff; - arr[n++] = port_a & 0xff; + put_unaligned_be16(port_a, arr + n); + n += 2; arr[n++] = 3; /* Port unavailable */ arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */ - arr[n++] = (port_group_b >> 8) & 0xff; - arr[n++] = port_group_b & 0xff; + put_unaligned_be16(port_group_b, arr + n); + n += 2; arr[n++] = 0; /* Reserved */ arr[n++] = 0; /* Status code */ arr[n++] = 0; /* Vendor unique */ arr[n++] = 0x1; /* One port per group */ arr[n++] = 0; /* Reserved */ arr[n++] = 0; /* Reserved */ - arr[n++] = (port_b >> 8) & 0xff; - arr[n++] = port_b & 0xff; + put_unaligned_be16(port_b, arr + n); + n += 2; rlen = n - 4; - arr[0] = (rlen >> 24) & 0xff; - arr[1] = (rlen >> 16) & 0xff; - arr[2] = (rlen >> 8) & 0xff; - arr[3] = rlen & 0xff; + put_unaligned_be32(rlen, arr + 0); /* * Return the smallest value of either @@ -1662,8 +1620,8 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp, return ret; } -static int -resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_rsup_opcodes(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { bool rctd; u8 reporting_opts, req_opcode, sdeb_i, supp; @@ -1813,8 +1771,8 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return errsts; } -static int -resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_rsup_tmfs(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { bool repd; u32 alloc_len, len; @@ -1871,17 +1829,19 @@ static int resp_format_pg(unsigned char * p, int pcontrol, int target) 0, 0, 0, 0, 0x40, 0, 0, 0}; memcpy(p, format_pg, sizeof(format_pg)); - p[10] = (sdebug_sectors_per >> 8) & 0xff; - p[11] = sdebug_sectors_per & 0xff; - p[12] = (scsi_debug_sector_size >> 8) & 0xff; - p[13] = scsi_debug_sector_size & 0xff; - if (scsi_debug_removable) + put_unaligned_be16(sdebug_sectors_per, p + 10); + put_unaligned_be16(sdebug_sector_size, p + 12); + if (sdebug_removable) p[20] |= 0x20; /* should agree with INQUIRY */ if (1 == pcontrol) memset(p + 2, 0, sizeof(format_pg) - 2); return sizeof(format_pg); } +static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, + 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, + 0, 0, 0, 0}; + static int resp_caching_pg(unsigned char * p, int pcontrol, int target) { /* Caching page for mode_sense */ unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0, @@ -1889,7 +1849,7 @@ static int resp_caching_pg(unsigned char * p, int pcontrol, int target) unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0}; - if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts) + if 
(SDEBUG_OPT_N_WCE & sdebug_opts) caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */ memcpy(p, caching_pg, sizeof(caching_pg)); if (1 == pcontrol) @@ -1899,6 +1859,9 @@ static int resp_caching_pg(unsigned char * p, int pcontrol, int target) return sizeof(caching_pg); } +static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, + 0, 0, 0x2, 0x4b}; + static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) { /* Control mode page for mode_sense */ unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, @@ -1906,12 +1869,12 @@ static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 0, 0, 0x2, 0x4b}; - if (scsi_debug_dsense) + if (sdebug_dsense) ctrl_m_pg[2] |= 0x4; else ctrl_m_pg[2] &= ~0x4; - if (scsi_debug_ato) + if (sdebug_ato) ctrl_m_pg[5] |= 0x80; /* ATO=1 */ memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg)); @@ -1955,31 +1918,29 @@ static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target, { /* SAS phy control and discover mode page for mode_sense */ unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2, 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0, - 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0, - 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1, + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 0x2, 0, 0, 0, 0, 0, 0, 0, 0x88, 0x99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0, - 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0, - 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1, + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ + 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */ 0x3, 0, 0, 0, 0, 0, 0, 0, 0x88, 0x99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; int port_a, port_b; + put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16); + put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24); + put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64); + put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72); port_a = target_dev_id + 1; port_b = port_a + 1; memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg)); - p[20] = (port_a >> 24); - p[21] = (port_a >> 16) & 0xff; - p[22] = (port_a >> 8) & 0xff; - p[23] = port_a & 0xff; - p[48 + 20] = (port_b >> 24); - p[48 + 21] = (port_b >> 16) & 0xff; - p[48 + 22] = (port_b >> 8) & 0xff; - p[48 + 23] = port_b & 0xff; + put_unaligned_be32(port_a, p + 20); + put_unaligned_be32(port_b, p + 48 + 20); if (1 == pcontrol) memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4); return sizeof(sas_pcd_m_pg); @@ -1999,29 +1960,30 @@ static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol) #define SDEBUG_MAX_MSENSE_SZ 256 -static int -resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_mode_sense(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { - unsigned char dbd, llbaa; int pcontrol, pcode, subpcode, bd_len; unsigned char dev_spec; - int k, alloc_len, msense_6, offset, len, target_dev_id; + int alloc_len, offset, len, target_dev_id; int target = scp->device->id; unsigned char * ap; unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; unsigned char *cmd = scp->cmnd; + bool dbd, llbaa, msense_6, is_disk, bad_pcode; - dbd = !!(cmd[1] & 0x8); + dbd = !!(cmd[1] & 0x8); /* disable block descriptors */ pcontrol = (cmd[2] & 0xc0) >> 6; pcode = cmd[2] & 0x3f; subpcode = cmd[3]; msense_6 = (MODE_SENSE == cmd[0]); - llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10); - if ((0 == scsi_debug_ptype) && (0 == dbd)) + llbaa = msense_6 ? 
false : !!(cmd[1] & 0x10); + is_disk = (sdebug_ptype == TYPE_DISK); + if (is_disk && !dbd) bd_len = llbaa ? 16 : 8; else bd_len = 0; - alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]); + alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7); memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); if (0x3 == pcontrol) { /* Saving values not supported */ mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); @@ -2029,9 +1991,9 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + (devip->target * 1000) - 3; - /* set DPOFUA bit for disks */ - if (0 == scsi_debug_ptype) - dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10; + /* for disks set DPOFUA bit and clear write protect (WP) bit */ + if (is_disk) + dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ else dev_spec = 0x0; if (msense_6) { @@ -2050,30 +2012,16 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) sdebug_capacity = get_sdebug_capacity(); if (8 == bd_len) { - if (sdebug_capacity > 0xfffffffe) { - ap[0] = 0xff; - ap[1] = 0xff; - ap[2] = 0xff; - ap[3] = 0xff; - } else { - ap[0] = (sdebug_capacity >> 24) & 0xff; - ap[1] = (sdebug_capacity >> 16) & 0xff; - ap[2] = (sdebug_capacity >> 8) & 0xff; - ap[3] = sdebug_capacity & 0xff; - } - ap[6] = (scsi_debug_sector_size >> 8) & 0xff; - ap[7] = scsi_debug_sector_size & 0xff; + if (sdebug_capacity > 0xfffffffe) + put_unaligned_be32(0xffffffff, ap + 0); + else + put_unaligned_be32(sdebug_capacity, ap + 0); + put_unaligned_be16(sdebug_sector_size, ap + 6); offset += bd_len; ap = arr + offset; } else if (16 == bd_len) { - unsigned long long capac = sdebug_capacity; - - for (k = 0; k < 8; ++k, capac >>= 8) - ap[7 - k] = capac & 0xff; - ap[12] = (scsi_debug_sector_size >> 24) & 0xff; - ap[13] = (scsi_debug_sector_size >> 16) & 0xff; - ap[14] = (scsi_debug_sector_size >> 8) & 0xff; - ap[15] = scsi_debug_sector_size & 0xff; + put_unaligned_be64((u64)sdebug_capacity, ap + 0); + put_unaligned_be32(sdebug_sector_size, ap + 12); offset += bd_len; ap = arr + offset; } @@ -2083,6 +2031,8 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } + bad_pcode = false; + switch (pcode) { case 0x1: /* Read-Write error recovery page, direct access */ len = resp_err_recov_pg(ap, pcontrol, target); @@ -2093,12 +2043,18 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) offset += len; break; case 0x3: /* Format device page, direct access */ - len = resp_format_pg(ap, pcontrol, target); - offset += len; + if (is_disk) { + len = resp_format_pg(ap, pcontrol, target); + offset += len; + } else + bad_pcode = true; break; case 0x8: /* Caching page, direct access */ - len = resp_caching_pg(ap, pcontrol, target); - offset += len; + if (is_disk) { + len = resp_caching_pg(ap, pcontrol, target); + offset += len; + } else + bad_pcode = true; break; case 0xa: /* Control Mode page, all devices */ len = resp_ctrl_m_pg(ap, pcontrol, target); @@ -2127,8 +2083,12 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if ((0 == subpcode) || (0xff == subpcode)) { len = resp_err_recov_pg(ap, pcontrol, target); len += resp_disconnect_pg(ap + len, pcontrol, target); - len += resp_format_pg(ap + len, pcontrol, target); - len += resp_caching_pg(ap + len, pcontrol, target); + if (is_disk) { + len += resp_format_pg(ap + len, pcontrol, + target); + len += resp_caching_pg(ap + len, pcontrol, 
+ target); + } len += resp_ctrl_m_pg(ap + len, pcontrol, target); len += resp_sas_sf_m_pg(ap + len, pcontrol, target); if (0xff == subpcode) { @@ -2137,29 +2097,31 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) len += resp_sas_sha_m_spg(ap + len, pcontrol); } len += resp_iec_m_pg(ap + len, pcontrol, target); + offset += len; } else { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } - offset += len; break; default: + bad_pcode = true; + break; + } + if (bad_pcode) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); return check_condition_result; } if (msense_6) arr[0] = offset - 1; - else { - arr[0] = ((offset - 2) >> 8) & 0xff; - arr[1] = (offset - 2) & 0xff; - } + else + put_unaligned_be16((offset - 2), arr + 0); return fill_from_dev_buffer(scp, arr, min(alloc_len, offset)); } #define SDEBUG_MAX_MSELECT_SZ 512 -static int -resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_mode_select(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { int pf, sp, ps, md_len, bd_len, off, spf, pg_len; int param_len, res, mpage; @@ -2170,21 +2132,20 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) memset(arr, 0, sizeof(arr)); pf = cmd[1] & 0x10; sp = cmd[1] & 0x1; - param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); + param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7); if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); return check_condition_result; } res = fetch_to_dev_buffer(scp, arr, param_len); if (-1 == res) - return (DID_ERROR << 16); - else if ((res < param_len) && - (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) + return DID_ERROR << 16; + else if (sdebug_verbose && (res < param_len)) sdev_printk(KERN_INFO, scp->device, "%s: cdb indicated=%d, IO sent=%d bytes\n", __func__, param_len, res); - md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); - bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); + md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); + bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); if (md_len > 2) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); return check_condition_result; @@ -2197,7 +2158,7 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return check_condition_result; } spf = !!(arr[off] & 0x40); - pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) : + pg_len = spf ? 
(get_unaligned_be16(arr + off + 2) + 4) : (arr[off + 1] + 2); if ((pg_len + off) > param_len) { mk_sense_buffer(scp, ILLEGAL_REQUEST, @@ -2216,7 +2177,7 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (ctrl_m_pg[1] == arr[off + 1]) { memcpy(ctrl_m_pg + 2, arr + off + 2, sizeof(ctrl_m_pg) - 2); - scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4); + sdebug_dsense = !!(ctrl_m_pg[2] & 0x4); goto set_mode_changed_ua; } break; @@ -2279,7 +2240,7 @@ static int resp_log_sense(struct scsi_cmnd * scp, pcontrol = (cmd[2] & 0xc0) >> 6; pcode = cmd[2] & 0x3f; subpcode = cmd[3] & 0xff; - alloc_len = (cmd[7] << 8) + cmd[8]; + alloc_len = get_unaligned_be16(cmd + 7); arr[0] = pcode; if (0 == subpcode) { switch (pcode) { @@ -2336,7 +2297,7 @@ static int resp_log_sense(struct scsi_cmnd * scp, mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } - len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); + len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); return fill_from_dev_buffer(scp, arr, min(len, SDEBUG_MAX_INQ_ARR_SZ)); } @@ -2358,8 +2319,8 @@ static int check_device_access_params(struct scsi_cmnd *scp, } /* Returns number of bytes copied or -1 if error. */ -static int -do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) +static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, + bool do_write) { int ret; u64 block, rest = 0; @@ -2384,15 +2345,15 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) rest = block + num - sdebug_store_sectors; ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, - fake_storep + (block * scsi_debug_sector_size), - (num - rest) * scsi_debug_sector_size, 0, do_write); - if (ret != (num - rest) * scsi_debug_sector_size) + fake_storep + (block * sdebug_sector_size), + (num - rest) * sdebug_sector_size, 0, do_write); + if (ret != (num - rest) * sdebug_sector_size) return ret; if (rest) { ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, - fake_storep, rest * scsi_debug_sector_size, - (num - rest) * scsi_debug_sector_size, do_write); + fake_storep, rest * sdebug_sector_size, + (num - rest) * sdebug_sector_size, do_write); } return ret; @@ -2401,13 +2362,12 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) /* If fake_store(lba,num) compares equal to arr(num), then copy top half of * arr into fake_store(lba,num) and return true. If comparison fails then * return false. 
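do_device_access() treats the ramdisk as circular: the starting LBA is reduced modulo the store size and a transfer that runs past the end is split into two copies, the second restarting at sector 0. A standalone sketch of that wrap-around (plain C, with memcpy standing in for sg_copy_buffer(); sizes are tiny example values):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SECT_SZ		512
#define STORE_SECTORS	8	/* tiny backing store for the example */

static uint8_t store[STORE_SECTORS * SECT_SZ];

/* Copy 'num' sectors starting at 'lba' into 'buf', wrapping at the end of the
 * backing store the way do_device_access() does for the scsi_debug ramdisk. */
static void ramdisk_read(uint8_t *buf, uint64_t lba, uint32_t num)
{
	uint64_t block = lba % STORE_SECTORS;	/* stands in for do_div() */
	uint32_t rest = 0;

	if (block + num > STORE_SECTORS)
		rest = (uint32_t)(block + num - STORE_SECTORS);

	memcpy(buf, store + block * SECT_SZ, (num - rest) * SECT_SZ);
	if (rest)				/* wrapped part, from sector 0 */
		memcpy(buf + (num - rest) * SECT_SZ, store, rest * SECT_SZ);
}

int main(void)
{
	uint8_t buf[3 * SECT_SZ];

	memset(store, 0xab, sizeof(store));
	ramdisk_read(buf, 7, 3);	/* sectors 7, 0, 1: crosses the end of the store */
	printf("%02x\n", buf[0]);
	return 0;
}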
*/ -static bool -comp_write_worker(u64 lba, u32 num, const u8 *arr) +static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) { bool res; u64 block, rest = 0; u32 store_blks = sdebug_store_sectors; - u32 lb_size = scsi_debug_sector_size; + u32 lb_size = sdebug_sector_size; block = do_div(lba, store_blks); if (block + num > store_blks) @@ -2434,7 +2394,7 @@ static __be16 dif_compute_csum(const void *buf, int len) { __be16 csum; - if (scsi_debug_guard) + if (sdebug_guard) csum = (__force __be16)ip_compute_csum(buf, len); else csum = cpu_to_be16(crc_t10dif(buf, len)); @@ -2445,7 +2405,7 @@ static __be16 dif_compute_csum(const void *buf, int len) static int dif_verify(struct sd_dif_tuple *sdt, const void *data, sector_t sector, u32 ei_lba) { - __be16 csum = dif_compute_csum(data, scsi_debug_sector_size); + __be16 csum = dif_compute_csum(data, sdebug_sector_size); if (sdt->guard_tag != csum) { pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", @@ -2454,13 +2414,13 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data, be16_to_cpu(csum)); return 0x01; } - if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && + if (sdebug_dif == SD_DIF_TYPE1_PROTECTION && be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { pr_err("REF check failed on sector %lu\n", (unsigned long)sector); return 0x03; } - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && + if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && be32_to_cpu(sdt->ref_tag) != ei_lba) { pr_err("REF check failed on sector %lu\n", (unsigned long)sector); @@ -2541,10 +2501,10 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, return 0; } -static int -resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { u8 *cmd = scp->cmnd; + struct sdebug_queued_cmd *sqcp; u64 lba; u32 num; u32 ei_lba; @@ -2591,40 +2551,43 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) check_prot = false; break; } - if (check_prot) { - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && + if (unlikely(have_dif_prot && check_prot)) { + if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && (cmd[1] & 0xe0)) { mk_sense_invalid_opcode(scp); return check_condition_result; } - if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || - scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && + if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || + sdebug_dif == SD_DIF_TYPE3_PROTECTION) && (cmd[1] & 0xe0) == 0) sdev_printk(KERN_ERR, scp->device, "Unprotected RD " "to DIF device\n"); } - if (sdebug_any_injecting_opt) { - struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); + if (unlikely(sdebug_any_injecting_opt)) { + sqcp = (struct sdebug_queued_cmd *)scp->host_scribble; - if (ep->inj_short) - num /= 2; - } + if (sqcp) { + if (sqcp->inj_short) + num /= 2; + } + } else + sqcp = NULL; /* inline check_device_access_params() */ - if (lba + num > sdebug_capacity) { + if (unlikely(lba + num > sdebug_capacity)) { mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); return check_condition_result; } /* transfer length excessive (tie in to block limits VPD page) */ - if (num > sdebug_store_sectors) { + if (unlikely(num > sdebug_store_sectors)) { /* needs work to find which cdb byte 'num' comes from */ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } - if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && - (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && - ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { + if 
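dif_verify() checks each 8-byte protection tuple: the guard tag must match a checksum of the block's data (crc_t10dif(), or an IP checksum when guard=1), and the reference tag must match the low 32 bits of the LBA for Type 1 (or the expected ei_lba for Type 2). A standalone sketch of the Type 1 checks, with a bit-by-bit CRC-16/T10-DIF in place of the kernel helper (plain C; the tuple fields are kept in host byte order here for brevity, while the driver stores them big-endian):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* CRC-16/T10-DIF (poly 0x8bb7, init 0), bit by bit; the kernel uses crc_t10dif(). */
static uint16_t crc_t10dif_sketch(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

struct pi_tuple {		/* 8 bytes of protection info per logical block */
	uint16_t guard_tag;	/* checksum of the block's data */
	uint16_t app_tag;
	uint32_t ref_tag;	/* Type 1: low 32 bits of the LBA */
};

/* Returns 0 on success, 1 on a guard error, 3 on a reference tag error,
 * mirroring the 0x01/0x03 ascq values used above. */
static int dif_verify_sketch(const struct pi_tuple *t, const uint8_t *data,
			     size_t lb_size, uint64_t sector, int dif_type)
{
	if (t->guard_tag != crc_t10dif_sketch(data, lb_size))
		return 1;			/* logical block guard check failed */
	if (dif_type == 1 && t->ref_tag != (uint32_t)sector)
		return 3;			/* reference tag check failed */
	return 0;
}

int main(void)
{
	uint8_t blk[512] = { 0 };
	struct pi_tuple t = { crc_t10dif_sketch(blk, sizeof(blk)), 0, 1234 };

	printf("%d\n", dif_verify_sketch(&t, blk, sizeof(blk), 1234, 1));
	return 0;
}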
(unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) && + (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && + ((lba + num) > OPT_MEDIUM_ERR_ADDR))) { /* claim unrecoverable read error */ mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); /* set info field and valid bit for fixed descriptor */ @@ -2641,7 +2604,7 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) read_lock_irqsave(&atomic_rw, iflags); /* DIX + T10 DIF */ - if (scsi_debug_dix && scsi_prot_sg_count(scp)) { + if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { int prot_ret = prot_verify_read(scp, lba, num, ei_lba); if (prot_ret) { @@ -2653,27 +2616,25 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) ret = do_device_access(scp, lba, num, false); read_unlock_irqrestore(&atomic_rw, iflags); - if (ret == -1) + if (unlikely(ret == -1)) return DID_ERROR << 16; scsi_in(scp)->resid = scsi_bufflen(scp) - ret; - if (sdebug_any_injecting_opt) { - struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); - - if (ep->inj_recovered) { + if (unlikely(sqcp)) { + if (sqcp->inj_recovered) { mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0); return check_condition_result; - } else if (ep->inj_transport) { + } else if (sqcp->inj_transport) { mk_sense_buffer(scp, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO); return check_condition_result; - } else if (ep->inj_dif) { + } else if (sqcp->inj_dif) { /* Logical block guard check failed */ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); return illegal_condition_result; - } else if (ep->inj_dix) { + } else if (sqcp->inj_dix) { mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); return illegal_condition_result; } @@ -2750,13 +2711,13 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, ret = dif_verify(sdt, daddr, sector, ei_lba); if (ret) { - dump_sector(daddr, scsi_debug_sector_size); + dump_sector(daddr, sdebug_sector_size); goto out; } sector++; ei_lba++; - dpage_offset += scsi_debug_sector_size; + dpage_offset += sdebug_sector_size; } diter.consumed = dpage_offset; sg_miter_stop(&diter); @@ -2777,24 +2738,18 @@ out: static unsigned long lba_to_map_index(sector_t lba) { - if (scsi_debug_unmap_alignment) { - lba += scsi_debug_unmap_granularity - - scsi_debug_unmap_alignment; - } - sector_div(lba, scsi_debug_unmap_granularity); - + if (sdebug_unmap_alignment) + lba += sdebug_unmap_granularity - sdebug_unmap_alignment; + sector_div(lba, sdebug_unmap_granularity); return lba; } static sector_t map_index_to_lba(unsigned long index) { - sector_t lba = index * scsi_debug_unmap_granularity; - - if (scsi_debug_unmap_alignment) { - lba -= scsi_debug_unmap_granularity - - scsi_debug_unmap_alignment; - } + sector_t lba = index * sdebug_unmap_granularity; + if (sdebug_unmap_alignment) + lba -= sdebug_unmap_granularity - sdebug_unmap_alignment; return lba; } @@ -2815,7 +2770,6 @@ static unsigned int map_state(sector_t lba, unsigned int *num) end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); *num = end - lba; - return mapped; } @@ -2841,27 +2795,27 @@ static void unmap_region(sector_t lba, unsigned int len) unsigned long index = lba_to_map_index(lba); if (lba == map_index_to_lba(index) && - lba + scsi_debug_unmap_granularity <= end && + lba + sdebug_unmap_granularity <= end && index < map_size) { clear_bit(index, map_storep); - if (scsi_debug_lbprz) { + if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */ memset(fake_storep + - lba * scsi_debug_sector_size, 0, - scsi_debug_sector_size * - 
scsi_debug_unmap_granularity); + lba * sdebug_sector_size, + (sdebug_lbprz & 1) ? 0 : 0xff, + sdebug_sector_size * + sdebug_unmap_granularity); } if (dif_storep) { memset(dif_storep + lba, 0xff, sizeof(*dif_storep) * - scsi_debug_unmap_granularity); + sdebug_unmap_granularity); } } lba = map_index_to_lba(index + 1); } } -static int -resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { u8 *cmd = scp->cmnd; u64 lba; @@ -2910,26 +2864,26 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) check_prot = false; break; } - if (check_prot) { - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && + if (unlikely(have_dif_prot && check_prot)) { + if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && (cmd[1] & 0xe0)) { mk_sense_invalid_opcode(scp); return check_condition_result; } - if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || - scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && + if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || + sdebug_dif == SD_DIF_TYPE3_PROTECTION) && (cmd[1] & 0xe0) == 0) sdev_printk(KERN_ERR, scp->device, "Unprotected WR " "to DIF device\n"); } /* inline check_device_access_params() */ - if (lba + num > sdebug_capacity) { + if (unlikely(lba + num > sdebug_capacity)) { mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); return check_condition_result; } /* transfer length excessive (tie in to block limits VPD page) */ - if (num > sdebug_store_sectors) { + if (unlikely(num > sdebug_store_sectors)) { /* needs work to find which cdb byte 'num' comes from */ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; @@ -2938,7 +2892,7 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) write_lock_irqsave(&atomic_rw, iflags); /* DIX + T10 DIF */ - if (scsi_debug_dix && scsi_prot_sg_count(scp)) { + if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { int prot_ret = prot_verify_write(scp, lba, num, ei_lba); if (prot_ret) { @@ -2949,43 +2903,46 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } ret = do_device_access(scp, lba, num, true); - if (scsi_debug_lbp()) + if (unlikely(scsi_debug_lbp())) map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); - if (-1 == ret) - return (DID_ERROR << 16); - else if ((ret < (num * scsi_debug_sector_size)) && - (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) + if (unlikely(-1 == ret)) + return DID_ERROR << 16; + else if (unlikely(sdebug_verbose && + (ret < (num * sdebug_sector_size)))) sdev_printk(KERN_INFO, scp->device, "%s: write: cdb indicated=%u, IO sent=%d bytes\n", - my_name, num * scsi_debug_sector_size, ret); + my_name, num * sdebug_sector_size, ret); - if (sdebug_any_injecting_opt) { - struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); + if (unlikely(sdebug_any_injecting_opt)) { + struct sdebug_queued_cmd *sqcp = + (struct sdebug_queued_cmd *)scp->host_scribble; - if (ep->inj_recovered) { - mk_sense_buffer(scp, RECOVERED_ERROR, - THRESHOLD_EXCEEDED, 0); - return check_condition_result; - } else if (ep->inj_dif) { - /* Logical block guard check failed */ - mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); - return illegal_condition_result; - } else if (ep->inj_dix) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); - return illegal_condition_result; + if (sqcp) { + if (sqcp->inj_recovered) { + mk_sense_buffer(scp, RECOVERED_ERROR, + THRESHOLD_EXCEEDED, 0); + return check_condition_result; + } else if (sqcp->inj_dif) { + /* Logical block guard check 
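The provisioning map above works in granules: lba_to_map_index() folds in the unmap alignment and granularity, one bit per granule records whether it is mapped, and unmap_region() clears bits and fills the backing store with zeroes or 0xff depending on lbprz. A standalone sketch of the index arithmetic and of map_region()-style bit setting (plain C; granularity, alignment and bitmap size are example values):

#include <stdint.h>
#include <stdio.h>

#define UNMAP_GRAN	8	/* blocks per granule */
#define UNMAP_ALIGN	4	/* offset of the first full granule */
#define MAP_BITS	64

static uint64_t map_bm;		/* bit set => granule is mapped (provisioned) */

static unsigned int lba_to_index(uint64_t lba)
{
	if (UNMAP_ALIGN)
		lba += UNMAP_GRAN - UNMAP_ALIGN;
	return (unsigned int)(lba / UNMAP_GRAN);
}

static uint64_t index_to_lba(unsigned int idx)
{
	uint64_t lba = (uint64_t)idx * UNMAP_GRAN;

	if (UNMAP_ALIGN)
		lba -= UNMAP_GRAN - UNMAP_ALIGN;
	return lba;
}

/* Mark every granule touched by [lba, lba + len) as mapped. */
static void map_region(uint64_t lba, unsigned int len)
{
	uint64_t end = lba + len;

	while (lba < end) {
		unsigned int idx = lba_to_index(lba);

		if (idx < MAP_BITS)
			map_bm |= 1ULL << idx;
		lba = index_to_lba(idx + 1);	/* jump to the next granule */
	}
}

int main(void)
{
	map_region(3, 10);			/* touches granules 0, 1 and 2 */
	printf("%#llx\n", (unsigned long long)map_bm);	/* 0x7 */
	return 0;
}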
failed */ + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + return illegal_condition_result; + } else if (sqcp->inj_dix) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + return illegal_condition_result; + } } } return 0; } -static int -resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, - bool unmap, bool ndob) +static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, + u32 ei_lba, bool unmap, bool ndob) { unsigned long iflags; unsigned long long i; int ret; + u64 lba_off; ret = check_device_access_params(scp, lba, num); if (ret) @@ -2998,31 +2955,29 @@ resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, goto out; } + lba_off = lba * sdebug_sector_size; /* if ndob then zero 1 logical block, else fetch 1 logical block */ if (ndob) { - memset(fake_storep + (lba * scsi_debug_sector_size), 0, - scsi_debug_sector_size); + memset(fake_storep + lba_off, 0, sdebug_sector_size); ret = 0; } else - ret = fetch_to_dev_buffer(scp, fake_storep + - (lba * scsi_debug_sector_size), - scsi_debug_sector_size); + ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, + sdebug_sector_size); if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); - return (DID_ERROR << 16); - } else if ((ret < (num * scsi_debug_sector_size)) && - (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) + return DID_ERROR << 16; + } else if (sdebug_verbose && (ret < (num * sdebug_sector_size))) sdev_printk(KERN_INFO, scp->device, "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", my_name, "write same", - num * scsi_debug_sector_size, ret); + num * sdebug_sector_size, ret); /* Copy first sector to remaining blocks */ for (i = 1 ; i < num ; i++) - memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size), - fake_storep + (lba * scsi_debug_sector_size), - scsi_debug_sector_size); + memcpy(fake_storep + ((lba + i) * sdebug_sector_size), + fake_storep + lba_off, + sdebug_sector_size); if (scsi_debug_lbp()) map_region(lba, num); @@ -3032,8 +2987,8 @@ out: return 0; } -static int -resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_write_same_10(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { u8 *cmd = scp->cmnd; u32 lba; @@ -3042,7 +2997,7 @@ resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) bool unmap = false; if (cmd[1] & 0x8) { - if (scsi_debug_lbpws10 == 0) { + if (sdebug_lbpws10 == 0) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); return check_condition_result; } else @@ -3050,15 +3005,15 @@ resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } lba = get_unaligned_be32(cmd + 2); num = get_unaligned_be16(cmd + 7); - if (num > scsi_debug_write_same_length) { + if (num > sdebug_write_same_length) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); return check_condition_result; } return resp_write_same(scp, lba, num, ei_lba, unmap, false); } -static int -resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_write_same_16(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { u8 *cmd = scp->cmnd; u64 lba; @@ -3068,7 +3023,7 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) bool ndob = false; if (cmd[1] & 0x8) { /* UNMAP */ - if (scsi_debug_lbpws == 0) { + if (sdebug_lbpws == 0) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); return check_condition_result; } else @@ -3078,7 +3033,7 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) ndob = true; lba = get_unaligned_be64(cmd + 2); num = get_unaligned_be32(cmd + 10); - if 
(num > scsi_debug_write_same_length) { + if (num > sdebug_write_same_length) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); return check_condition_result; } @@ -3088,8 +3043,8 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) /* Note the mode field is in the same position as the (lower) service action * field. For the Report supported operation codes command, SPC-4 suggests * each mode of this command should be reported separately; for future. */ -static int -resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_write_buffer(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { u8 *cmd = scp->cmnd; struct scsi_device *sdp = scp->device; @@ -3134,15 +3089,15 @@ resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return 0; } -static int -resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_comp_write(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { u8 *cmd = scp->cmnd; u8 *arr; u8 *fake_storep_hold; u64 lba; u32 dnum; - u32 lb_size = scsi_debug_sector_size; + u32 lb_size = sdebug_sector_size; u8 num; unsigned long iflags; int ret; @@ -3152,13 +3107,13 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ if (0 == num) return 0; /* degenerate case, not an error */ - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && + if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && (cmd[1] & 0xe0)) { mk_sense_invalid_opcode(scp); return check_condition_result; } - if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || - scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && + if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || + sdebug_dif == SD_DIF_TYPE3_PROTECTION) && (cmd[1] & 0xe0) == 0) sdev_printk(KERN_ERR, scp->device, "Unprotected WR " "to DIF device\n"); @@ -3193,8 +3148,7 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (ret == -1) { retval = DID_ERROR << 16; goto cleanup; - } else if ((ret < (dnum * lb_size)) && - (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) + } else if (sdebug_verbose && (ret < (dnum * lb_size))) sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " "indicated=%u, IO sent=%d bytes\n", my_name, dnum * lb_size, ret); @@ -3217,8 +3171,7 @@ struct unmap_block_desc { __be32 __reserved; }; -static int -resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { unsigned char *buf; struct unmap_block_desc *desc; @@ -3233,12 +3186,12 @@ resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) BUG_ON(scsi_bufflen(scp) != payload_len); descriptors = (payload_len - 8) / 16; - if (descriptors > scsi_debug_unmap_max_desc) { + if (descriptors > sdebug_unmap_max_desc) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); return check_condition_result; } - buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); + buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC); if (!buf) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -3276,8 +3229,8 @@ out: #define SDEBUG_GET_LBA_STATUS_LEN 32 -static int -resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_get_lba_status(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { u8 *cmd = scp->cmnd; u64 lba; @@ -3316,63 +3269,94 @@ resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN); } -#define SDEBUG_RLUN_ARR_SZ 256 - -static 
int resp_report_luns(struct scsi_cmnd * scp, - struct sdebug_dev_info * devip) +/* Even though each pseudo target has a REPORT LUNS "well known logical unit" + * (W-LUN), the normal Linux scanning logic does not associate it with a + * device (e.g. /dev/sg7). The following magic will make that association: + * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan" + * where <n> is a host number. If there are multiple targets in a host then + * the above will associate a W-LUN to each target. To only get a W-LUN + * for target 2, then use "echo '- 2 49409' > scan" . + */ +static int resp_report_luns(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { + unsigned char *cmd = scp->cmnd; unsigned int alloc_len; - int lun_cnt, i, upper, num, n, want_wlun, shortish; + unsigned char select_report; u64 lun; - unsigned char *cmd = scp->cmnd; - int select_report = (int)cmd[2]; - struct scsi_lun *one_lun; - unsigned char arr[SDEBUG_RLUN_ARR_SZ]; - unsigned char * max_addr; + struct scsi_lun *lun_p; + u8 *arr; + unsigned int lun_cnt; /* normal LUN count (max: 256) */ + unsigned int wlun_cnt; /* report luns W-LUN count */ + unsigned int tlun_cnt; /* total LUN count */ + unsigned int rlen; /* response length (in bytes) */ + int i, res; clear_luns_changed_on_target(devip); - alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); - shortish = (alloc_len < 4); - if (shortish || (select_report > 2)) { - mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1); + + select_report = cmd[2]; + alloc_len = get_unaligned_be32(cmd + 6); + + if (alloc_len < 4) { + pr_err("alloc len too small %d\n", alloc_len); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); return check_condition_result; } - /* can produce response with up to 16k luns (lun 0 to lun 16383) */ - memset(arr, 0, SDEBUG_RLUN_ARR_SZ); - lun_cnt = scsi_debug_max_luns; - if (1 == select_report) + + switch (select_report) { + case 0: /* all LUNs apart from W-LUNs */ + lun_cnt = sdebug_max_luns; + wlun_cnt = 0; + break; + case 1: /* only W-LUNs */ lun_cnt = 0; - else if (scsi_debug_no_lun_0 && (lun_cnt > 0)) + wlun_cnt = 1; + break; + case 2: /* all LUNs */ + lun_cnt = sdebug_max_luns; + wlun_cnt = 1; + break; + case 0x10: /* only administrative LUs */ + case 0x11: /* see SPC-5 */ + case 0x12: /* only subsiduary LUs owned by referenced LU */ + default: + pr_debug("select report invalid %d\n", select_report); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); + return check_condition_result; + } + + if (sdebug_no_lun_0 && (lun_cnt > 0)) --lun_cnt; - want_wlun = (select_report > 0) ? 1 : 0; - num = lun_cnt + want_wlun; - arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff; - arr[3] = (sizeof(struct scsi_lun) * num) & 0xff; - n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) / - sizeof(struct scsi_lun)), num); - if (n < num) { - want_wlun = 0; - lun_cnt = n; - } - one_lun = (struct scsi_lun *) &arr[8]; - max_addr = arr + SDEBUG_RLUN_ARR_SZ; - for (i = 0, lun = (scsi_debug_no_lun_0 ? 
1 : 0); - ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr)); - i++, lun++) { - upper = (lun >> 8) & 0x3f; - if (upper) - one_lun[i].scsi_lun[0] = - (upper | (SAM2_LUN_ADDRESS_METHOD << 6)); - one_lun[i].scsi_lun[1] = lun & 0xff; - } - if (want_wlun) { - one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff; - one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff; - i++; - } - alloc_len = (unsigned char *)(one_lun + i) - arr; - return fill_from_dev_buffer(scp, arr, - min((int)alloc_len, SDEBUG_RLUN_ARR_SZ)); + + tlun_cnt = lun_cnt + wlun_cnt; + + rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8; + arr = vmalloc(rlen); + if (!arr) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + memset(arr, 0, rlen); + pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n", + select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0); + + /* luns start at byte 8 in response following the header */ + lun_p = (struct scsi_lun *)&arr[8]; + + /* LUNs use single level peripheral device addressing method */ + lun = sdebug_no_lun_0 ? 1 : 0; + for (i = 0; i < lun_cnt; i++) + int_to_scsilun(lun++, lun_p++); + + if (wlun_cnt) + int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++); + + put_unaligned_be32(rlen - 8, &arr[0]); + + res = fill_from_dev_buffer(scp, arr, rlen); + vfree(arr); + return res; } static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, @@ -3385,7 +3369,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, struct sg_mapping_iter miter; /* better not to use temporary buffer. */ - buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); + buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC); if (!buf) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -3411,8 +3395,8 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, return 0; } -static int -resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +static int resp_xdwriteread_10(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) { u8 *cmd = scp->cmnd; u64 lba; @@ -3437,41 +3421,66 @@ resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return resp_xdwriteread(scp, lba, num, devip); } -/* When timer or tasklet goes off this function is called. */ -static void sdebug_q_cmd_complete(unsigned long indx) +static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd) { - int qa_indx; + struct sdebug_queue *sqp = sdebug_q_arr; + + if (sdebug_mq_active) { + u32 tag = blk_mq_unique_tag(cmnd->request); + u16 hwq = blk_mq_unique_tag_to_hwq(tag); + + if (unlikely(hwq >= submit_queues)) { + pr_warn("Unexpected hwq=%d, apply modulo\n", hwq); + hwq %= submit_queues; + } + pr_debug("tag=%u, hwq=%d\n", tag, hwq); + return sqp + hwq; + } else + return sqp; +} + +/* Queued (deferred) command completions converge here. 
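The new get_queue() above picks a per-hardware-queue context by decoding the hwq index from the block layer's unique tag. A minimal user-space sketch of that decode plus the defensive modulo fallback; the 16-bit split models what blk_mq_unique_tag_to_hwq() does and the names here are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Model of blk_mq_unique_tag_to_hwq(): hwq index assumed in the upper 16 bits. */
static unsigned int tag_to_hwq(uint32_t unique_tag)
{
	return unique_tag >> 16;
}

int main(void)
{
	int submit_queues = 4;                 /* stand-in for the module parameter */
	uint32_t unique_tag = (6u << 16) | 42; /* hwq 6, per-queue tag 42 */
	unsigned int hwq = tag_to_hwq(unique_tag);

	if (hwq >= (unsigned int)submit_queues)
		hwq %= submit_queues;          /* same fallback as get_queue() */
	printf("command routed to submit queue %u\n", hwq);
	return 0;
}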
*/ +static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) +{ + int qc_idx; int retiring = 0; unsigned long iflags; + struct sdebug_queue *sqp; struct sdebug_queued_cmd *sqcp; struct scsi_cmnd *scp; struct sdebug_dev_info *devip; - atomic_inc(&sdebug_completions); - qa_indx = indx; - if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { - pr_err("wild qa_indx=%d\n", qa_indx); + qc_idx = sd_dp->qc_idx; + sqp = sdebug_q_arr + sd_dp->sqa_idx; + if (sdebug_statistics) { + atomic_inc(&sdebug_completions); + if (raw_smp_processor_id() != sd_dp->issuing_cpu) + atomic_inc(&sdebug_miss_cpus); + } + if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) { + pr_err("wild qc_idx=%d\n", qc_idx); return; } - spin_lock_irqsave(&queued_arr_lock, iflags); - sqcp = &queued_arr[qa_indx]; + spin_lock_irqsave(&sqp->qc_lock, iflags); + sqcp = &sqp->qc_arr[qc_idx]; scp = sqcp->a_cmnd; - if (NULL == scp) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); - pr_err("scp is NULL\n"); + if (unlikely(scp == NULL)) { + spin_unlock_irqrestore(&sqp->qc_lock, iflags); + pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n", + sd_dp->sqa_idx, qc_idx); return; } devip = (struct sdebug_dev_info *)scp->device->hostdata; - if (devip) + if (likely(devip)) atomic_dec(&devip->num_in_q); else pr_err("devip=NULL\n"); - if (atomic_read(&retired_max_queue) > 0) + if (unlikely(atomic_read(&retired_max_queue) > 0)) retiring = 1; sqcp->a_cmnd = NULL; - if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); + if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) { + spin_unlock_irqrestore(&sqp->qc_lock, iflags); pr_err("Unexpected completion\n"); return; } @@ -3480,105 +3489,71 @@ static void sdebug_q_cmd_complete(unsigned long indx) int k, retval; retval = atomic_read(&retired_max_queue); - if (qa_indx >= retval) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); + if (qc_idx >= retval) { + spin_unlock_irqrestore(&sqp->qc_lock, iflags); pr_err("index %d too large\n", retval); return; } - k = find_last_bit(queued_in_use_bm, retval); - if ((k < scsi_debug_max_queue) || (k == retval)) + k = find_last_bit(sqp->in_use_bm, retval); + if ((k < sdebug_max_queue) || (k == retval)) atomic_set(&retired_max_queue, 0); else atomic_set(&retired_max_queue, k + 1); } - spin_unlock_irqrestore(&queued_arr_lock, iflags); + spin_unlock_irqrestore(&sqp->qc_lock, iflags); scp->scsi_done(scp); /* callback to mid level */ } /* When high resolution timer goes off this function is called. 
*/ -static enum hrtimer_restart -sdebug_q_cmd_hrt_complete(struct hrtimer *timer) +static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer) { - int qa_indx; - int retiring = 0; - unsigned long iflags; - struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer; - struct sdebug_queued_cmd *sqcp; - struct scsi_cmnd *scp; - struct sdebug_dev_info *devip; - - atomic_inc(&sdebug_completions); - qa_indx = sd_hrtp->qa_indx; - if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { - pr_err("wild qa_indx=%d\n", qa_indx); - goto the_end; - } - spin_lock_irqsave(&queued_arr_lock, iflags); - sqcp = &queued_arr[qa_indx]; - scp = sqcp->a_cmnd; - if (NULL == scp) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); - pr_err("scp is NULL\n"); - goto the_end; - } - devip = (struct sdebug_dev_info *)scp->device->hostdata; - if (devip) - atomic_dec(&devip->num_in_q); - else - pr_err("devip=NULL\n"); - if (atomic_read(&retired_max_queue) > 0) - retiring = 1; - - sqcp->a_cmnd = NULL; - if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); - pr_err("Unexpected completion\n"); - goto the_end; - } - - if (unlikely(retiring)) { /* user has reduced max_queue */ - int k, retval; - - retval = atomic_read(&retired_max_queue); - if (qa_indx >= retval) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); - pr_err("index %d too large\n", retval); - goto the_end; - } - k = find_last_bit(queued_in_use_bm, retval); - if ((k < scsi_debug_max_queue) || (k == retval)) - atomic_set(&retired_max_queue, 0); - else - atomic_set(&retired_max_queue, k + 1); - } - spin_unlock_irqrestore(&queued_arr_lock, iflags); - scp->scsi_done(scp); /* callback to mid level */ -the_end: + struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer, + hrt); + sdebug_q_cmd_complete(sd_dp); return HRTIMER_NORESTART; } -static struct sdebug_dev_info * -sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags) +/* When work queue schedules work, it calls this function. 
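Both deferred paths (hrtimer callback and work handler) recover the enclosing sdebug_defer from the embedded member that fired and converge on the common completion routine. A compact user-space illustration of that container_of() pointer arithmetic, using simplified stand-in types rather than the driver's real structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_stub { int armed; };
struct work_stub  { int pending; };

struct defer_stub {                /* stand-in for struct sdebug_defer */
	struct timer_stub hrt;
	struct work_stub  work;
	int qc_idx;
};

static void complete_common(struct defer_stub *dp)
{
	printf("completing queued command at index %d\n", dp->qc_idx);
}

static void timer_fired(struct timer_stub *t)   /* hrtimer callback path */
{
	complete_common(container_of(t, struct defer_stub, hrt));
}

static void work_fired(struct work_stub *w)     /* work queue path */
{
	complete_common(container_of(w, struct defer_stub, work));
}

int main(void)
{
	struct defer_stub d = { .qc_idx = 7 };

	timer_fired(&d.hrt);
	work_fired(&d.work);
	return 0;
}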
*/ +static void sdebug_q_cmd_wq_complete(struct work_struct *work) +{ + struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer, + ew.work); + sdebug_q_cmd_complete(sd_dp); +} + +static bool got_shared_uuid; +static uuid_be shared_uuid; + +static struct sdebug_dev_info *sdebug_device_create( + struct sdebug_host_info *sdbg_host, gfp_t flags) { struct sdebug_dev_info *devip; devip = kzalloc(sizeof(*devip), flags); if (devip) { + if (sdebug_uuid_ctl == 1) + uuid_be_gen(&devip->lu_name); + else if (sdebug_uuid_ctl == 2) { + if (got_shared_uuid) + devip->lu_name = shared_uuid; + else { + uuid_be_gen(&shared_uuid); + got_shared_uuid = true; + devip->lu_name = shared_uuid; + } + } devip->sdbg_host = sdbg_host; list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); } return devip; } -static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) +static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev) { - struct sdebug_host_info * sdbg_host; - struct sdebug_dev_info * open_devip = NULL; - struct sdebug_dev_info * devip = - (struct sdebug_dev_info *)sdev->hostdata; + struct sdebug_host_info *sdbg_host; + struct sdebug_dev_info *open_devip = NULL; + struct sdebug_dev_info *devip; - if (devip) - return devip; sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); if (!sdbg_host) { pr_err("Host info NULL\n"); @@ -3614,7 +3589,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) static int scsi_debug_slave_alloc(struct scsi_device *sdp) { - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) + if (sdebug_verbose) pr_info("slave_alloc <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); @@ -3623,19 +3598,22 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp) static int scsi_debug_slave_configure(struct scsi_device *sdp) { - struct sdebug_dev_info *devip; + struct sdebug_dev_info *devip = + (struct sdebug_dev_info *)sdp->hostdata; - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) + if (sdebug_verbose) pr_info("slave_configure <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); - if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) - sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; - devip = devInfoReg(sdp); - if (NULL == devip) - return 1; /* no resources, will be marked offline */ + if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN) + sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN; + if (devip == NULL) { + devip = find_build_dev_info(sdp); + if (devip == NULL) + return 1; /* no resources, will be marked offline */ + } sdp->hostdata = devip; blk_queue_max_segment_size(sdp->request_queue, -1U); - if (scsi_debug_no_uld) + if (sdebug_no_uld) sdp->no_uld_attach = 1; return 0; } @@ -3645,7 +3623,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp) struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) + if (sdebug_verbose) pr_info("slave_destroy <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); if (devip) { @@ -3655,135 +3633,130 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp) } } -/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */ -static int stop_queued_cmnd(struct scsi_cmnd *cmnd) +static void stop_qc_helper(struct sdebug_defer *sd_dp) +{ + if (!sd_dp) + return; + if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) + hrtimer_cancel(&sd_dp->hrt); + else if (sdebug_jdelay < 0) + 
cancel_work_sync(&sd_dp->ew.work); +} + +/* If @cmnd found deletes its timer or work queue and returns true; else + returns false */ +static bool stop_queued_cmnd(struct scsi_cmnd *cmnd) { unsigned long iflags; - int k, qmax, r_qmax; + int j, k, qmax, r_qmax; + struct sdebug_queue *sqp; struct sdebug_queued_cmd *sqcp; struct sdebug_dev_info *devip; - - spin_lock_irqsave(&queued_arr_lock, iflags); - qmax = scsi_debug_max_queue; - r_qmax = atomic_read(&retired_max_queue); - if (r_qmax > qmax) - qmax = r_qmax; - for (k = 0; k < qmax; ++k) { - if (test_bit(k, queued_in_use_bm)) { - sqcp = &queued_arr[k]; - if (cmnd == sqcp->a_cmnd) { + struct sdebug_defer *sd_dp; + + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { + spin_lock_irqsave(&sqp->qc_lock, iflags); + qmax = sdebug_max_queue; + r_qmax = atomic_read(&retired_max_queue); + if (r_qmax > qmax) + qmax = r_qmax; + for (k = 0; k < qmax; ++k) { + if (test_bit(k, sqp->in_use_bm)) { + sqcp = &sqp->qc_arr[k]; + if (cmnd != sqcp->a_cmnd) + continue; + /* found */ devip = (struct sdebug_dev_info *) - cmnd->device->hostdata; + cmnd->device->hostdata; if (devip) atomic_dec(&devip->num_in_q); sqcp->a_cmnd = NULL; - spin_unlock_irqrestore(&queued_arr_lock, - iflags); - if (scsi_debug_ndelay > 0) { - if (sqcp->sd_hrtp) - hrtimer_cancel( - &sqcp->sd_hrtp->hrt); - } else if (scsi_debug_delay > 0) { - if (sqcp->cmnd_timerp) - del_timer_sync( - sqcp->cmnd_timerp); - } else if (scsi_debug_delay < 0) { - if (sqcp->tletp) - tasklet_kill(sqcp->tletp); - } - clear_bit(k, queued_in_use_bm); - return 1; + sd_dp = sqcp->sd_dp; + spin_unlock_irqrestore(&sqp->qc_lock, iflags); + stop_qc_helper(sd_dp); + clear_bit(k, sqp->in_use_bm); + return true; } } + spin_unlock_irqrestore(&sqp->qc_lock, iflags); } - spin_unlock_irqrestore(&queued_arr_lock, iflags); - return 0; + return false; } -/* Deletes (stops) timers or tasklets of all queued commands */ +/* Deletes (stops) timers or work queues of all queued commands */ static void stop_all_queued(void) { unsigned long iflags; - int k; + int j, k; + struct sdebug_queue *sqp; struct sdebug_queued_cmd *sqcp; struct sdebug_dev_info *devip; - - spin_lock_irqsave(&queued_arr_lock, iflags); - for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { - if (test_bit(k, queued_in_use_bm)) { - sqcp = &queued_arr[k]; - if (sqcp->a_cmnd) { + struct sdebug_defer *sd_dp; + + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { + spin_lock_irqsave(&sqp->qc_lock, iflags); + for (k = 0; k < SDEBUG_CANQUEUE; ++k) { + if (test_bit(k, sqp->in_use_bm)) { + sqcp = &sqp->qc_arr[k]; + if (sqcp->a_cmnd == NULL) + continue; devip = (struct sdebug_dev_info *) sqcp->a_cmnd->device->hostdata; if (devip) atomic_dec(&devip->num_in_q); sqcp->a_cmnd = NULL; - spin_unlock_irqrestore(&queued_arr_lock, - iflags); - if (scsi_debug_ndelay > 0) { - if (sqcp->sd_hrtp) - hrtimer_cancel( - &sqcp->sd_hrtp->hrt); - } else if (scsi_debug_delay > 0) { - if (sqcp->cmnd_timerp) - del_timer_sync( - sqcp->cmnd_timerp); - } else if (scsi_debug_delay < 0) { - if (sqcp->tletp) - tasklet_kill(sqcp->tletp); - } - clear_bit(k, queued_in_use_bm); - spin_lock_irqsave(&queued_arr_lock, iflags); + sd_dp = sqcp->sd_dp; + spin_unlock_irqrestore(&sqp->qc_lock, iflags); + stop_qc_helper(sd_dp); + clear_bit(k, sqp->in_use_bm); + spin_lock_irqsave(&sqp->qc_lock, iflags); } } + spin_unlock_irqrestore(&sqp->qc_lock, iflags); } - spin_unlock_irqrestore(&queued_arr_lock, iflags); } /* Free queued command memory on heap */ static void free_all_queued(void) { - unsigned long 
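The cancel logic in stop_qc_helper() mirrors how the response was deferred: a positive delay (jiffies) or ndelay (nanoseconds) means an hrtimer was armed, while a negative delay means a work item was queued. A user-space model of that decision, with the cancel calls reduced to prints and the values chosen only for illustration:

#include <stdio.h>

static int sdebug_jdelay = -1;   /* < 0: defer via work queue */
static int sdebug_ndelay;        /* > 0: defer via hrtimer, in nanoseconds */

static void cancel_deferred(void)
{
	if (sdebug_jdelay > 0 || sdebug_ndelay > 0)
		printf("hrtimer_cancel()\n");     /* timer-based deferral */
	else if (sdebug_jdelay < 0)
		printf("cancel_work_sync()\n");   /* work-queue deferral */
	/* jdelay == 0 and ndelay == 0: completed inline, nothing queued */
}

int main(void)
{
	cancel_deferred();
	return 0;
}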
iflags; - int k; + int j, k; + struct sdebug_queue *sqp; struct sdebug_queued_cmd *sqcp; - spin_lock_irqsave(&queued_arr_lock, iflags); - for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { - sqcp = &queued_arr[k]; - kfree(sqcp->cmnd_timerp); - sqcp->cmnd_timerp = NULL; - kfree(sqcp->tletp); - sqcp->tletp = NULL; - kfree(sqcp->sd_hrtp); - sqcp->sd_hrtp = NULL; + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { + for (k = 0; k < SDEBUG_CANQUEUE; ++k) { + sqcp = &sqp->qc_arr[k]; + kfree(sqcp->sd_dp); + sqcp->sd_dp = NULL; + } } - spin_unlock_irqrestore(&queued_arr_lock, iflags); } static int scsi_debug_abort(struct scsi_cmnd *SCpnt) { + bool ok; + ++num_aborts; if (SCpnt) { - if (SCpnt->device && - (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)) - sdev_printk(KERN_INFO, SCpnt->device, "%s\n", - __func__); - stop_queued_cmnd(SCpnt); + ok = stop_queued_cmnd(SCpnt); + if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) + sdev_printk(KERN_INFO, SCpnt->device, + "%s: command%s found\n", __func__, + ok ? "" : " not"); } return SUCCESS; } static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt) { - struct sdebug_dev_info * devip; - ++num_dev_resets; if (SCpnt && SCpnt->device) { struct scsi_device *sdp = SCpnt->device; + struct sdebug_dev_info *devip = + (struct sdebug_dev_info *)sdp->hostdata; - if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) sdev_printk(KERN_INFO, sdp, "%s\n", __func__); - devip = devInfoReg(sdp); if (devip) set_bit(SDEBUG_UA_POR, devip->uas_bm); } @@ -3804,7 +3777,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) sdp = SCpnt->device; if (!sdp) goto lie; - if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) sdev_printk(KERN_INFO, sdp, "%s\n", __func__); hp = sdp->host; if (!hp) @@ -3819,7 +3792,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) ++k; } } - if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) sdev_printk(KERN_INFO, sdp, "%s: %d device(s) found in target\n", __func__, k); lie: @@ -3838,7 +3811,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt) if (!(SCpnt && SCpnt->device)) goto lie; sdp = SCpnt->device; - if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts) + if (SDEBUG_OPT_ALL_NOISE & sdebug_opts) sdev_printk(KERN_INFO, sdp, "%s\n", __func__); hp = sdp->host; if (hp) { @@ -3852,7 +3825,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt) } } } - if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) sdev_printk(KERN_INFO, sdp, "%s: %d device(s) found in host\n", __func__, k); lie: @@ -3866,7 +3839,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt) int k = 0; ++num_host_resets; - if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)) + if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); spin_lock(&sdebug_host_list_lock); list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { @@ -3878,7 +3851,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt) } spin_unlock(&sdebug_host_list_lock); stop_all_queued(); - if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts) + if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) sdev_printk(KERN_INFO, SCpnt->device, "%s: %d device(s) found\n", __func__, k); return SUCCESS; @@ -3893,22 +3866,22 @@ static void __init sdebug_build_parts(unsigned char *ramp, int heads_by_sects, start_sec, end_sec; /* assume partition table 
already zeroed */ - if ((scsi_debug_num_parts < 1) || (store_size < 1048576)) + if ((sdebug_num_parts < 1) || (store_size < 1048576)) return; - if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { - scsi_debug_num_parts = SDEBUG_MAX_PARTS; + if (sdebug_num_parts > SDEBUG_MAX_PARTS) { + sdebug_num_parts = SDEBUG_MAX_PARTS; pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS); } num_sectors = (int)sdebug_store_sectors; sectors_per_part = (num_sectors - sdebug_sectors_per) - / scsi_debug_num_parts; + / sdebug_num_parts; heads_by_sects = sdebug_heads * sdebug_sectors_per; starts[0] = sdebug_sectors_per; - for (k = 1; k < scsi_debug_num_parts; ++k) + for (k = 1; k < sdebug_num_parts; ++k) starts[k] = ((k * sectors_per_part) / heads_by_sects) * heads_by_sects; - starts[scsi_debug_num_parts] = num_sectors; - starts[scsi_debug_num_parts + 1] = 0; + starts[sdebug_num_parts] = num_sectors; + starts[sdebug_num_parts + 1] = 0; ramp[510] = 0x55; /* magic partition markings */ ramp[511] = 0xAA; @@ -3934,67 +3907,118 @@ static void __init sdebug_build_parts(unsigned char *ramp, } } -static int -schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, - int scsi_result, int delta_jiff) +static void block_unblock_all_queues(bool block) +{ + int j; + struct sdebug_queue *sqp; + + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) + atomic_set(&sqp->blocked, (int)block); +} + +/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1 + * commands will be processed normally before triggers occur. + */ +static void tweak_cmnd_count(void) +{ + int count, modulo; + + modulo = abs(sdebug_every_nth); + if (modulo < 2) + return; + block_unblock_all_queues(true); + count = atomic_read(&sdebug_cmnd_count); + atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo); + block_unblock_all_queues(false); +} + +static void clear_queue_stats(void) +{ + atomic_set(&sdebug_cmnd_count, 0); + atomic_set(&sdebug_completions, 0); + atomic_set(&sdebug_miss_cpus, 0); + atomic_set(&sdebug_a_tsf, 0); +} + +static void setup_inject(struct sdebug_queue *sqp, + struct sdebug_queued_cmd *sqcp) +{ + if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) + return; + sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts); + sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts); + sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts); + sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts); + sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts); +} + +/* Complete the processing of the thread that queued a SCSI command to this + * driver. It either completes the command by calling cmnd_done() or + * schedules a hr timer or work queue then returns 0. Returns + * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources. 
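tweak_cmnd_count() above rounds the global command counter down to a multiple of abs(every_nth) so that the next injection trigger fires only after a full run of normally processed commands. The arithmetic in isolation, as a runnable sketch with illustrative values:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int every_nth = -5;              /* negative values also inject */
	int modulo = abs(every_nth);
	int count = 123;                 /* current value of the command counter */

	if (modulo >= 2)
		count = (count / modulo) * modulo;   /* round down: 123 -> 120 */
	printf("adjusted command count: %d\n", count);
	return 0;
}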
+ */ +static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, + int scsi_result, int delta_jiff) { unsigned long iflags; int k, num_in_q, qdepth, inject; - struct sdebug_queued_cmd *sqcp = NULL; + struct sdebug_queue *sqp; + struct sdebug_queued_cmd *sqcp; struct scsi_device *sdp; + struct sdebug_defer *sd_dp; - /* this should never happen */ - if (WARN_ON(!cmnd)) - return SCSI_MLQUEUE_HOST_BUSY; - - if (NULL == devip) { - pr_warn("called devip == NULL\n"); - /* no particularly good error to report back */ - return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(devip == NULL)) { + if (scsi_result == 0) + scsi_result = DID_NO_CONNECT << 16; + goto respond_in_thread; } - sdp = cmnd->device; - if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) + if (unlikely(sdebug_verbose && scsi_result)) sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", __func__, scsi_result); if (delta_jiff == 0) goto respond_in_thread; /* schedule the response at a later time if resources permit */ - spin_lock_irqsave(&queued_arr_lock, iflags); + sqp = get_queue(cmnd); + spin_lock_irqsave(&sqp->qc_lock, iflags); + if (unlikely(atomic_read(&sqp->blocked))) { + spin_unlock_irqrestore(&sqp->qc_lock, iflags); + return SCSI_MLQUEUE_HOST_BUSY; + } num_in_q = atomic_read(&devip->num_in_q); qdepth = cmnd->device->queue_depth; inject = 0; - if ((qdepth > 0) && (num_in_q >= qdepth)) { + if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) { if (scsi_result) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); + spin_unlock_irqrestore(&sqp->qc_lock, iflags); goto respond_in_thread; } else scsi_result = device_qfull_result; - } else if ((scsi_debug_every_nth != 0) && - (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) && - (scsi_result == 0)) { + } else if (unlikely(sdebug_every_nth && + (SDEBUG_OPT_RARE_TSF & sdebug_opts) && + (scsi_result == 0))) { if ((num_in_q == (qdepth - 1)) && (atomic_inc_return(&sdebug_a_tsf) >= - abs(scsi_debug_every_nth))) { + abs(sdebug_every_nth))) { atomic_set(&sdebug_a_tsf, 0); inject = 1; scsi_result = device_qfull_result; } } - k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue); - if (k >= scsi_debug_max_queue) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); + k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue); + if (unlikely(k >= sdebug_max_queue)) { + spin_unlock_irqrestore(&sqp->qc_lock, iflags); if (scsi_result) goto respond_in_thread; - else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts) + else if (SDEBUG_OPT_ALL_TSF & sdebug_opts) scsi_result = device_qfull_result; - if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) + if (SDEBUG_OPT_Q_NOISE & sdebug_opts) sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded, %s\n", - __func__, scsi_debug_max_queue, + __func__, sdebug_max_queue, (scsi_result ? 
"status: TASK SET FULL" : "report: host busy")); if (scsi_result) @@ -4002,55 +4026,56 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, else return SCSI_MLQUEUE_HOST_BUSY; } - __set_bit(k, queued_in_use_bm); + __set_bit(k, sqp->in_use_bm); atomic_inc(&devip->num_in_q); - sqcp = &queued_arr[k]; + sqcp = &sqp->qc_arr[k]; sqcp->a_cmnd = cmnd; + cmnd->host_scribble = (unsigned char *)sqcp; cmnd->result = scsi_result; - spin_unlock_irqrestore(&queued_arr_lock, iflags); - if (delta_jiff > 0) { - if (NULL == sqcp->cmnd_timerp) { - sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list), - GFP_ATOMIC); - if (NULL == sqcp->cmnd_timerp) - return SCSI_MLQUEUE_HOST_BUSY; - init_timer(sqcp->cmnd_timerp); - } - sqcp->cmnd_timerp->function = sdebug_q_cmd_complete; - sqcp->cmnd_timerp->data = k; - sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff; - add_timer(sqcp->cmnd_timerp); - } else if (scsi_debug_ndelay > 0) { - ktime_t kt = ktime_set(0, scsi_debug_ndelay); - struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp; - - if (NULL == sd_hp) { - sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC); - if (NULL == sd_hp) + sd_dp = sqcp->sd_dp; + spin_unlock_irqrestore(&sqp->qc_lock, iflags); + if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt)) + setup_inject(sqp, sqcp); + if (delta_jiff > 0 || sdebug_ndelay > 0) { + ktime_t kt; + + if (delta_jiff > 0) { + struct timespec ts; + + jiffies_to_timespec(delta_jiff, &ts); + kt = ktime_set(ts.tv_sec, ts.tv_nsec); + } else + kt = ktime_set(0, sdebug_ndelay); + if (NULL == sd_dp) { + sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); + if (NULL == sd_dp) return SCSI_MLQUEUE_HOST_BUSY; - sqcp->sd_hrtp = sd_hp; - hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - sd_hp->hrt.function = sdebug_q_cmd_hrt_complete; - sd_hp->qa_indx = k; + sqcp->sd_dp = sd_dp; + hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + sd_dp->hrt.function = sdebug_q_cmd_hrt_complete; + sd_dp->sqa_idx = sqp - sdebug_q_arr; + sd_dp->qc_idx = k; } - hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL); - } else { /* delay < 0 */ - if (NULL == sqcp->tletp) { - sqcp->tletp = kmalloc(sizeof(*sqcp->tletp), - GFP_ATOMIC); - if (NULL == sqcp->tletp) + if (sdebug_statistics) + sd_dp->issuing_cpu = raw_smp_processor_id(); + hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); + } else { /* jdelay < 0, use work queue */ + if (NULL == sd_dp) { + sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC); + if (NULL == sd_dp) return SCSI_MLQUEUE_HOST_BUSY; - tasklet_init(sqcp->tletp, - sdebug_q_cmd_complete, k); + sqcp->sd_dp = sd_dp; + sd_dp->sqa_idx = sqp - sdebug_q_arr; + sd_dp->qc_idx = k; + INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete); } - if (-1 == delta_jiff) - tasklet_hi_schedule(sqcp->tletp); - else - tasklet_schedule(sqcp->tletp); + if (sdebug_statistics) + sd_dp->issuing_cpu = raw_smp_processor_id(); + schedule_work(&sd_dp->ew.work); } - if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) && - (scsi_result == device_qfull_result)) + if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && + (scsi_result == device_qfull_result))) sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__, num_in_q, (inject ? "<inject> " : ""), @@ -4069,52 +4094,55 @@ respond_in_thread: /* call back to mid-layer using invocation thread */ as it can when the corresponding attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed. 
*/ -module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); -module_param_named(ato, scsi_debug_ato, int, S_IRUGO); -module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR); -module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); -module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); -module_param_named(dif, scsi_debug_dif, int, S_IRUGO); -module_param_named(dix, scsi_debug_dix, int, S_IRUGO); -module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); -module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); -module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); -module_param_named(guard, scsi_debug_guard, uint, S_IRUGO); -module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR); -module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); -module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); -module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); -module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO); -module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); -module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); -module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); -module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR); -module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); -module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO); -module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); -module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR); -module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO); -module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR); -module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); -module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); -module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR); -module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); -module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); -module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR); -module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); -module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); -module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); -module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); -module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); -module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, +module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR); +module_param_named(ato, sdebug_ato, int, S_IRUGO); +module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR); +module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR); +module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO); +module_param_named(dif, sdebug_dif, int, S_IRUGO); +module_param_named(dix, sdebug_dix, int, S_IRUGO); +module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR); +module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR); +module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR); +module_param_named(guard, sdebug_guard, uint, S_IRUGO); +module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR); +module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO); 
+module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO); +module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO); +module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO); +module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO); +module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR); +module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR); +module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR); +module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR); +module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO); +module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO); +module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR); +module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO); +module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR); +module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO); +module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR); +module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR); +module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO); +module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO); +module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR); +module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR); +module_param_named(submit_queues, submit_queues, int, S_IRUGO); +module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO); +module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO); +module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO); +module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO); +module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR); +module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO); +module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int, S_IRUGO | S_IWUSR); -module_param_named(write_same_length, scsi_debug_write_same_length, int, +module_param_named(write_same_length, sdebug_write_same_length, int, S_IRUGO | S_IWUSR); MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); MODULE_DESCRIPTION("SCSI debug adapter driver"); MODULE_LICENSE("GPL"); -MODULE_VERSION(SCSI_DEBUG_VERSION); +MODULE_VERSION(SDEBUG_VERSION); MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); @@ -4127,11 +4155,12 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); -MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)"); +MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)"); MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); -MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)"); +MODULE_PARM_DESC(lbprz, + "on read unmapped LBs return 0 when 1 (def), return 0xff when 2"); MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))"); @@ -4145,30 +4174,42 @@ MODULE_PARM_DESC(opts, "1->noise, 
2->medium_err, 4->timeout, 8->recovered_err... MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); -MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])"); +MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])"); MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); +MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)"); MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)"); +MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)"); MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); +MODULE_PARM_DESC(uuid_ctl, + "1->use uuid for lu name, 0->don't, 2->all use same (def=0)"); MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); -static char sdebug_info[256]; +#define SDEBUG_INFO_LEN 256 +static char sdebug_info[SDEBUG_INFO_LEN]; static const char * scsi_debug_info(struct Scsi_Host * shp) { - sprintf(sdebug_info, "scsi_debug, version %s [%s], " - "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION, - scsi_debug_version_date, scsi_debug_dev_size_mb, - scsi_debug_opts); + int k; + + k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n", + my_name, SDEBUG_VERSION, sdebug_version_date); + if (k >= (SDEBUG_INFO_LEN - 1)) + return sdebug_info; + scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k, + " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d", + sdebug_dev_size_mb, sdebug_opts, submit_queues, + "statistics", (int)sdebug_statistics); return sdebug_info; } /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */ -static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length) +static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, + int length) { char arr[16]; int opts; @@ -4180,9 +4221,11 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt arr[minLen] = '\0'; if (1 != sscanf(arr, "%d", &opts)) return -EINVAL; - scsi_debug_opts = opts; - if (scsi_debug_every_nth != 0) - atomic_set(&sdebug_cmnd_count, 0); + sdebug_opts = opts; + sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); + if (sdebug_every_nth != 0) + tweak_cmnd_count(); return length; } @@ -4191,69 +4234,83 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt * output are not atomics so might be inaccurate in a busy system. */ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) { - int f, l; - char b[32]; - - if (scsi_debug_every_nth > 0) - snprintf(b, sizeof(b), " (curr:%d)", - ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ? 
- atomic_read(&sdebug_a_tsf) : - atomic_read(&sdebug_cmnd_count))); - else - b[0] = '\0'; - - seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n" - "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, " - "every_nth=%d%s\n" - "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n" - "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n" - "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, " - "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d " - "usec_in_jiffy=%lu\n", - SCSI_DEBUG_VERSION, scsi_debug_version_date, - scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts, - scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay, - scsi_debug_max_luns, atomic_read(&sdebug_completions), - scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, - sdebug_sectors_per, num_aborts, num_dev_resets, - num_target_resets, num_bus_resets, num_host_resets, - dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000); - - f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue); - if (f != scsi_debug_max_queue) { - l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue); - seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n", - "queued_in_use_bm", f, l); + int f, j, l; + struct sdebug_queue *sqp; + + seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n", + SDEBUG_VERSION, sdebug_version_date); + seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n", + sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb, + sdebug_opts, sdebug_every_nth); + seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n", + sdebug_jdelay, sdebug_ndelay, sdebug_max_luns, + sdebug_sector_size, "bytes"); + seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n", + sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, + num_aborts); + seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n", + num_dev_resets, num_target_resets, num_bus_resets, + num_host_resets); + seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n", + dix_reads, dix_writes, dif_errors); + seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n", + TICK_NSEC / 1000, "statistics", sdebug_statistics, + sdebug_mq_active); + seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n", + atomic_read(&sdebug_cmnd_count), + atomic_read(&sdebug_completions), + "miss_cpus", atomic_read(&sdebug_miss_cpus), + atomic_read(&sdebug_a_tsf)); + + seq_printf(m, "submit_queues=%d\n", submit_queues); + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) { + seq_printf(m, " queue %d:\n", j); + f = find_first_bit(sqp->in_use_bm, sdebug_max_queue); + if (f != sdebug_max_queue) { + l = find_last_bit(sqp->in_use_bm, sdebug_max_queue); + seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n", + "first,last bits", f, l); + } } return 0; } static ssize_t delay_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay); } -/* Returns -EBUSY if delay is being changed and commands are queued */ +/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit + * of delay is jiffies. 
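The delay attribute is expressed in jiffies; when a positive delay is in force, schedule_resp() converts that jiffies count into a ktime for the hrtimer. The equivalent arithmetic in user space, assuming a HZ value that divides one billion evenly (the 250 used here is only an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const long hz = 250;             /* assumed CONFIG_HZ */
	int delta_jiff = 3;              /* value written to the delay attribute */
	int64_t ns = (int64_t)delta_jiff * (1000000000LL / hz);

	printf("delay of %d jiffies is about %lld ns for the hrtimer\n",
	       delta_jiff, (long long)ns);
	return 0;
}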
+ */ static ssize_t delay_store(struct device_driver *ddp, const char *buf, size_t count) { - int delay, res; + int jdelay, res; - if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) { + if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) { res = count; - if (scsi_debug_delay != delay) { - unsigned long iflags; - int k; - - spin_lock_irqsave(&queued_arr_lock, iflags); - k = find_first_bit(queued_in_use_bm, - scsi_debug_max_queue); - if (k != scsi_debug_max_queue) - res = -EBUSY; /* have queued commands */ - else { - scsi_debug_delay = delay; - scsi_debug_ndelay = 0; + if (sdebug_jdelay != jdelay) { + int j, k; + struct sdebug_queue *sqp; + + block_unblock_all_queues(true); + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; + ++j, ++sqp) { + k = find_first_bit(sqp->in_use_bm, + sdebug_max_queue); + if (k != sdebug_max_queue) { + res = -EBUSY; /* queued commands */ + break; + } + } + if (res > 0) { + /* make sure sdebug_defer instances get + * re-allocated for new delay variant */ + free_all_queued(); + sdebug_jdelay = jdelay; + sdebug_ndelay = 0; } - spin_unlock_irqrestore(&queued_arr_lock, iflags); + block_unblock_all_queues(false); } return res; } @@ -4263,31 +4320,41 @@ static DRIVER_ATTR_RW(delay); static ssize_t ndelay_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay); } /* Returns -EBUSY if ndelay is being changed and commands are queued */ -/* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */ +/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf, - size_t count) + size_t count) { - unsigned long iflags; - int ndelay, res, k; + int ndelay, res; if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) && - (ndelay >= 0) && (ndelay < 1000000000)) { + (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) { res = count; - if (scsi_debug_ndelay != ndelay) { - spin_lock_irqsave(&queued_arr_lock, iflags); - k = find_first_bit(queued_in_use_bm, - scsi_debug_max_queue); - if (k != scsi_debug_max_queue) - res = -EBUSY; /* have queued commands */ - else { - scsi_debug_ndelay = ndelay; - scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN - : DEF_DELAY; + if (sdebug_ndelay != ndelay) { + int j, k; + struct sdebug_queue *sqp; + + block_unblock_all_queues(true); + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; + ++j, ++sqp) { + k = find_first_bit(sqp->in_use_bm, + sdebug_max_queue); + if (k != sdebug_max_queue) { + res = -EBUSY; /* queued commands */ + break; + } + } + if (res > 0) { + /* make sure sdebug_defer instances get + * re-allocated for new delay variant */ + free_all_queued(); + sdebug_ndelay = ndelay; + sdebug_jdelay = ndelay ? 
JDELAY_OVERRIDDEN + : DEF_JDELAY; } - spin_unlock_irqrestore(&queued_arr_lock, iflags); + block_unblock_all_queues(false); } return res; } @@ -4297,7 +4364,7 @@ static DRIVER_ATTR_RW(ndelay); static ssize_t opts_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts); + return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts); } static ssize_t opts_store(struct device_driver *ddp, const char *buf, @@ -4317,26 +4384,17 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf, } return -EINVAL; opts_done: - scsi_debug_opts = opts; - if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) - sdebug_any_injecting_opt = true; - else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) - sdebug_any_injecting_opt = true; - else if (SCSI_DEBUG_OPT_DIF_ERR & opts) - sdebug_any_injecting_opt = true; - else if (SCSI_DEBUG_OPT_DIX_ERR & opts) - sdebug_any_injecting_opt = true; - else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) - sdebug_any_injecting_opt = true; - atomic_set(&sdebug_cmnd_count, 0); - atomic_set(&sdebug_a_tsf, 0); + sdebug_opts = opts; + sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts); + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts); + tweak_cmnd_count(); return count; } static DRIVER_ATTR_RW(opts); static ssize_t ptype_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype); } static ssize_t ptype_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4344,7 +4402,7 @@ static ssize_t ptype_store(struct device_driver *ddp, const char *buf, int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - scsi_debug_ptype = n; + sdebug_ptype = n; return count; } return -EINVAL; @@ -4353,7 +4411,7 @@ static DRIVER_ATTR_RW(ptype); static ssize_t dsense_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense); } static ssize_t dsense_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4361,7 +4419,7 @@ static ssize_t dsense_store(struct device_driver *ddp, const char *buf, int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - scsi_debug_dsense = n; + sdebug_dsense = n; return count; } return -EINVAL; @@ -4370,7 +4428,7 @@ static DRIVER_ATTR_RW(dsense); static ssize_t fake_rw_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw); } static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4379,11 +4437,11 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n = (n > 0); - scsi_debug_fake_rw = (scsi_debug_fake_rw > 0); - if (scsi_debug_fake_rw != n) { + sdebug_fake_rw = (sdebug_fake_rw > 0); + if (sdebug_fake_rw != n) { if ((0 == n) && (NULL == fake_storep)) { unsigned long sz = - (unsigned long)scsi_debug_dev_size_mb * + (unsigned long)sdebug_dev_size_mb * 1048576; fake_storep = vmalloc(sz); @@ -4393,7 +4451,7 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, } memset(fake_storep, 0, sz); } - scsi_debug_fake_rw = n; + sdebug_fake_rw = n; } return count; } @@ -4403,7 +4461,7 @@ static DRIVER_ATTR_RW(fake_rw); static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", 
scsi_debug_no_lun_0); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0); } static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4411,7 +4469,7 @@ static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - scsi_debug_no_lun_0 = n; + sdebug_no_lun_0 = n; return count; } return -EINVAL; @@ -4420,7 +4478,7 @@ static DRIVER_ATTR_RW(no_lun_0); static ssize_t num_tgts_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts); } static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4428,7 +4486,7 @@ static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - scsi_debug_num_tgts = n; + sdebug_num_tgts = n; sdebug_max_tgts_luns(); return count; } @@ -4438,19 +4496,19 @@ static DRIVER_ATTR_RW(num_tgts); static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb); } static DRIVER_ATTR_RO(dev_size_mb); static ssize_t num_parts_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts); } static DRIVER_ATTR_RO(num_parts); static ssize_t every_nth_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth); } static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4458,8 +4516,12 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, int nth; if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { - scsi_debug_every_nth = nth; - atomic_set(&sdebug_cmnd_count, 0); + sdebug_every_nth = nth; + if (nth && !sdebug_statistics) { + pr_info("every_nth needs statistics=1, set it\n"); + sdebug_statistics = true; + } + tweak_cmnd_count(); return count; } return -EINVAL; @@ -4468,7 +4530,7 @@ static DRIVER_ATTR_RW(every_nth); static ssize_t max_luns_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns); } static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4477,10 +4539,14 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, bool changed; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - changed = (scsi_debug_max_luns != n); - scsi_debug_max_luns = n; + if (n > 256) { + pr_warn("max_luns can be no more than 256\n"); + return -EINVAL; + } + changed = (sdebug_max_luns != n); + sdebug_max_luns = n; sdebug_max_tgts_luns(); - if (changed && (scsi_debug_scsi_level >= 5)) { /* >= SPC-3 */ + if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */ struct sdebug_host_info *sdhp; struct sdebug_dev_info *dp; @@ -4503,28 +4569,34 @@ static DRIVER_ATTR_RW(max_luns); static ssize_t max_queue_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue); } /* N.B. max_queue can be changed while there are queued commands. 
In flight * commands beyond the new max_queue will be completed. */ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, size_t count) { - unsigned long iflags; - int n, k; + int j, n, k, a; + struct sdebug_queue *sqp; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && - (n <= SCSI_DEBUG_CANQUEUE)) { - spin_lock_irqsave(&queued_arr_lock, iflags); - k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE); - scsi_debug_max_queue = n; - if (SCSI_DEBUG_CANQUEUE == k) + (n <= SDEBUG_CANQUEUE)) { + block_unblock_all_queues(true); + k = 0; + for (j = 0, sqp = sdebug_q_arr; j < submit_queues; + ++j, ++sqp) { + a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE); + if (a > k) + k = a; + } + sdebug_max_queue = n; + if (k == SDEBUG_CANQUEUE) atomic_set(&retired_max_queue, 0); else if (k >= n) atomic_set(&retired_max_queue, k + 1); else atomic_set(&retired_max_queue, 0); - spin_unlock_irqrestore(&queued_arr_lock, iflags); + block_unblock_all_queues(false); return count; } return -EINVAL; @@ -4533,19 +4605,19 @@ static DRIVER_ATTR_RW(max_queue); static ssize_t no_uld_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld); } static DRIVER_ATTR_RO(no_uld); static ssize_t scsi_level_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level); } static DRIVER_ATTR_RO(scsi_level); static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb); } static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4554,8 +4626,8 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, bool changed; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - changed = (scsi_debug_virtual_gb != n); - scsi_debug_virtual_gb = n; + changed = (sdebug_virtual_gb != n); + sdebug_virtual_gb = n; sdebug_capacity = get_sdebug_capacity(); if (changed) { struct sdebug_host_info *sdhp; @@ -4580,9 +4652,12 @@ static DRIVER_ATTR_RW(virtual_gb); static ssize_t add_host_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host); } +static int sdebug_add_adapter(void); +static void sdebug_remove_adapter(void); + static ssize_t add_host_store(struct device_driver *ddp, const char *buf, size_t count) { @@ -4605,7 +4680,7 @@ static DRIVER_ATTR_RW(add_host); static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno); } static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4613,40 +4688,68 @@ static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - scsi_debug_vpd_use_hostno = n; + sdebug_vpd_use_hostno = n; return count; } return -EINVAL; } static DRIVER_ATTR_RW(vpd_use_hostno); +static ssize_t statistics_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics); +} +static ssize_t statistics_store(struct device_driver *ddp, const char *buf, + 
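Every writable parameter in this file follows the same driver-model attribute idiom: a <name>_show()/<name>_store() pair plus DRIVER_ATTR_RW(<name>), which generates a struct driver_attribute called driver_attr_<name> for the sdebug_drv_attrs[] table further down. A minimal sketch of that idiom with a hypothetical attribute and backing variable named "example":

static int sdebug_example;	/* hypothetical backing variable */

static ssize_t example_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_example);
}

static ssize_t example_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n;

	if (count > 0 && sscanf(buf, "%d", &n) == 1 && n >= 0) {
		sdebug_example = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(example);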
size_t count) +{ + int n; + + if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) { + if (n > 0) + sdebug_statistics = true; + else { + clear_queue_stats(); + sdebug_statistics = false; + } + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(statistics); + static ssize_t sector_size_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size); + return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size); } static DRIVER_ATTR_RO(sector_size); +static ssize_t submit_queues_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues); +} +static DRIVER_ATTR_RO(submit_queues); + static ssize_t dix_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix); } static DRIVER_ATTR_RO(dix); static ssize_t dif_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif); } static DRIVER_ATTR_RO(dif); static ssize_t guard_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard); + return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard); } static DRIVER_ATTR_RO(guard); static ssize_t ato_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato); } static DRIVER_ATTR_RO(ato); @@ -4669,7 +4772,7 @@ static DRIVER_ATTR_RO(map); static ssize_t removable_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0); + return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0); } static ssize_t removable_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4677,7 +4780,7 @@ static ssize_t removable_store(struct device_driver *ddp, const char *buf, int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - scsi_debug_removable = (n > 0); + sdebug_removable = (n > 0); return count; } return -EINVAL; @@ -4686,32 +4789,17 @@ static DRIVER_ATTR_RW(removable); static ssize_t host_lock_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock); + return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock); } -/* Returns -EBUSY if host_lock is being changed and commands are queued */ +/* N.B. 
sdebug_host_lock does nothing, kept for backward compatibility */ static ssize_t host_lock_store(struct device_driver *ddp, const char *buf, size_t count) { - int n, res; + int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - bool new_host_lock = (n > 0); - - res = count; - if (new_host_lock != scsi_debug_host_lock) { - unsigned long iflags; - int k; - - spin_lock_irqsave(&queued_arr_lock, iflags); - k = find_first_bit(queued_in_use_bm, - scsi_debug_max_queue); - if (k != scsi_debug_max_queue) - res = -EBUSY; /* have queued commands */ - else - scsi_debug_host_lock = new_host_lock; - spin_unlock_irqrestore(&queued_arr_lock, iflags); - } - return res; + sdebug_host_lock = (n > 0); + return count; } return -EINVAL; } @@ -4719,7 +4807,7 @@ static DRIVER_ATTR_RW(host_lock); static ssize_t strict_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict); + return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict); } static ssize_t strict_store(struct device_driver *ddp, const char *buf, size_t count) @@ -4727,13 +4815,19 @@ static ssize_t strict_store(struct device_driver *ddp, const char *buf, int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { - scsi_debug_strict = (n > 0); + sdebug_strict = (n > 0); return count; } return -EINVAL; } static DRIVER_ATTR_RW(strict); +static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl); +} +static DRIVER_ATTR_RO(uuid_ctl); + /* Note: The following array creates attribute files in the /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these @@ -4761,6 +4855,8 @@ static struct attribute *sdebug_drv_attrs[] = { &driver_attr_add_host.attr, &driver_attr_vpd_use_hostno.attr, &driver_attr_sector_size.attr, + &driver_attr_statistics.attr, + &driver_attr_submit_queues.attr, &driver_attr_dix.attr, &driver_attr_dif.attr, &driver_attr_guard.attr, @@ -4770,6 +4866,7 @@ static struct attribute *sdebug_drv_attrs[] = { &driver_attr_host_lock.attr, &driver_attr_ndelay.attr, &driver_attr_strict.attr, + &driver_attr_uuid_ctl.attr, NULL, }; ATTRIBUTE_GROUPS(sdebug_drv); @@ -4783,33 +4880,33 @@ static int __init scsi_debug_init(void) int k; int ret; - atomic_set(&sdebug_cmnd_count, 0); - atomic_set(&sdebug_completions, 0); atomic_set(&retired_max_queue, 0); - if (scsi_debug_ndelay >= 1000000000) { + if (sdebug_ndelay >= 1000 * 1000 * 1000) { pr_warn("ndelay must be less than 1 second, ignored\n"); - scsi_debug_ndelay = 0; - } else if (scsi_debug_ndelay > 0) - scsi_debug_delay = DELAY_OVERRIDDEN; + sdebug_ndelay = 0; + } else if (sdebug_ndelay > 0) + sdebug_jdelay = JDELAY_OVERRIDDEN; - switch (scsi_debug_sector_size) { + switch (sdebug_sector_size) { case 512: case 1024: case 2048: case 4096: break; default: - pr_err("invalid sector_size %d\n", scsi_debug_sector_size); + pr_err("invalid sector_size %d\n", sdebug_sector_size); return -EINVAL; } - switch (scsi_debug_dif) { + switch (sdebug_dif) { case SD_DIF_TYPE0_PROTECTION: + break; case SD_DIF_TYPE1_PROTECTION: case SD_DIF_TYPE2_PROTECTION: case SD_DIF_TYPE3_PROTECTION: + have_dif_prot = true; break; default: @@ -4817,39 +4914,53 @@ static int __init scsi_debug_init(void) return -EINVAL; } - if (scsi_debug_guard > 1) { + if (sdebug_guard > 1) { pr_err("guard must be 0 or 1\n"); return -EINVAL; } - if (scsi_debug_ato > 1) { + if (sdebug_ato > 1) { pr_err("ato must be 0 or 1\n"); return -EINVAL; } - if (scsi_debug_physblk_exp > 15) { - pr_err("invalid 
physblk_exp %u\n", scsi_debug_physblk_exp); + if (sdebug_physblk_exp > 15) { + pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp); return -EINVAL; } + if (sdebug_max_luns > 256) { + pr_warn("max_luns can be no more than 256, use default\n"); + sdebug_max_luns = DEF_MAX_LUNS; + } - if (scsi_debug_lowest_aligned > 0x3fff) { - pr_err("lowest_aligned too big: %u\n", - scsi_debug_lowest_aligned); + if (sdebug_lowest_aligned > 0x3fff) { + pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned); return -EINVAL; } - if (scsi_debug_dev_size_mb < 1) - scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ - sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; - sdebug_store_sectors = sz / scsi_debug_sector_size; + if (submit_queues < 1) { + pr_err("submit_queues must be 1 or more\n"); + return -EINVAL; + } + sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), + GFP_KERNEL); + if (sdebug_q_arr == NULL) + return -ENOMEM; + for (k = 0; k < submit_queues; ++k) + spin_lock_init(&sdebug_q_arr[k].qc_lock); + + if (sdebug_dev_size_mb < 1) + sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ + sz = (unsigned long)sdebug_dev_size_mb * 1048576; + sdebug_store_sectors = sz / sdebug_sector_size; sdebug_capacity = get_sdebug_capacity(); /* play around with geometry, don't waste too much on track 0 */ sdebug_heads = 8; sdebug_sectors_per = 32; - if (scsi_debug_dev_size_mb >= 256) + if (sdebug_dev_size_mb >= 256) sdebug_heads = 64; - else if (scsi_debug_dev_size_mb >= 16) + else if (sdebug_dev_size_mb >= 16) sdebug_heads = 32; sdebug_cylinders_per = (unsigned long)sdebug_capacity / (sdebug_sectors_per * sdebug_heads); @@ -4861,18 +4972,19 @@ static int __init scsi_debug_init(void) (sdebug_sectors_per * sdebug_heads); } - if (0 == scsi_debug_fake_rw) { + if (sdebug_fake_rw == 0) { fake_storep = vmalloc(sz); if (NULL == fake_storep) { pr_err("out of memory, 1\n"); - return -ENOMEM; + ret = -ENOMEM; + goto free_q_arr; } memset(fake_storep, 0, sz); - if (scsi_debug_num_parts > 0) + if (sdebug_num_parts > 0) sdebug_build_parts(fake_storep, sz); } - if (scsi_debug_dix) { + if (sdebug_dix) { int dif_size; dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); @@ -4891,20 +5003,21 @@ static int __init scsi_debug_init(void) /* Logical Block Provisioning */ if (scsi_debug_lbp()) { - scsi_debug_unmap_max_blocks = - clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); + sdebug_unmap_max_blocks = + clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU); - scsi_debug_unmap_max_desc = - clamp(scsi_debug_unmap_max_desc, 0U, 256U); + sdebug_unmap_max_desc = + clamp(sdebug_unmap_max_desc, 0U, 256U); - scsi_debug_unmap_granularity = - clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); + sdebug_unmap_granularity = + clamp(sdebug_unmap_granularity, 1U, 0xffffffffU); - if (scsi_debug_unmap_alignment && - scsi_debug_unmap_granularity <= - scsi_debug_unmap_alignment) { + if (sdebug_unmap_alignment && + sdebug_unmap_granularity <= + sdebug_unmap_alignment) { pr_err("ERR: unmap_granularity <= unmap_alignment\n"); - return -EINVAL; + ret = -EINVAL; + goto free_vm; } map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; @@ -4921,7 +5034,7 @@ static int __init scsi_debug_init(void) bitmap_zero(map_storep, map_size); /* Map first 1KB for partition table */ - if (scsi_debug_num_parts) + if (sdebug_num_parts) map_region(0, 2); } @@ -4942,8 +5055,8 @@ static int __init scsi_debug_init(void) goto bus_unreg; } - host_to_add = scsi_debug_add_host; - scsi_debug_add_host = 0; + host_to_add = 
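The init path above also derives a fake CHS geometry from the ramdisk size. A stand-alone sketch of that arithmetic, assuming the driver's usual defaults of an 8 MiB store with 512-byte sectors and ignoring the virtual_gb adjustment; it prints capacity=16384 sectors, CHS=64/8/32.

#include <stdio.h>

int main(void)
{
	unsigned long dev_size_mb = 8;		/* assumed default dev_size_mb */
	unsigned long sector_size = 512;	/* assumed default sector_size */
	unsigned long sz = dev_size_mb * 1048576;
	unsigned long store_sectors = sz / sector_size;
	unsigned int heads = 8, sectors_per = 32;
	unsigned long cylinders;

	if (dev_size_mb >= 256)			/* larger stores get more heads */
		heads = 64;
	else if (dev_size_mb >= 16)
		heads = 32;
	cylinders = store_sectors / (sectors_per * heads);
	printf("capacity=%lu sectors, CHS=%lu/%u/%u\n",
	       store_sectors, cylinders, heads, sectors_per);
	return 0;
}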
sdebug_add_host; + sdebug_add_host = 0; for (k = 0; k < host_to_add; k++) { if (sdebug_add_adapter()) { @@ -4952,8 +5065,8 @@ static int __init scsi_debug_init(void) } } - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) - pr_info("built %d host(s)\n", scsi_debug_add_host); + if (sdebug_verbose) + pr_info("built %d host(s)\n", sdebug_add_host); return 0; @@ -4965,13 +5078,14 @@ free_vm: vfree(map_storep); vfree(dif_storep); vfree(fake_storep); - +free_q_arr: + kfree(sdebug_q_arr); return ret; } static void __exit scsi_debug_exit(void) { - int k = scsi_debug_add_host; + int k = sdebug_add_host; stop_all_queued(); free_all_queued(); @@ -4983,6 +5097,7 @@ static void __exit scsi_debug_exit(void) vfree(dif_storep); vfree(fake_storep); + kfree(sdebug_q_arr); } device_initcall(scsi_debug_init); @@ -5011,7 +5126,7 @@ static int sdebug_add_adapter(void) INIT_LIST_HEAD(&sdbg_host->dev_info_list); - devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns; + devs_per_host = sdebug_num_tgts * sdebug_max_luns; for (k = 0; k < devs_per_host; k++) { sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); if (!sdbg_devinfo) { @@ -5028,14 +5143,14 @@ static int sdebug_add_adapter(void) sdbg_host->dev.bus = &pseudo_lld_bus; sdbg_host->dev.parent = pseudo_primary; sdbg_host->dev.release = &sdebug_release_adapter; - dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host); + dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host); error = device_register(&sdbg_host->dev); if (error) goto clean; - ++scsi_debug_add_host; + ++sdebug_add_host; return error; clean: @@ -5064,78 +5179,54 @@ static void sdebug_remove_adapter(void) if (!sdbg_host) return; - device_unregister(&sdbg_host->dev); - --scsi_debug_add_host; + device_unregister(&sdbg_host->dev); + --sdebug_add_host; } -static int -sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) +static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) { int num_in_q = 0; - unsigned long iflags; struct sdebug_dev_info *devip; - spin_lock_irqsave(&queued_arr_lock, iflags); + block_unblock_all_queues(true); devip = (struct sdebug_dev_info *)sdev->hostdata; if (NULL == devip) { - spin_unlock_irqrestore(&queued_arr_lock, iflags); + block_unblock_all_queues(false); return -ENODEV; } num_in_q = atomic_read(&devip->num_in_q); - spin_unlock_irqrestore(&queued_arr_lock, iflags); if (qdepth < 1) qdepth = 1; - /* allow to exceed max host queued_arr elements for testing */ - if (qdepth > SCSI_DEBUG_CANQUEUE + 10) - qdepth = SCSI_DEBUG_CANQUEUE + 10; + /* allow to exceed max host qc_arr elements for testing */ + if (qdepth > SDEBUG_CANQUEUE + 10) + qdepth = SDEBUG_CANQUEUE + 10; scsi_change_queue_depth(sdev, qdepth); - if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { - sdev_printk(KERN_INFO, sdev, - "%s: qdepth=%d, num_in_q=%d\n", + if (SDEBUG_OPT_Q_NOISE & sdebug_opts) { + sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n", __func__, qdepth, num_in_q); } + block_unblock_all_queues(false); return sdev->queue_depth; } -static int -check_inject(struct scsi_cmnd *scp) +static bool fake_timeout(struct scsi_cmnd *scp) { - struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); - - memset(ep, 0, sizeof(struct sdebug_scmd_extra_t)); - - if (atomic_inc_return(&sdebug_cmnd_count) >= - abs(scsi_debug_every_nth)) { - atomic_set(&sdebug_cmnd_count, 0); - if (scsi_debug_every_nth < -1) - scsi_debug_every_nth = -1; - if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) - return 1; /* ignore command causing timeout */ - else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & 
scsi_debug_opts && + if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) { + if (sdebug_every_nth < -1) + sdebug_every_nth = -1; + if (SDEBUG_OPT_TIMEOUT & sdebug_opts) + return true; /* ignore command causing timeout */ + else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts && scsi_medium_access_command(scp)) - return 1; /* time out reads and writes */ - if (sdebug_any_injecting_opt) { - int opts = scsi_debug_opts; - - if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) - ep->inj_recovered = true; - else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) - ep->inj_transport = true; - else if (SCSI_DEBUG_OPT_DIF_ERR & opts) - ep->inj_dif = true; - else if (SCSI_DEBUG_OPT_DIX_ERR & opts) - ep->inj_dix = true; - else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) - ep->inj_short = true; - } + return true; /* time out reads and writes */ } - return 0; + return false; } -static int -scsi_debug_queuecommand(struct scsi_cmnd *scp) +static int scsi_debug_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *scp) { u8 sdeb_i; struct scsi_device *sdp = scp->device; @@ -5146,15 +5237,16 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp) int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); int k, na; int errsts = 0; - int errsts_no_connect = DID_NO_CONNECT << 16; u32 flags; u16 sa; u8 opcode = cmd[0]; bool has_wlun_rl; - bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts); scsi_set_resid(scp, 0); - if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) { + if (sdebug_statistics) + atomic_inc(&sdebug_cmnd_count); + if (unlikely(sdebug_verbose && + !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) { char b[120]; int n, len, sb; @@ -5167,19 +5259,25 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp) n += scnprintf(b + n, sb - n, "%02x ", (u32)cmd[k]); } - sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b); + if (sdebug_mq_active) + sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n", + my_name, blk_mq_unique_tag(scp->request), + b); + else + sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, + b); } has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); - if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl) - return schedule_resp(scp, NULL, errsts_no_connect, 0); + if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl)) + goto err_out; sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */ oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */ devip = (struct sdebug_dev_info *)sdp->hostdata; - if (!devip) { - devip = devInfoReg(sdp); + if (unlikely(!devip)) { + devip = find_build_dev_info(sdp); if (NULL == devip) - return schedule_resp(scp, NULL, errsts_no_connect, 0); + goto err_out; } na = oip->num_attached; r_pfp = oip->pfp; @@ -5211,18 +5309,18 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp) } } /* else (when na==0) we assume the oip is a match */ flags = oip->flags; - if (F_INV_OP & flags) { + if (unlikely(F_INV_OP & flags)) { mk_sense_invalid_opcode(scp); goto check_cond; } - if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) { - if (debug) - sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: " - "0x%x not supported for wlun\n", opcode); + if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) { + if (sdebug_verbose) + sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n", + my_name, opcode, " supported for wlun"); mk_sense_invalid_opcode(scp); goto check_cond; } - if (scsi_debug_strict) { /* check cdb against mask */ + if (unlikely(sdebug_strict)) { /* check cdb against mask */ u8 rem; int j; @@ -5238,52 +5336,40 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp) } } } - if (!(F_SKIP_UA & 
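The rewritten fake_timeout() above is what turns the every_nth parameter into injected timeouts: every nth command is simply never completed, so the mid-layer timer and error handler get exercised. A simplified sketch of the decision (the real code additionally restricts SDEBUG_OPT_MAC_TIMEOUT to medium-access commands via scsi_medium_access_command()):

static bool fake_timeout_sketch(int cmnd_count, int every_nth, int opts)
{
	if (every_nth == 0)
		return false;				/* injection disabled */
	if (cmnd_count % abs(every_nth))
		return false;				/* not the nth command */
	return (opts & SDEBUG_OPT_TIMEOUT) ||		/* swallow any command */
	       (opts & SDEBUG_OPT_MAC_TIMEOUT);		/* swallow reads/writes */
}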
flags) && - SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) { - errsts = check_readiness(scp, UAS_ONLY, devip); + if (unlikely(!(F_SKIP_UA & flags) && + find_first_bit(devip->uas_bm, + SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) { + errsts = make_ua(scp, devip); if (errsts) goto check_cond; } - if ((F_M_ACCESS & flags) && devip->stopped) { + if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) { mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2); - if (debug) + if (sdebug_verbose) sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: " "%s\n", my_name, "initializing command " "required"); errsts = check_condition_result; goto fini; } - if (scsi_debug_fake_rw && (F_FAKE_RW & flags)) + if (sdebug_fake_rw && (F_FAKE_RW & flags)) goto fini; - if (scsi_debug_every_nth) { - if (check_inject(scp)) + if (unlikely(sdebug_every_nth)) { + if (fake_timeout(scp)) return 0; /* ignore command: make trouble */ } - if (oip->pfp) /* if this command has a resp_* function, call it */ - errsts = oip->pfp(scp, devip); + if (likely(oip->pfp)) + errsts = oip->pfp(scp, devip); /* calls a resp_* function */ else if (r_pfp) /* if leaf function ptr NULL, try the root's */ errsts = r_pfp(scp, devip); fini: return schedule_resp(scp, devip, errsts, - ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay)); + ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay)); check_cond: return schedule_resp(scp, devip, check_condition_result, 0); -} - -static int -sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd) -{ - if (scsi_debug_host_lock) { - unsigned long iflags; - int rc; - - spin_lock_irqsave(shost->host_lock, iflags); - rc = scsi_debug_queuecommand(cmd); - spin_unlock_irqrestore(shost->host_lock, iflags); - return rc; - } else - return scsi_debug_queuecommand(cmd); +err_out: + return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0); } static struct scsi_host_template sdebug_driver_template = { @@ -5296,36 +5382,34 @@ static struct scsi_host_template sdebug_driver_template = { .slave_configure = scsi_debug_slave_configure, .slave_destroy = scsi_debug_slave_destroy, .ioctl = scsi_debug_ioctl, - .queuecommand = sdebug_queuecommand_lock_or_not, + .queuecommand = scsi_debug_queuecommand, .change_queue_depth = sdebug_change_qdepth, .eh_abort_handler = scsi_debug_abort, .eh_device_reset_handler = scsi_debug_device_reset, .eh_target_reset_handler = scsi_debug_target_reset, .eh_bus_reset_handler = scsi_debug_bus_reset, .eh_host_reset_handler = scsi_debug_host_reset, - .can_queue = SCSI_DEBUG_CANQUEUE, + .can_queue = SDEBUG_CANQUEUE, .this_id = 7, - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, + .sg_tablesize = SG_MAX_SEGMENTS, .cmd_per_lun = DEF_CMD_PER_LUN, .max_sectors = -1U, .use_clustering = DISABLE_CLUSTERING, .module = THIS_MODULE, .track_queue_depth = 1, - .cmd_size = sizeof(struct sdebug_scmd_extra_t), }; static int sdebug_driver_probe(struct device * dev) { int error = 0; - int opts; struct sdebug_host_info *sdbg_host; struct Scsi_Host *hpnt; - int host_prot; + int hprot; sdbg_host = to_sdebug_host(dev); - sdebug_driver_template.can_queue = scsi_debug_max_queue; - if (scsi_debug_clustering) + sdebug_driver_template.can_queue = sdebug_max_queue; + if (sdebug_clustering) sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); if (NULL == hpnt) { @@ -5333,72 +5417,75 @@ static int sdebug_driver_probe(struct device * dev) error = -ENODEV; return error; } + if (submit_queues > nr_cpu_ids) { + pr_warn("%s: trim 
submit_queues (was %d) to nr_cpu_ids=%d\n", + my_name, submit_queues, nr_cpu_ids); + submit_queues = nr_cpu_ids; + } + /* Decide whether to tell scsi subsystem that we want mq */ + /* Following should give the same answer for each host */ + sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1); + if (sdebug_mq_active) + hpnt->nr_hw_queues = submit_queues; sdbg_host->shost = hpnt; *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host; - if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id)) - hpnt->max_id = scsi_debug_num_tgts + 1; + if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) + hpnt->max_id = sdebug_num_tgts + 1; else - hpnt->max_id = scsi_debug_num_tgts; - /* = scsi_debug_max_luns; */ + hpnt->max_id = sdebug_num_tgts; + /* = sdebug_max_luns; */ hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; - host_prot = 0; + hprot = 0; - switch (scsi_debug_dif) { + switch (sdebug_dif) { case SD_DIF_TYPE1_PROTECTION: - host_prot = SHOST_DIF_TYPE1_PROTECTION; - if (scsi_debug_dix) - host_prot |= SHOST_DIX_TYPE1_PROTECTION; + hprot = SHOST_DIF_TYPE1_PROTECTION; + if (sdebug_dix) + hprot |= SHOST_DIX_TYPE1_PROTECTION; break; case SD_DIF_TYPE2_PROTECTION: - host_prot = SHOST_DIF_TYPE2_PROTECTION; - if (scsi_debug_dix) - host_prot |= SHOST_DIX_TYPE2_PROTECTION; + hprot = SHOST_DIF_TYPE2_PROTECTION; + if (sdebug_dix) + hprot |= SHOST_DIX_TYPE2_PROTECTION; break; case SD_DIF_TYPE3_PROTECTION: - host_prot = SHOST_DIF_TYPE3_PROTECTION; - if (scsi_debug_dix) - host_prot |= SHOST_DIX_TYPE3_PROTECTION; + hprot = SHOST_DIF_TYPE3_PROTECTION; + if (sdebug_dix) + hprot |= SHOST_DIX_TYPE3_PROTECTION; break; default: - if (scsi_debug_dix) - host_prot |= SHOST_DIX_TYPE0_PROTECTION; + if (sdebug_dix) + hprot |= SHOST_DIX_TYPE0_PROTECTION; break; } - scsi_host_set_prot(hpnt, host_prot); + scsi_host_set_prot(hpnt, hprot); - pr_info("host protection%s%s%s%s%s%s%s\n", - (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", - (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", - (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", - (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", - (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", - (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", - (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); + if (have_dif_prot || sdebug_dix) + pr_info("host protection%s%s%s%s%s%s%s\n", + (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", + (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", + (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", + (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", + (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", + (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", + (hprot & SHOST_DIX_TYPE3_PROTECTION) ? 
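A sketch of the multi-queue decision being made in sdebug_driver_probe() above: blk-mq is only engaged when the SCSI core itself runs in mq mode and more than one submit queue was requested, in which case the host advertises that many hardware queues. One plausible invocation, with the parameter name assumed from the submit_queues variable in these hunks, would be modprobe scsi_debug submit_queues=4 max_queue=64 on a system booted with scsi_mod.use_blk_mq=1. The helper below is hypothetical and only restates that decision.

static int sdebug_effective_hw_queues_sketch(struct Scsi_Host *hpnt)
{
	if (shost_use_blk_mq(hpnt) && submit_queues > 1)
		return submit_queues;	/* mq: one hardware queue per submit queue */
	return 1;			/* legacy path: single queue */
}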
" DIX3" : ""); - if (scsi_debug_guard == 1) + if (sdebug_guard == 1) scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP); else scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC); - opts = scsi_debug_opts; - if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) - sdebug_any_injecting_opt = true; - else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) - sdebug_any_injecting_opt = true; - else if (SCSI_DEBUG_OPT_DIF_ERR & opts) - sdebug_any_injecting_opt = true; - else if (SCSI_DEBUG_OPT_DIX_ERR & opts) - sdebug_any_injecting_opt = true; - else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) - sdebug_any_injecting_opt = true; - + sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts); + sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts); + if (sdebug_every_nth) /* need stats counters for every_nth */ + sdebug_statistics = true; error = scsi_add_host(hpnt, &sdbg_host->dev); if (error) { pr_err("scsi_add_host failed\n"); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 8106515d1df8..b2e332af0f51 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -14,8 +14,6 @@ #include <linux/completion.h> #include <linux/kernel.h> #include <linux/export.h> -#include <linux/mempool.h> -#include <linux/slab.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/delay.h> @@ -40,39 +38,6 @@ #include "scsi_logging.h" -#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) -#define SG_MEMPOOL_SIZE 2 - -struct scsi_host_sg_pool { - size_t size; - char *name; - struct kmem_cache *slab; - mempool_t *pool; -}; - -#define SP(x) { .size = x, "sgpool-" __stringify(x) } -#if (SCSI_MAX_SG_SEGMENTS < 32) -#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) -#endif -static struct scsi_host_sg_pool scsi_sg_pools[] = { - SP(8), - SP(16), -#if (SCSI_MAX_SG_SEGMENTS > 32) - SP(32), -#if (SCSI_MAX_SG_SEGMENTS > 64) - SP(64), -#if (SCSI_MAX_SG_SEGMENTS > 128) - SP(128), -#if (SCSI_MAX_SG_SEGMENTS > 256) -#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX) -#endif -#endif -#endif -#endif - SP(SCSI_MAX_SG_SEGMENTS) -}; -#undef SP - struct kmem_cache *scsi_sdb_cache; /* @@ -553,66 +518,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost) scsi_run_queue(sdev->request_queue); } -static inline unsigned int scsi_sgtable_index(unsigned short nents) -{ - unsigned int index; - - BUG_ON(nents > SCSI_MAX_SG_SEGMENTS); - - if (nents <= 8) - index = 0; - else - index = get_count_order(nents) - 3; - - return index; -} - -static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents) -{ - struct scsi_host_sg_pool *sgp; - - sgp = scsi_sg_pools + scsi_sgtable_index(nents); - mempool_free(sgl, sgp->pool); -} - -static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) -{ - struct scsi_host_sg_pool *sgp; - - sgp = scsi_sg_pools + scsi_sgtable_index(nents); - return mempool_alloc(sgp->pool, gfp_mask); -} - -static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) -{ - if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS) - return; - __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); -} - -static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) -{ - struct scatterlist *first_chunk = NULL; - int ret; - - BUG_ON(!nents); - - if (mq) { - if (nents <= SCSI_MAX_SG_SEGMENTS) { - sdb->table.nents = sdb->table.orig_nents = nents; - sg_init_table(sdb->table.sgl, nents); - return 0; - } - first_chunk = sdb->table.sgl; - } - - ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, - first_chunk, GFP_ATOMIC, scsi_sg_alloc); - if 
(unlikely(ret)) - scsi_free_sgtable(sdb, mq); - return ret; -} - static void scsi_uninit_cmd(struct scsi_cmnd *cmd) { if (cmd->request->cmd_type == REQ_TYPE_FS) { @@ -625,12 +530,17 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd) static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) { + struct scsi_data_buffer *sdb; + if (cmd->sdb.table.nents) - scsi_free_sgtable(&cmd->sdb, true); - if (cmd->request->next_rq && cmd->request->next_rq->special) - scsi_free_sgtable(cmd->request->next_rq->special, true); + sg_free_table_chained(&cmd->sdb.table, true); + if (cmd->request->next_rq) { + sdb = cmd->request->next_rq->special; + if (sdb) + sg_free_table_chained(&sdb->table, true); + } if (scsi_prot_sg_count(cmd)) - scsi_free_sgtable(cmd->prot_sdb, true); + sg_free_table_chained(&cmd->prot_sdb->table, true); } static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) @@ -669,19 +579,19 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) static void scsi_release_buffers(struct scsi_cmnd *cmd) { if (cmd->sdb.table.nents) - scsi_free_sgtable(&cmd->sdb, false); + sg_free_table_chained(&cmd->sdb.table, false); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); if (scsi_prot_sg_count(cmd)) - scsi_free_sgtable(cmd->prot_sdb, false); + sg_free_table_chained(&cmd->prot_sdb->table, false); } static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) { struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; - scsi_free_sgtable(bidi_sdb, false); + sg_free_table_chained(&bidi_sdb->table, false); kmem_cache_free(scsi_sdb_cache, bidi_sdb); cmd->request->next_rq->special = NULL; } @@ -1085,8 +995,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) /* * If sg table allocation fails, requeue request later. */ - if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, - req->mq_ctx != NULL))) + if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments, + sdb->table.sgl))) return BLKPREP_DEFER; /* @@ -1158,7 +1068,8 @@ int scsi_init_io(struct scsi_cmnd *cmd) ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); - if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { + if (sg_alloc_table_chained(&prot_sdb->table, ivecs, + prot_sdb->table.sgl)) { error = BLKPREP_DEFER; goto err_exit; } @@ -1932,7 +1843,7 @@ static int scsi_mq_prep_fn(struct request *req) if (scsi_host_get_prot(shost)) { cmd->prot_sdb = (void *)sg + min_t(unsigned int, - shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) * + shost->sg_tablesize, SG_CHUNK_SIZE) * sizeof(struct scatterlist); memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); @@ -2105,7 +2016,7 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) * this limit is imposed by hardware restrictions */ blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, - SCSI_MAX_SG_CHAIN_SEGMENTS)); + SG_MAX_SEGMENTS)); if (scsi_host_prot_dma(shost)) { shost->sg_prot_tablesize = @@ -2187,8 +2098,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) unsigned int cmd_size, sgl_size, tbl_size; tbl_size = shost->sg_tablesize; - if (tbl_size > SCSI_MAX_SG_SEGMENTS) - tbl_size = SCSI_MAX_SG_SEGMENTS; + if (tbl_size > SG_CHUNK_SIZE) + tbl_size = SG_CHUNK_SIZE; sgl_size = tbl_size * sizeof(struct scatterlist); cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; if (scsi_host_get_prot(shost)) @@ -2264,8 +2175,6 @@ EXPORT_SYMBOL(scsi_unblock_requests); int __init scsi_init_queue(void) { - int i; - scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", sizeof(struct scsi_data_buffer), 0, 0, NULL); @@ 
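The scsi_lib.c hunks above delete the driver-private scsi_sg_pools machinery in favour of the generic chained scatterlist helpers; the call sites shrink to the pattern sketched below, with argument values taken from scsi_init_sgtable() and scsi_mq_free_sgtables() in this diff and the surrounding error handling trimmed.

	/* allocate: reuse the chunk embedded after the command when it fits,
	 * otherwise chain extra chunks from the shared sg pools */
	if (sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
				   sdb->table.sgl))
		return BLKPREP_DEFER;		/* out of memory, requeue later */

	/* free: the flag tells the helper a first chunk was supplied above */
	sg_free_table_chained(&sdb->table, true);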
-2274,53 +2183,12 @@ int __init scsi_init_queue(void) return -ENOMEM; } - for (i = 0; i < SG_MEMPOOL_NR; i++) { - struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; - int size = sgp->size * sizeof(struct scatterlist); - - sgp->slab = kmem_cache_create(sgp->name, size, 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!sgp->slab) { - printk(KERN_ERR "SCSI: can't init sg slab %s\n", - sgp->name); - goto cleanup_sdb; - } - - sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, - sgp->slab); - if (!sgp->pool) { - printk(KERN_ERR "SCSI: can't init sg mempool %s\n", - sgp->name); - goto cleanup_sdb; - } - } - return 0; - -cleanup_sdb: - for (i = 0; i < SG_MEMPOOL_NR; i++) { - struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; - if (sgp->pool) - mempool_destroy(sgp->pool); - if (sgp->slab) - kmem_cache_destroy(sgp->slab); - } - kmem_cache_destroy(scsi_sdb_cache); - - return -ENOMEM; } void scsi_exit_queue(void) { - int i; - kmem_cache_destroy(scsi_sdb_cache); - - for (i = 0; i < SG_MEMPOOL_NR; i++) { - struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; - mempool_destroy(sgp->pool); - kmem_cache_destroy(sgp->slab); - } } /** @@ -3196,6 +3064,7 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) * - EUI-64 based 12-byte * - NAA IEEE Registered * - NAA IEEE Extended + * - T10 Vendor ID * as longer descriptors reduce the likelyhood * of identification clashes. */ @@ -3214,6 +3083,21 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) goto next_desig; switch (d[1] & 0xf) { + case 0x1: + /* T10 Vendor ID */ + if (cur_id_size > d[3]) + break; + /* Prefer anything */ + if (cur_id_type > 0x01 && cur_id_type != 0xff) + break; + cur_id_size = d[3]; + if (cur_id_size + 4 > id_len) + cur_id_size = id_len - 4; + cur_id_str = d + 4; + cur_id_type = d[1] & 0xf; + id_size = snprintf(id, id_len, "t10.%*pE", + cur_id_size, cur_id_str); + break; case 0x2: /* EUI-64 */ if (cur_id_size > d[3]) diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 27b4d0a6a01d..57a4b9973320 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -116,7 +116,7 @@ extern void scsi_exit_procfs(void); extern char scsi_scan_type[]; extern int scsi_complete_async_scans(void); extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, - unsigned int, u64, int); + unsigned int, u64, enum scsi_scan_mode); extern void scsi_forget_host(struct Scsi_Host *); extern void scsi_rescan_device(struct device *); diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 251598eb3547..7a74b82e8973 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -251,7 +251,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun) if (shost->transportt->user_scan) error = shost->transportt->user_scan(shost, channel, id, lun); else - error = scsi_scan_host_selected(shost, channel, id, lun, 1); + error = scsi_scan_host_selected(shost, channel, id, lun, + SCSI_SCAN_MANUAL); scsi_host_put(shost); return error; } diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 97074c91e328..e0a78f53d809 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -96,10 +96,13 @@ MODULE_PARM_DESC(max_luns, #define SCSI_SCAN_TYPE_DEFAULT "sync" #endif -char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; +char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT; -module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); -MODULE_PARM_DESC(scan, "sync, async or none"); +module_param_string(scan, scsi_scan_type, 
sizeof(scsi_scan_type), + S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(scan, "sync, async, manual, or none. " + "Setting to 'manual' disables automatic scanning, but allows " + "for manual device scan via the 'scan' sysfs attribute."); static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; @@ -316,6 +319,7 @@ static void scsi_target_destroy(struct scsi_target *starget) struct Scsi_Host *shost = dev_to_shost(dev->parent); unsigned long flags; + BUG_ON(starget->state == STARGET_DEL); starget->state = STARGET_DEL; transport_destroy_device(dev); spin_lock_irqsave(shost->host_lock, flags); @@ -1040,7 +1044,8 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, * @lun: LUN of target device * @bflagsp: store bflags here if not NULL * @sdevp: probe the LUN corresponding to this scsi_device - * @rescan: if nonzero skip some code only needed on first scan + * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only + * needed on first scan * @hostdata: passed to scsi_alloc_sdev() * * Description: @@ -1055,7 +1060,8 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, **/ static int scsi_probe_and_add_lun(struct scsi_target *starget, u64 lun, int *bflagsp, - struct scsi_device **sdevp, int rescan, + struct scsi_device **sdevp, + enum scsi_scan_mode rescan, void *hostdata) { struct scsi_device *sdev; @@ -1069,7 +1075,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, */ sdev = scsi_device_lookup_by_target(starget, lun); if (sdev) { - if (rescan || !scsi_device_created(sdev)) { + if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) { SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, "scsi scan: device exists on %s\n", dev_name(&sdev->sdev_gendev))); @@ -1205,7 +1211,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, * Modifies sdevscan->lun. **/ static void scsi_sequential_lun_scan(struct scsi_target *starget, - int bflags, int scsi_level, int rescan) + int bflags, int scsi_level, + enum scsi_scan_mode rescan) { uint max_dev_lun; u64 sparse_lun, lun; @@ -1300,7 +1307,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, * 1: could not scan with REPORT LUN **/ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, - int rescan) + enum scsi_scan_mode rescan) { char devname[64]; unsigned char scsi_cmd[MAX_COMMAND_SIZE]; @@ -1546,7 +1553,7 @@ void scsi_rescan_device(struct device *dev) EXPORT_SYMBOL(scsi_rescan_device); static void __scsi_scan_target(struct device *parent, unsigned int channel, - unsigned int id, u64 lun, int rescan) + unsigned int id, u64 lun, enum scsi_scan_mode rescan) { struct Scsi_Host *shost = dev_to_shost(parent); int bflags = 0; @@ -1604,7 +1611,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, * @channel: channel to scan * @id: target id to scan * @lun: Specific LUN to scan or SCAN_WILD_CARD - * @rescan: passed to LUN scanning routines + * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for + * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs, + * and SCSI_SCAN_MANUAL to force scanning even if + * 'scan=manual' is set. * * Description: * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, @@ -1614,13 +1624,17 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, * sequential scan of LUNs on the target id. 
**/ void scsi_scan_target(struct device *parent, unsigned int channel, - unsigned int id, u64 lun, int rescan) + unsigned int id, u64 lun, enum scsi_scan_mode rescan) { struct Scsi_Host *shost = dev_to_shost(parent); if (strncmp(scsi_scan_type, "none", 4) == 0) return; + if (rescan != SCSI_SCAN_MANUAL && + strncmp(scsi_scan_type, "manual", 6) == 0) + return; + mutex_lock(&shost->scan_mutex); if (!shost->async_scan) scsi_complete_async_scans(); @@ -1634,7 +1648,8 @@ void scsi_scan_target(struct device *parent, unsigned int channel, EXPORT_SYMBOL(scsi_scan_target); static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, - unsigned int id, u64 lun, int rescan) + unsigned int id, u64 lun, + enum scsi_scan_mode rescan) { uint order_id; @@ -1665,7 +1680,8 @@ static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, } int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, - unsigned int id, u64 lun, int rescan) + unsigned int id, u64 lun, + enum scsi_scan_mode rescan) { SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, "%s: <%u:%u:%llu>\n", @@ -1844,7 +1860,8 @@ void scsi_scan_host(struct Scsi_Host *shost) { struct async_scan_data *data; - if (strncmp(scsi_scan_type, "none", 4) == 0) + if (strncmp(scsi_scan_type, "none", 4) == 0 || + strncmp(scsi_scan_type, "manual", 6) == 0) return; if (scsi_autopm_get_host(shost) < 0) return; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 2b642b145be1..07349270535d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -145,7 +145,8 @@ static int scsi_scan(struct Scsi_Host *shost, const char *str) if (shost->transportt->user_scan) res = shost->transportt->user_scan(shost, channel, id, lun); else - res = scsi_scan_host_selected(shost, channel, id, lun, 1); + res = scsi_scan_host_selected(shost, channel, id, lun, + SCSI_SCAN_MANUAL); return res; } @@ -1366,18 +1367,18 @@ static void __scsi_remove_target(struct scsi_target *starget) void scsi_remove_target(struct device *dev) { struct Scsi_Host *shost = dev_to_shost(dev->parent); - struct scsi_target *starget, *last_target = NULL; + struct scsi_target *starget; unsigned long flags; restart: spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(starget, &shost->__targets, siblings) { if (starget->state == STARGET_DEL || - starget == last_target) + starget->state == STARGET_REMOVE) continue; if (starget->dev.parent == dev || &starget->dev == dev) { kref_get(&starget->reap_ref); - last_target = starget; + starget->state = STARGET_REMOVE; spin_unlock_irqrestore(shost->host_lock, flags); __scsi_remove_target(starget); scsi_target_reap(starget); diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 08bb47b53bc3..0ff083bbf5b1 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -17,6 +17,7 @@ */ #include <linux/kernel.h> #include <linux/trace_seq.h> +#include <asm/unaligned.h> #include <trace/events/scsi.h> #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) @@ -231,6 +232,158 @@ out: } static const char * +scsi_trace_maintenance_in(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u32 alloc_len; + + switch (SERVICE_ACTION16(cdb)) { + case MI_REPORT_IDENTIFYING_INFORMATION: + cmd = "REPORT_IDENTIFYING_INFORMATION"; + break; + case MI_REPORT_TARGET_PGS: + cmd = "REPORT_TARGET_PORT_GROUPS"; + break; + case MI_REPORT_ALIASES: + cmd = "REPORT_ALIASES"; + break; + case MI_REPORT_SUPPORTED_OPERATION_CODES: + cmd = 
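The scan-mode plumbing above replaces the old int rescan flag with an enum so the new scan=manual setting can be honoured everywhere. A sketch of the resulting gate, with enum values assumed from the call sites in this diff: with scan=manual, only a write to a host's sysfs scan attribute, which reaches scsi_scan() with SCSI_SCAN_MANUAL, will probe LUNs.

enum scsi_scan_mode {
	SCSI_SCAN_INITIAL = 0,	/* first scan when a target is added     */
	SCSI_SCAN_RESCAN,	/* transport- or driver-initiated rescan */
	SCSI_SCAN_MANUAL,	/* explicit user request via sysfs       */
};

static bool scan_allowed_sketch(enum scsi_scan_mode rescan)
{
	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return false;
	if (rescan != SCSI_SCAN_MANUAL &&
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return false;	/* automatic scans are suppressed */
	return true;
}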
"REPORT_SUPPORTED_OPERATION_CODES"; + break; + case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: + cmd = "REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS"; + break; + case MI_REPORT_PRIORITY: + cmd = "REPORT_PRIORITY"; + break; + case MI_REPORT_TIMESTAMP: + cmd = "REPORT_TIMESTAMP"; + break; + case MI_MANAGEMENT_PROTOCOL_IN: + cmd = "MANAGEMENT_PROTOCOL_IN"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + alloc_len = get_unaligned_be32(&cdb[6]); + + trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_maintenance_out(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u32 alloc_len; + + switch (SERVICE_ACTION16(cdb)) { + case MO_SET_IDENTIFYING_INFORMATION: + cmd = "SET_IDENTIFYING_INFORMATION"; + break; + case MO_SET_TARGET_PGS: + cmd = "SET_TARGET_PORT_GROUPS"; + break; + case MO_CHANGE_ALIASES: + cmd = "CHANGE_ALIASES"; + break; + case MO_SET_PRIORITY: + cmd = "SET_PRIORITY"; + break; + case MO_SET_TIMESTAMP: + cmd = "SET_TIMESTAMP"; + break; + case MO_MANAGEMENT_PROTOCOL_OUT: + cmd = "MANAGEMENT_PROTOCOL_OUT"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + alloc_len = get_unaligned_be32(&cdb[6]); + + trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_zbc_in(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u64 zone_id; + u32 alloc_len; + u8 options; + + switch (SERVICE_ACTION16(cdb)) { + case ZI_REPORT_ZONES: + cmd = "REPORT_ZONES"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + zone_id = get_unaligned_be64(&cdb[2]); + alloc_len = get_unaligned_be32(&cdb[10]); + options = cdb[14] & 0x3f; + + trace_seq_printf(p, "%s zone=%llu alloc_len=%u options=%u partial=%u", + cmd, (unsigned long long)zone_id, alloc_len, + options, (cdb[14] >> 7) & 1); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * +scsi_trace_zbc_out(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = trace_seq_buffer_ptr(p), *cmd; + u64 zone_id; + + switch (SERVICE_ACTION16(cdb)) { + case ZO_CLOSE_ZONE: + cmd = "CLOSE_ZONE"; + break; + case ZO_FINISH_ZONE: + cmd = "FINISH_ZONE"; + break; + case ZO_OPEN_ZONE: + cmd = "OPEN_ZONE"; + break; + case ZO_RESET_WRITE_POINTER: + cmd = "RESET_WRITE_POINTER"; + break; + default: + trace_seq_puts(p, "UNKNOWN"); + goto out; + } + + zone_id = get_unaligned_be64(&cdb[2]); + + trace_seq_printf(p, "%s zone=%llu all=%u", cmd, + (unsigned long long)zone_id, cdb[14] & 1); + +out: + trace_seq_putc(p, 0); + + return ret; +} + +static const char * scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) { switch (SERVICE_ACTION32(cdb)) { @@ -282,6 +435,14 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) return scsi_trace_service_action_in(p, cdb, len); case VARIABLE_LENGTH_CMD: return scsi_trace_varlen(p, cdb, len); + case MAINTENANCE_IN: + return scsi_trace_maintenance_in(p, cdb, len); + case MAINTENANCE_OUT: + return scsi_trace_maintenance_out(p, cdb, len); + case ZBC_IN: + return scsi_trace_zbc_in(p, cdb, len); + case ZBC_OUT: + return scsi_trace_zbc_out(p, cdb, len); default: return scsi_trace_misc(p, cdb, len); } diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 8a8822641b26..0f3a3869524b 100644 --- 
a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2027,11 +2027,10 @@ static void fc_vport_dev_release(struct device *dev) kfree(vport); } -int scsi_is_fc_vport(const struct device *dev) +static int scsi_is_fc_vport(const struct device *dev) { return dev->release == fc_vport_dev_release; } -EXPORT_SYMBOL(scsi_is_fc_vport); static int fc_vport_match(struct attribute_container *cont, struct device *dev) @@ -2110,7 +2109,8 @@ fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun) if ((channel == rport->channel) && (id == rport->scsi_target_id)) { spin_unlock_irqrestore(shost->host_lock, flags); - scsi_scan_target(&rport->dev, channel, id, lun, 1); + scsi_scan_target(&rport->dev, channel, id, lun, + SCSI_SCAN_MANUAL); return; } } @@ -3277,7 +3277,8 @@ fc_scsi_scan_rport(struct work_struct *work) (rport->roles & FC_PORT_ROLE_FCP_TARGET) && !(i->f->disable_target_scan)) { scsi_scan_target(&rport->dev, rport->channel, - rport->scsi_target_id, SCAN_WILD_CARD, 1); + rport->scsi_target_id, SCAN_WILD_CARD, + SCSI_SCAN_RESCAN); } spin_lock_irqsave(shost->host_lock, flags); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 441481623fb9..42bca619f854 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1009,7 +1009,7 @@ static void iscsi_flashnode_sess_release(struct device *dev) kfree(fnode_sess); } -struct device_type iscsi_flashnode_sess_dev_type = { +static struct device_type iscsi_flashnode_sess_dev_type = { .name = "iscsi_flashnode_sess_dev_type", .groups = iscsi_flashnode_sess_attr_groups, .release = iscsi_flashnode_sess_release, @@ -1195,13 +1195,13 @@ static void iscsi_flashnode_conn_release(struct device *dev) kfree(fnode_conn); } -struct device_type iscsi_flashnode_conn_dev_type = { +static struct device_type iscsi_flashnode_conn_dev_type = { .name = "iscsi_flashnode_conn_dev_type", .groups = iscsi_flashnode_conn_attr_groups, .release = iscsi_flashnode_conn_release, }; -struct bus_type iscsi_flashnode_bus; +static struct bus_type iscsi_flashnode_bus; int iscsi_flashnode_bus_match(struct device *dev, struct device_driver *drv) @@ -1212,7 +1212,7 @@ int iscsi_flashnode_bus_match(struct device *dev, } EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); -struct bus_type iscsi_flashnode_bus = { +static struct bus_type iscsi_flashnode_bus = { .name = "iscsi_flashnode", .match = &iscsi_flashnode_bus_match, }; @@ -1324,11 +1324,10 @@ EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); * 1 on success * 0 on failure */ -int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) +static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) { return dev->bus == &iscsi_flashnode_bus; } -EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev); static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) { @@ -1783,6 +1782,7 @@ struct iscsi_scan_data { unsigned int channel; unsigned int id; u64 lun; + enum scsi_scan_mode rescan; }; static int iscsi_user_scan_session(struct device *dev, void *data) @@ -1819,7 +1819,7 @@ static int iscsi_user_scan_session(struct device *dev, void *data) (scan_data->id == SCAN_WILD_CARD || scan_data->id == id)) scsi_scan_target(&session->dev, 0, id, - scan_data->lun, 1); + scan_data->lun, scan_data->rescan); } user_scan_exit: @@ -1836,6 +1836,7 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, scan_data.channel = channel; scan_data.id = id; scan_data.lun = lun; + scan_data.rescan = SCSI_SCAN_MANUAL; return 
device_for_each_child(&shost->shost_gendev, &scan_data, iscsi_user_scan_session); @@ -1852,6 +1853,7 @@ static void iscsi_scan_session(struct work_struct *work) scan_data.channel = 0; scan_data.id = SCAN_WILD_CARD; scan_data.lun = SCAN_WILD_CARD; + scan_data.rescan = SCSI_SCAN_RESCAN; iscsi_user_scan_session(&session->dev, &scan_data); atomic_dec(&ihost->nr_scans); @@ -2067,13 +2069,10 @@ EXPORT_SYMBOL_GPL(iscsi_alloc_session); int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) { - struct Scsi_Host *shost = iscsi_session_to_shost(session); - struct iscsi_cls_host *ihost; unsigned long flags; int id = 0; int err; - ihost = shost->shost_data; session->sid = atomic_add_return(1, &iscsi_session_nr); if (target_id == ISCSI_MAX_TARGET) { diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index b6f958193dad..3f0ff072184b 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1614,7 +1614,8 @@ int sas_rphy_add(struct sas_rphy *rphy) else lun = 0; - scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0); + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, + SCSI_SCAN_INITIAL); } return 0; @@ -1739,8 +1740,8 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel, if ((channel == SCAN_WILD_CARD || channel == 0) && (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { - scsi_scan_target(&rphy->dev, 0, - rphy->scsi_target_id, lun, 1); + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, + lun, SCSI_SCAN_MANUAL); } } mutex_unlock(&sas_host->lock); diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h new file mode 100644 index 000000000000..e4e1dccd1f2f --- /dev/null +++ b/drivers/scsi/sense_codes.h @@ -0,0 +1,826 @@ +/* + * The canonical list of T10 Additional Sense Codes is available at: + * http://www.t10.org/lists/asc-num.txt [most recent: 20141221] + */ + +SENSE_CODE(0x0000, "No additional sense information") +SENSE_CODE(0x0001, "Filemark detected") +SENSE_CODE(0x0002, "End-of-partition/medium detected") +SENSE_CODE(0x0003, "Setmark detected") +SENSE_CODE(0x0004, "Beginning-of-partition/medium detected") +SENSE_CODE(0x0005, "End-of-data detected") +SENSE_CODE(0x0006, "I/O process terminated") +SENSE_CODE(0x0007, "Programmable early warning detected") +SENSE_CODE(0x0011, "Audio play operation in progress") +SENSE_CODE(0x0012, "Audio play operation paused") +SENSE_CODE(0x0013, "Audio play operation successfully completed") +SENSE_CODE(0x0014, "Audio play operation stopped due to error") +SENSE_CODE(0x0015, "No current audio status to return") +SENSE_CODE(0x0016, "Operation in progress") +SENSE_CODE(0x0017, "Cleaning requested") +SENSE_CODE(0x0018, "Erase operation in progress") +SENSE_CODE(0x0019, "Locate operation in progress") +SENSE_CODE(0x001A, "Rewind operation in progress") +SENSE_CODE(0x001B, "Set capacity operation in progress") +SENSE_CODE(0x001C, "Verify operation in progress") +SENSE_CODE(0x001D, "ATA pass through information available") +SENSE_CODE(0x001E, "Conflicting SA creation request") +SENSE_CODE(0x001F, "Logical unit transitioning to another power condition") +SENSE_CODE(0x0020, "Extended copy information available") +SENSE_CODE(0x0021, "Atomic command aborted due to ACA") + +SENSE_CODE(0x0100, "No index/sector signal") + +SENSE_CODE(0x0200, "No seek complete") + +SENSE_CODE(0x0300, "Peripheral device write fault") +SENSE_CODE(0x0301, "No write current") +SENSE_CODE(0x0302, "Excessive write errors") + +SENSE_CODE(0x0400, "Logical unit not 
ready, cause not reportable") +SENSE_CODE(0x0401, "Logical unit is in process of becoming ready") +SENSE_CODE(0x0402, "Logical unit not ready, initializing command required") +SENSE_CODE(0x0403, "Logical unit not ready, manual intervention required") +SENSE_CODE(0x0404, "Logical unit not ready, format in progress") +SENSE_CODE(0x0405, "Logical unit not ready, rebuild in progress") +SENSE_CODE(0x0406, "Logical unit not ready, recalculation in progress") +SENSE_CODE(0x0407, "Logical unit not ready, operation in progress") +SENSE_CODE(0x0408, "Logical unit not ready, long write in progress") +SENSE_CODE(0x0409, "Logical unit not ready, self-test in progress") +SENSE_CODE(0x040A, "Logical unit not accessible, asymmetric access state transition") +SENSE_CODE(0x040B, "Logical unit not accessible, target port in standby state") +SENSE_CODE(0x040C, "Logical unit not accessible, target port in unavailable state") +SENSE_CODE(0x040D, "Logical unit not ready, structure check required") +SENSE_CODE(0x040E, "Logical unit not ready, security session in progress") +SENSE_CODE(0x0410, "Logical unit not ready, auxiliary memory not accessible") +SENSE_CODE(0x0411, "Logical unit not ready, notify (enable spinup) required") +SENSE_CODE(0x0412, "Logical unit not ready, offline") +SENSE_CODE(0x0413, "Logical unit not ready, SA creation in progress") +SENSE_CODE(0x0414, "Logical unit not ready, space allocation in progress") +SENSE_CODE(0x0415, "Logical unit not ready, robotics disabled") +SENSE_CODE(0x0416, "Logical unit not ready, configuration required") +SENSE_CODE(0x0417, "Logical unit not ready, calibration required") +SENSE_CODE(0x0418, "Logical unit not ready, a door is open") +SENSE_CODE(0x0419, "Logical unit not ready, operating in sequential mode") +SENSE_CODE(0x041A, "Logical unit not ready, start stop unit command in progress") +SENSE_CODE(0x041B, "Logical unit not ready, sanitize in progress") +SENSE_CODE(0x041C, "Logical unit not ready, additional power use not yet granted") +SENSE_CODE(0x041D, "Logical unit not ready, configuration in progress") +SENSE_CODE(0x041E, "Logical unit not ready, microcode activation required") +SENSE_CODE(0x041F, "Logical unit not ready, microcode download required") +SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required") +SENSE_CODE(0x0421, "Logical unit not ready, hard reset required") +SENSE_CODE(0x0422, "Logical unit not ready, power cycle required") + +SENSE_CODE(0x0500, "Logical unit does not respond to selection") + +SENSE_CODE(0x0600, "No reference position found") + +SENSE_CODE(0x0700, "Multiple peripheral devices selected") + +SENSE_CODE(0x0800, "Logical unit communication failure") +SENSE_CODE(0x0801, "Logical unit communication time-out") +SENSE_CODE(0x0802, "Logical unit communication parity error") +SENSE_CODE(0x0803, "Logical unit communication CRC error (Ultra-DMA/32)") +SENSE_CODE(0x0804, "Unreachable copy target") + +SENSE_CODE(0x0900, "Track following error") +SENSE_CODE(0x0901, "Tracking servo failure") +SENSE_CODE(0x0902, "Focus servo failure") +SENSE_CODE(0x0903, "Spindle servo failure") +SENSE_CODE(0x0904, "Head select fault") +SENSE_CODE(0x0905, "Vibration induced tracking error") + +SENSE_CODE(0x0A00, "Error log overflow") + +SENSE_CODE(0x0B00, "Warning") +SENSE_CODE(0x0B01, "Warning - specified temperature exceeded") +SENSE_CODE(0x0B02, "Warning - enclosure degraded") +SENSE_CODE(0x0B03, "Warning - background self-test failed") +SENSE_CODE(0x0B04, "Warning - background pre-scan detected medium error") +SENSE_CODE(0x0B05, 
"Warning - background medium scan detected medium error") +SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile") +SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache") +SENSE_CODE(0x0B08, "Warning - power loss expected") +SENSE_CODE(0x0B09, "Warning - device statistics notification active") + +SENSE_CODE(0x0C00, "Write error") +SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation") +SENSE_CODE(0x0C02, "Write error - auto reallocation failed") +SENSE_CODE(0x0C03, "Write error - recommend reassignment") +SENSE_CODE(0x0C04, "Compression check miscompare error") +SENSE_CODE(0x0C05, "Data expansion occurred during compression") +SENSE_CODE(0x0C06, "Block not compressible") +SENSE_CODE(0x0C07, "Write error - recovery needed") +SENSE_CODE(0x0C08, "Write error - recovery failed") +SENSE_CODE(0x0C09, "Write error - loss of streaming") +SENSE_CODE(0x0C0A, "Write error - padding blocks added") +SENSE_CODE(0x0C0B, "Auxiliary memory write error") +SENSE_CODE(0x0C0C, "Write error - unexpected unsolicited data") +SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data") +SENSE_CODE(0x0C0E, "Multiple write errors") +SENSE_CODE(0x0C0F, "Defects in error window") +SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations") + +SENSE_CODE(0x0D00, "Error detected by third party temporary initiator") +SENSE_CODE(0x0D01, "Third party device failure") +SENSE_CODE(0x0D02, "Copy target device not reachable") +SENSE_CODE(0x0D03, "Incorrect copy target device type") +SENSE_CODE(0x0D04, "Copy target device data underrun") +SENSE_CODE(0x0D05, "Copy target device data overrun") + +SENSE_CODE(0x0E00, "Invalid information unit") +SENSE_CODE(0x0E01, "Information unit too short") +SENSE_CODE(0x0E02, "Information unit too long") +SENSE_CODE(0x0E03, "Invalid field in command information unit") + +SENSE_CODE(0x1000, "Id CRC or ECC error") +SENSE_CODE(0x1001, "Logical block guard check failed") +SENSE_CODE(0x1002, "Logical block application tag check failed") +SENSE_CODE(0x1003, "Logical block reference tag check failed") +SENSE_CODE(0x1004, "Logical block protection error on recover buffered data") +SENSE_CODE(0x1005, "Logical block protection method error") + +SENSE_CODE(0x1100, "Unrecovered read error") +SENSE_CODE(0x1101, "Read retries exhausted") +SENSE_CODE(0x1102, "Error too long to correct") +SENSE_CODE(0x1103, "Multiple read errors") +SENSE_CODE(0x1104, "Unrecovered read error - auto reallocate failed") +SENSE_CODE(0x1105, "L-EC uncorrectable error") +SENSE_CODE(0x1106, "CIRC unrecovered error") +SENSE_CODE(0x1107, "Data re-synchronization error") +SENSE_CODE(0x1108, "Incomplete block read") +SENSE_CODE(0x1109, "No gap found") +SENSE_CODE(0x110A, "Miscorrected error") +SENSE_CODE(0x110B, "Unrecovered read error - recommend reassignment") +SENSE_CODE(0x110C, "Unrecovered read error - recommend rewrite the data") +SENSE_CODE(0x110D, "De-compression CRC error") +SENSE_CODE(0x110E, "Cannot decompress using declared algorithm") +SENSE_CODE(0x110F, "Error reading UPC/EAN number") +SENSE_CODE(0x1110, "Error reading ISRC number") +SENSE_CODE(0x1111, "Read error - loss of streaming") +SENSE_CODE(0x1112, "Auxiliary memory read error") +SENSE_CODE(0x1113, "Read error - failed retransmission request") +SENSE_CODE(0x1114, "Read error - lba marked bad by application client") +SENSE_CODE(0x1115, "Write after sanitize required") + +SENSE_CODE(0x1200, "Address mark not found for id field") + +SENSE_CODE(0x1300, "Address mark not found for data field") + +SENSE_CODE(0x1400, "Recorded 
entity not found") +SENSE_CODE(0x1401, "Record not found") +SENSE_CODE(0x1402, "Filemark or setmark not found") +SENSE_CODE(0x1403, "End-of-data not found") +SENSE_CODE(0x1404, "Block sequence error") +SENSE_CODE(0x1405, "Record not found - recommend reassignment") +SENSE_CODE(0x1406, "Record not found - data auto-reallocated") +SENSE_CODE(0x1407, "Locate operation failure") + +SENSE_CODE(0x1500, "Random positioning error") +SENSE_CODE(0x1501, "Mechanical positioning error") +SENSE_CODE(0x1502, "Positioning error detected by read of medium") + +SENSE_CODE(0x1600, "Data synchronization mark error") +SENSE_CODE(0x1601, "Data sync error - data rewritten") +SENSE_CODE(0x1602, "Data sync error - recommend rewrite") +SENSE_CODE(0x1603, "Data sync error - data auto-reallocated") +SENSE_CODE(0x1604, "Data sync error - recommend reassignment") + +SENSE_CODE(0x1700, "Recovered data with no error correction applied") +SENSE_CODE(0x1701, "Recovered data with retries") +SENSE_CODE(0x1702, "Recovered data with positive head offset") +SENSE_CODE(0x1703, "Recovered data with negative head offset") +SENSE_CODE(0x1704, "Recovered data with retries and/or circ applied") +SENSE_CODE(0x1705, "Recovered data using previous sector id") +SENSE_CODE(0x1706, "Recovered data without ECC - data auto-reallocated") +SENSE_CODE(0x1707, "Recovered data without ECC - recommend reassignment") +SENSE_CODE(0x1708, "Recovered data without ECC - recommend rewrite") +SENSE_CODE(0x1709, "Recovered data without ECC - data rewritten") + +SENSE_CODE(0x1800, "Recovered data with error correction applied") +SENSE_CODE(0x1801, "Recovered data with error corr. & retries applied") +SENSE_CODE(0x1802, "Recovered data - data auto-reallocated") +SENSE_CODE(0x1803, "Recovered data with CIRC") +SENSE_CODE(0x1804, "Recovered data with L-EC") +SENSE_CODE(0x1805, "Recovered data - recommend reassignment") +SENSE_CODE(0x1806, "Recovered data - recommend rewrite") +SENSE_CODE(0x1807, "Recovered data with ECC - data rewritten") +SENSE_CODE(0x1808, "Recovered data with linking") + +SENSE_CODE(0x1900, "Defect list error") +SENSE_CODE(0x1901, "Defect list not available") +SENSE_CODE(0x1902, "Defect list error in primary list") +SENSE_CODE(0x1903, "Defect list error in grown list") + +SENSE_CODE(0x1A00, "Parameter list length error") + +SENSE_CODE(0x1B00, "Synchronous data transfer error") + +SENSE_CODE(0x1C00, "Defect list not found") +SENSE_CODE(0x1C01, "Primary defect list not found") +SENSE_CODE(0x1C02, "Grown defect list not found") + +SENSE_CODE(0x1D00, "Miscompare during verify operation") +SENSE_CODE(0x1D01, "Miscompare verify of unmapped LBA") + +SENSE_CODE(0x1E00, "Recovered id with ECC correction") + +SENSE_CODE(0x1F00, "Partial defect list transfer") + +SENSE_CODE(0x2000, "Invalid command operation code") +SENSE_CODE(0x2001, "Access denied - initiator pending-enrolled") +SENSE_CODE(0x2002, "Access denied - no access rights") +SENSE_CODE(0x2003, "Access denied - invalid mgmt id key") +SENSE_CODE(0x2004, "Illegal command while in write capable state") +SENSE_CODE(0x2005, "Obsolete") +SENSE_CODE(0x2006, "Illegal command while in explicit address mode") +SENSE_CODE(0x2007, "Illegal command while in implicit address mode") +SENSE_CODE(0x2008, "Access denied - enrollment conflict") +SENSE_CODE(0x2009, "Access denied - invalid LU identifier") +SENSE_CODE(0x200A, "Access denied - invalid proxy token") +SENSE_CODE(0x200B, "Access denied - ACL LUN conflict") +SENSE_CODE(0x200C, "Illegal command when not in append-only mode") + +SENSE_CODE(0x2100, 
"Logical block address out of range") +SENSE_CODE(0x2101, "Invalid element address") +SENSE_CODE(0x2102, "Invalid address for write") +SENSE_CODE(0x2103, "Invalid write crossing layer jump") +SENSE_CODE(0x2104, "Unaligned write command") +SENSE_CODE(0x2105, "Write boundary violation") +SENSE_CODE(0x2106, "Attempt to read invalid data") +SENSE_CODE(0x2107, "Read boundary violation") + +SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)") + +SENSE_CODE(0x2300, "Invalid token operation, cause not reportable") +SENSE_CODE(0x2301, "Invalid token operation, unsupported token type") +SENSE_CODE(0x2302, "Invalid token operation, remote token usage not supported") +SENSE_CODE(0x2303, "Invalid token operation, remote rod token creation not supported") +SENSE_CODE(0x2304, "Invalid token operation, token unknown") +SENSE_CODE(0x2305, "Invalid token operation, token corrupt") +SENSE_CODE(0x2306, "Invalid token operation, token revoked") +SENSE_CODE(0x2307, "Invalid token operation, token expired") +SENSE_CODE(0x2308, "Invalid token operation, token cancelled") +SENSE_CODE(0x2309, "Invalid token operation, token deleted") +SENSE_CODE(0x230A, "Invalid token operation, invalid token length") + +SENSE_CODE(0x2400, "Invalid field in cdb") +SENSE_CODE(0x2401, "CDB decryption error") +SENSE_CODE(0x2402, "Obsolete") +SENSE_CODE(0x2403, "Obsolete") +SENSE_CODE(0x2404, "Security audit value frozen") +SENSE_CODE(0x2405, "Security working key frozen") +SENSE_CODE(0x2406, "Nonce not unique") +SENSE_CODE(0x2407, "Nonce timestamp out of range") +SENSE_CODE(0x2408, "Invalid XCDB") + +SENSE_CODE(0x2500, "Logical unit not supported") + +SENSE_CODE(0x2600, "Invalid field in parameter list") +SENSE_CODE(0x2601, "Parameter not supported") +SENSE_CODE(0x2602, "Parameter value invalid") +SENSE_CODE(0x2603, "Threshold parameters not supported") +SENSE_CODE(0x2604, "Invalid release of persistent reservation") +SENSE_CODE(0x2605, "Data decryption error") +SENSE_CODE(0x2606, "Too many target descriptors") +SENSE_CODE(0x2607, "Unsupported target descriptor type code") +SENSE_CODE(0x2608, "Too many segment descriptors") +SENSE_CODE(0x2609, "Unsupported segment descriptor type code") +SENSE_CODE(0x260A, "Unexpected inexact segment") +SENSE_CODE(0x260B, "Inline data length exceeded") +SENSE_CODE(0x260C, "Invalid operation for copy source or destination") +SENSE_CODE(0x260D, "Copy segment granularity violation") +SENSE_CODE(0x260E, "Invalid parameter while port is enabled") +SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value") +SENSE_CODE(0x2610, "Data decryption key fail limit reached") +SENSE_CODE(0x2611, "Incomplete key-associated data set") +SENSE_CODE(0x2612, "Vendor specific key reference not found") + +SENSE_CODE(0x2700, "Write protected") +SENSE_CODE(0x2701, "Hardware write protected") +SENSE_CODE(0x2702, "Logical unit software write protected") +SENSE_CODE(0x2703, "Associated write protect") +SENSE_CODE(0x2704, "Persistent write protect") +SENSE_CODE(0x2705, "Permanent write protect") +SENSE_CODE(0x2706, "Conditional write protect") +SENSE_CODE(0x2707, "Space allocation failed write protect") +SENSE_CODE(0x2708, "Zone is read only") + +SENSE_CODE(0x2800, "Not ready to ready change, medium may have changed") +SENSE_CODE(0x2801, "Import or export element accessed") +SENSE_CODE(0x2802, "Format-layer may have changed") +SENSE_CODE(0x2803, "Import/export element accessed, medium changed") + +SENSE_CODE(0x2900, "Power on, reset, or bus device reset occurred") +SENSE_CODE(0x2901, "Power on occurred") 
+SENSE_CODE(0x2902, "Scsi bus reset occurred") +SENSE_CODE(0x2903, "Bus device reset function occurred") +SENSE_CODE(0x2904, "Device internal reset") +SENSE_CODE(0x2905, "Transceiver mode changed to single-ended") +SENSE_CODE(0x2906, "Transceiver mode changed to lvd") +SENSE_CODE(0x2907, "I_T nexus loss occurred") + +SENSE_CODE(0x2A00, "Parameters changed") +SENSE_CODE(0x2A01, "Mode parameters changed") +SENSE_CODE(0x2A02, "Log parameters changed") +SENSE_CODE(0x2A03, "Reservations preempted") +SENSE_CODE(0x2A04, "Reservations released") +SENSE_CODE(0x2A05, "Registrations preempted") +SENSE_CODE(0x2A06, "Asymmetric access state changed") +SENSE_CODE(0x2A07, "Implicit asymmetric access state transition failed") +SENSE_CODE(0x2A08, "Priority changed") +SENSE_CODE(0x2A09, "Capacity data has changed") +SENSE_CODE(0x2A0A, "Error history I_T nexus cleared") +SENSE_CODE(0x2A0B, "Error history snapshot released") +SENSE_CODE(0x2A0C, "Error recovery attributes have changed") +SENSE_CODE(0x2A0D, "Data encryption capabilities changed") +SENSE_CODE(0x2A10, "Timestamp changed") +SENSE_CODE(0x2A11, "Data encryption parameters changed by another i_t nexus") +SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event") +SENSE_CODE(0x2A13, "Data encryption key instance counter has changed") +SENSE_CODE(0x2A14, "SA creation capabilities data has changed") +SENSE_CODE(0x2A15, "Medium removal prevention preempted") + +SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect") + +SENSE_CODE(0x2C00, "Command sequence error") +SENSE_CODE(0x2C01, "Too many windows specified") +SENSE_CODE(0x2C02, "Invalid combination of windows specified") +SENSE_CODE(0x2C03, "Current program area is not empty") +SENSE_CODE(0x2C04, "Current program area is empty") +SENSE_CODE(0x2C05, "Illegal power condition request") +SENSE_CODE(0x2C06, "Persistent prevent conflict") +SENSE_CODE(0x2C07, "Previous busy status") +SENSE_CODE(0x2C08, "Previous task set full status") +SENSE_CODE(0x2C09, "Previous reservation conflict status") +SENSE_CODE(0x2C0A, "Partition or collection contains user objects") +SENSE_CODE(0x2C0B, "Not reserved") +SENSE_CODE(0x2C0C, "Orwrite generation does not match") +SENSE_CODE(0x2C0D, "Reset write pointer not allowed") +SENSE_CODE(0x2C0E, "Zone is offline") + +SENSE_CODE(0x2D00, "Overwrite error on update in place") + +SENSE_CODE(0x2E00, "Insufficient time for operation") +SENSE_CODE(0x2E01, "Command timeout before processing") +SENSE_CODE(0x2E02, "Command timeout during processing") +SENSE_CODE(0x2E03, "Command timeout during processing due to error recovery") + +SENSE_CODE(0x2F00, "Commands cleared by another initiator") +SENSE_CODE(0x2F01, "Commands cleared by power loss notification") +SENSE_CODE(0x2F02, "Commands cleared by device server") +SENSE_CODE(0x2F03, "Some commands cleared by queuing layer event") + +SENSE_CODE(0x3000, "Incompatible medium installed") +SENSE_CODE(0x3001, "Cannot read medium - unknown format") +SENSE_CODE(0x3002, "Cannot read medium - incompatible format") +SENSE_CODE(0x3003, "Cleaning cartridge installed") +SENSE_CODE(0x3004, "Cannot write medium - unknown format") +SENSE_CODE(0x3005, "Cannot write medium - incompatible format") +SENSE_CODE(0x3006, "Cannot format medium - incompatible medium") +SENSE_CODE(0x3007, "Cleaning failure") +SENSE_CODE(0x3008, "Cannot write - application code mismatch") +SENSE_CODE(0x3009, "Current session not fixated for append") +SENSE_CODE(0x300A, "Cleaning request rejected") +SENSE_CODE(0x300C, "WORM medium - overwrite 
attempted") +SENSE_CODE(0x300D, "WORM medium - integrity check") +SENSE_CODE(0x3010, "Medium not formatted") +SENSE_CODE(0x3011, "Incompatible volume type") +SENSE_CODE(0x3012, "Incompatible volume qualifier") +SENSE_CODE(0x3013, "Cleaning volume expired") + +SENSE_CODE(0x3100, "Medium format corrupted") +SENSE_CODE(0x3101, "Format command failed") +SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking") +SENSE_CODE(0x3103, "Sanitize command failed") + +SENSE_CODE(0x3200, "No defect spare location available") +SENSE_CODE(0x3201, "Defect list update failure") + +SENSE_CODE(0x3300, "Tape length error") + +SENSE_CODE(0x3400, "Enclosure failure") + +SENSE_CODE(0x3500, "Enclosure services failure") +SENSE_CODE(0x3501, "Unsupported enclosure function") +SENSE_CODE(0x3502, "Enclosure services unavailable") +SENSE_CODE(0x3503, "Enclosure services transfer failure") +SENSE_CODE(0x3504, "Enclosure services transfer refused") +SENSE_CODE(0x3505, "Enclosure services checksum error") + +SENSE_CODE(0x3600, "Ribbon, ink, or toner failure") + +SENSE_CODE(0x3700, "Rounded parameter") + +SENSE_CODE(0x3800, "Event status notification") +SENSE_CODE(0x3802, "Esn - power management class event") +SENSE_CODE(0x3804, "Esn - media class event") +SENSE_CODE(0x3806, "Esn - device busy class event") +SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached") + +SENSE_CODE(0x3900, "Saving parameters not supported") + +SENSE_CODE(0x3A00, "Medium not present") +SENSE_CODE(0x3A01, "Medium not present - tray closed") +SENSE_CODE(0x3A02, "Medium not present - tray open") +SENSE_CODE(0x3A03, "Medium not present - loadable") +SENSE_CODE(0x3A04, "Medium not present - medium auxiliary memory accessible") + +SENSE_CODE(0x3B00, "Sequential positioning error") +SENSE_CODE(0x3B01, "Tape position error at beginning-of-medium") +SENSE_CODE(0x3B02, "Tape position error at end-of-medium") +SENSE_CODE(0x3B03, "Tape or electronic vertical forms unit not ready") +SENSE_CODE(0x3B04, "Slew failure") +SENSE_CODE(0x3B05, "Paper jam") +SENSE_CODE(0x3B06, "Failed to sense top-of-form") +SENSE_CODE(0x3B07, "Failed to sense bottom-of-form") +SENSE_CODE(0x3B08, "Reposition error") +SENSE_CODE(0x3B09, "Read past end of medium") +SENSE_CODE(0x3B0A, "Read past beginning of medium") +SENSE_CODE(0x3B0B, "Position past end of medium") +SENSE_CODE(0x3B0C, "Position past beginning of medium") +SENSE_CODE(0x3B0D, "Medium destination element full") +SENSE_CODE(0x3B0E, "Medium source element empty") +SENSE_CODE(0x3B0F, "End of medium reached") +SENSE_CODE(0x3B11, "Medium magazine not accessible") +SENSE_CODE(0x3B12, "Medium magazine removed") +SENSE_CODE(0x3B13, "Medium magazine inserted") +SENSE_CODE(0x3B14, "Medium magazine locked") +SENSE_CODE(0x3B15, "Medium magazine unlocked") +SENSE_CODE(0x3B16, "Mechanical positioning or changer error") +SENSE_CODE(0x3B17, "Read past end of user object") +SENSE_CODE(0x3B18, "Element disabled") +SENSE_CODE(0x3B19, "Element enabled") +SENSE_CODE(0x3B1A, "Data transfer device removed") +SENSE_CODE(0x3B1B, "Data transfer device inserted") +SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation") + +SENSE_CODE(0x3D00, "Invalid bits in identify message") + +SENSE_CODE(0x3E00, "Logical unit has not self-configured yet") +SENSE_CODE(0x3E01, "Logical unit failure") +SENSE_CODE(0x3E02, "Timeout on logical unit") +SENSE_CODE(0x3E03, "Logical unit failed self-test") +SENSE_CODE(0x3E04, "Logical unit unable to update self-test log") + +SENSE_CODE(0x3F00, "Target operating conditions have 
changed") +SENSE_CODE(0x3F01, "Microcode has been changed") +SENSE_CODE(0x3F02, "Changed operating definition") +SENSE_CODE(0x3F03, "Inquiry data has changed") +SENSE_CODE(0x3F04, "Component device attached") +SENSE_CODE(0x3F05, "Device identifier changed") +SENSE_CODE(0x3F06, "Redundancy group created or modified") +SENSE_CODE(0x3F07, "Redundancy group deleted") +SENSE_CODE(0x3F08, "Spare created or modified") +SENSE_CODE(0x3F09, "Spare deleted") +SENSE_CODE(0x3F0A, "Volume set created or modified") +SENSE_CODE(0x3F0B, "Volume set deleted") +SENSE_CODE(0x3F0C, "Volume set deassigned") +SENSE_CODE(0x3F0D, "Volume set reassigned") +SENSE_CODE(0x3F0E, "Reported luns data has changed") +SENSE_CODE(0x3F0F, "Echo buffer overwritten") +SENSE_CODE(0x3F10, "Medium loadable") +SENSE_CODE(0x3F11, "Medium auxiliary memory accessible") +SENSE_CODE(0x3F12, "iSCSI IP address added") +SENSE_CODE(0x3F13, "iSCSI IP address removed") +SENSE_CODE(0x3F14, "iSCSI IP address changed") +SENSE_CODE(0x3F15, "Inspect referrals sense descriptors") +SENSE_CODE(0x3F16, "Microcode has been changed without reset") +/* + * SENSE_CODE(0x40NN, "Ram failure") + * SENSE_CODE(0x40NN, "Diagnostic failure on component nn") + * SENSE_CODE(0x41NN, "Data path failure") + * SENSE_CODE(0x42NN, "Power-on or self-test failure") + */ +SENSE_CODE(0x4300, "Message error") + +SENSE_CODE(0x4400, "Internal target failure") +SENSE_CODE(0x4401, "Persistent reservation information lost") +SENSE_CODE(0x4471, "ATA device failed set features") + +SENSE_CODE(0x4500, "Select or reselect failure") + +SENSE_CODE(0x4600, "Unsuccessful soft reset") + +SENSE_CODE(0x4700, "Scsi parity error") +SENSE_CODE(0x4701, "Data phase CRC error detected") +SENSE_CODE(0x4702, "Scsi parity error detected during st data phase") +SENSE_CODE(0x4703, "Information unit iuCRC error detected") +SENSE_CODE(0x4704, "Asynchronous information protection error detected") +SENSE_CODE(0x4705, "Protocol service CRC error") +SENSE_CODE(0x4706, "Phy test function in progress") +SENSE_CODE(0x477f, "Some commands cleared by iSCSI Protocol event") + +SENSE_CODE(0x4800, "Initiator detected error message received") + +SENSE_CODE(0x4900, "Invalid message error") + +SENSE_CODE(0x4A00, "Command phase error") + +SENSE_CODE(0x4B00, "Data phase error") +SENSE_CODE(0x4B01, "Invalid target port transfer tag received") +SENSE_CODE(0x4B02, "Too much write data") +SENSE_CODE(0x4B03, "Ack/nak timeout") +SENSE_CODE(0x4B04, "Nak received") +SENSE_CODE(0x4B05, "Data offset error") +SENSE_CODE(0x4B06, "Initiator response timeout") +SENSE_CODE(0x4B07, "Connection lost") +SENSE_CODE(0x4B08, "Data-in buffer overflow - data buffer size") +SENSE_CODE(0x4B09, "Data-in buffer overflow - data buffer descriptor area") +SENSE_CODE(0x4B0A, "Data-in buffer error") +SENSE_CODE(0x4B0B, "Data-out buffer overflow - data buffer size") +SENSE_CODE(0x4B0C, "Data-out buffer overflow - data buffer descriptor area") +SENSE_CODE(0x4B0D, "Data-out buffer error") +SENSE_CODE(0x4B0E, "PCIe fabric error") +SENSE_CODE(0x4B0F, "PCIe completion timeout") +SENSE_CODE(0x4B10, "PCIe completer abort") +SENSE_CODE(0x4B11, "PCIe poisoned tlp received") +SENSE_CODE(0x4B12, "PCIe eCRC check failed") +SENSE_CODE(0x4B13, "PCIe unsupported request") +SENSE_CODE(0x4B14, "PCIe acs violation") +SENSE_CODE(0x4B15, "PCIe tlp prefix blocked") + +SENSE_CODE(0x4C00, "Logical unit failed self-configuration") +/* + * SENSE_CODE(0x4DNN, "Tagged overlapped commands (nn = queue tag)") + */ +SENSE_CODE(0x4E00, "Overlapped commands attempted") + 
+SENSE_CODE(0x5000, "Write append error") +SENSE_CODE(0x5001, "Write append position error") +SENSE_CODE(0x5002, "Position error related to timing") + +SENSE_CODE(0x5100, "Erase failure") +SENSE_CODE(0x5101, "Erase failure - incomplete erase operation detected") + +SENSE_CODE(0x5200, "Cartridge fault") + +SENSE_CODE(0x5300, "Media load or eject failed") +SENSE_CODE(0x5301, "Unload tape failure") +SENSE_CODE(0x5302, "Medium removal prevented") +SENSE_CODE(0x5303, "Medium removal prevented by data transfer element") +SENSE_CODE(0x5304, "Medium thread or unthread failure") +SENSE_CODE(0x5305, "Volume identifier invalid") +SENSE_CODE(0x5306, "Volume identifier missing") +SENSE_CODE(0x5307, "Duplicate volume identifier") +SENSE_CODE(0x5308, "Element status unknown") +SENSE_CODE(0x5309, "Data transfer device error - load failed") +SENSE_CODE(0x530a, "Data transfer device error - unload failed") +SENSE_CODE(0x530b, "Data transfer device error - unload missing") +SENSE_CODE(0x530c, "Data transfer device error - eject failed") +SENSE_CODE(0x530d, "Data transfer device error - library communication failed") + +SENSE_CODE(0x5400, "Scsi to host system interface failure") + +SENSE_CODE(0x5500, "System resource failure") +SENSE_CODE(0x5501, "System buffer full") +SENSE_CODE(0x5502, "Insufficient reservation resources") +SENSE_CODE(0x5503, "Insufficient resources") +SENSE_CODE(0x5504, "Insufficient registration resources") +SENSE_CODE(0x5505, "Insufficient access control resources") +SENSE_CODE(0x5506, "Auxiliary memory out of space") +SENSE_CODE(0x5507, "Quota error") +SENSE_CODE(0x5508, "Maximum number of supplemental decryption keys exceeded") +SENSE_CODE(0x5509, "Medium auxiliary memory not accessible") +SENSE_CODE(0x550A, "Data currently unavailable") +SENSE_CODE(0x550B, "Insufficient power for operation") +SENSE_CODE(0x550C, "Insufficient resources to create rod") +SENSE_CODE(0x550D, "Insufficient resources to create rod token") +SENSE_CODE(0x550E, "Insufficient zone resources") + +SENSE_CODE(0x5700, "Unable to recover table-of-contents") + +SENSE_CODE(0x5800, "Generation does not exist") + +SENSE_CODE(0x5900, "Updated block read") + +SENSE_CODE(0x5A00, "Operator request or state change input") +SENSE_CODE(0x5A01, "Operator medium removal request") +SENSE_CODE(0x5A02, "Operator selected write protect") +SENSE_CODE(0x5A03, "Operator selected write permit") + +SENSE_CODE(0x5B00, "Log exception") +SENSE_CODE(0x5B01, "Threshold condition met") +SENSE_CODE(0x5B02, "Log counter at maximum") +SENSE_CODE(0x5B03, "Log list codes exhausted") + +SENSE_CODE(0x5C00, "Rpl status change") +SENSE_CODE(0x5C01, "Spindles synchronized") +SENSE_CODE(0x5C02, "Spindles not synchronized") + +SENSE_CODE(0x5D00, "Failure prediction threshold exceeded") +SENSE_CODE(0x5D01, "Media failure prediction threshold exceeded") +SENSE_CODE(0x5D02, "Logical unit failure prediction threshold exceeded") +SENSE_CODE(0x5D03, "Spare area exhaustion prediction threshold exceeded") +SENSE_CODE(0x5D10, "Hardware impending failure general hard drive failure") +SENSE_CODE(0x5D11, "Hardware impending failure drive error rate too high") +SENSE_CODE(0x5D12, "Hardware impending failure data error rate too high") +SENSE_CODE(0x5D13, "Hardware impending failure seek error rate too high") +SENSE_CODE(0x5D14, "Hardware impending failure too many block reassigns") +SENSE_CODE(0x5D15, "Hardware impending failure access times too high") +SENSE_CODE(0x5D16, "Hardware impending failure start unit times too high") +SENSE_CODE(0x5D17, "Hardware impending 
failure channel parametrics") +SENSE_CODE(0x5D18, "Hardware impending failure controller detected") +SENSE_CODE(0x5D19, "Hardware impending failure throughput performance") +SENSE_CODE(0x5D1A, "Hardware impending failure seek time performance") +SENSE_CODE(0x5D1B, "Hardware impending failure spin-up retry count") +SENSE_CODE(0x5D1C, "Hardware impending failure drive calibration retry count") +SENSE_CODE(0x5D20, "Controller impending failure general hard drive failure") +SENSE_CODE(0x5D21, "Controller impending failure drive error rate too high") +SENSE_CODE(0x5D22, "Controller impending failure data error rate too high") +SENSE_CODE(0x5D23, "Controller impending failure seek error rate too high") +SENSE_CODE(0x5D24, "Controller impending failure too many block reassigns") +SENSE_CODE(0x5D25, "Controller impending failure access times too high") +SENSE_CODE(0x5D26, "Controller impending failure start unit times too high") +SENSE_CODE(0x5D27, "Controller impending failure channel parametrics") +SENSE_CODE(0x5D28, "Controller impending failure controller detected") +SENSE_CODE(0x5D29, "Controller impending failure throughput performance") +SENSE_CODE(0x5D2A, "Controller impending failure seek time performance") +SENSE_CODE(0x5D2B, "Controller impending failure spin-up retry count") +SENSE_CODE(0x5D2C, "Controller impending failure drive calibration retry count") +SENSE_CODE(0x5D30, "Data channel impending failure general hard drive failure") +SENSE_CODE(0x5D31, "Data channel impending failure drive error rate too high") +SENSE_CODE(0x5D32, "Data channel impending failure data error rate too high") +SENSE_CODE(0x5D33, "Data channel impending failure seek error rate too high") +SENSE_CODE(0x5D34, "Data channel impending failure too many block reassigns") +SENSE_CODE(0x5D35, "Data channel impending failure access times too high") +SENSE_CODE(0x5D36, "Data channel impending failure start unit times too high") +SENSE_CODE(0x5D37, "Data channel impending failure channel parametrics") +SENSE_CODE(0x5D38, "Data channel impending failure controller detected") +SENSE_CODE(0x5D39, "Data channel impending failure throughput performance") +SENSE_CODE(0x5D3A, "Data channel impending failure seek time performance") +SENSE_CODE(0x5D3B, "Data channel impending failure spin-up retry count") +SENSE_CODE(0x5D3C, "Data channel impending failure drive calibration retry count") +SENSE_CODE(0x5D40, "Servo impending failure general hard drive failure") +SENSE_CODE(0x5D41, "Servo impending failure drive error rate too high") +SENSE_CODE(0x5D42, "Servo impending failure data error rate too high") +SENSE_CODE(0x5D43, "Servo impending failure seek error rate too high") +SENSE_CODE(0x5D44, "Servo impending failure too many block reassigns") +SENSE_CODE(0x5D45, "Servo impending failure access times too high") +SENSE_CODE(0x5D46, "Servo impending failure start unit times too high") +SENSE_CODE(0x5D47, "Servo impending failure channel parametrics") +SENSE_CODE(0x5D48, "Servo impending failure controller detected") +SENSE_CODE(0x5D49, "Servo impending failure throughput performance") +SENSE_CODE(0x5D4A, "Servo impending failure seek time performance") +SENSE_CODE(0x5D4B, "Servo impending failure spin-up retry count") +SENSE_CODE(0x5D4C, "Servo impending failure drive calibration retry count") +SENSE_CODE(0x5D50, "Spindle impending failure general hard drive failure") +SENSE_CODE(0x5D51, "Spindle impending failure drive error rate too high") +SENSE_CODE(0x5D52, "Spindle impending failure data error rate too high") 
+SENSE_CODE(0x5D53, "Spindle impending failure seek error rate too high") +SENSE_CODE(0x5D54, "Spindle impending failure too many block reassigns") +SENSE_CODE(0x5D55, "Spindle impending failure access times too high") +SENSE_CODE(0x5D56, "Spindle impending failure start unit times too high") +SENSE_CODE(0x5D57, "Spindle impending failure channel parametrics") +SENSE_CODE(0x5D58, "Spindle impending failure controller detected") +SENSE_CODE(0x5D59, "Spindle impending failure throughput performance") +SENSE_CODE(0x5D5A, "Spindle impending failure seek time performance") +SENSE_CODE(0x5D5B, "Spindle impending failure spin-up retry count") +SENSE_CODE(0x5D5C, "Spindle impending failure drive calibration retry count") +SENSE_CODE(0x5D60, "Firmware impending failure general hard drive failure") +SENSE_CODE(0x5D61, "Firmware impending failure drive error rate too high") +SENSE_CODE(0x5D62, "Firmware impending failure data error rate too high") +SENSE_CODE(0x5D63, "Firmware impending failure seek error rate too high") +SENSE_CODE(0x5D64, "Firmware impending failure too many block reassigns") +SENSE_CODE(0x5D65, "Firmware impending failure access times too high") +SENSE_CODE(0x5D66, "Firmware impending failure start unit times too high") +SENSE_CODE(0x5D67, "Firmware impending failure channel parametrics") +SENSE_CODE(0x5D68, "Firmware impending failure controller detected") +SENSE_CODE(0x5D69, "Firmware impending failure throughput performance") +SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance") +SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count") +SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count") +SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)") + +SENSE_CODE(0x5E00, "Low power condition on") +SENSE_CODE(0x5E01, "Idle condition activated by timer") +SENSE_CODE(0x5E02, "Standby condition activated by timer") +SENSE_CODE(0x5E03, "Idle condition activated by command") +SENSE_CODE(0x5E04, "Standby condition activated by command") +SENSE_CODE(0x5E05, "Idle_b condition activated by timer") +SENSE_CODE(0x5E06, "Idle_b condition activated by command") +SENSE_CODE(0x5E07, "Idle_c condition activated by timer") +SENSE_CODE(0x5E08, "Idle_c condition activated by command") +SENSE_CODE(0x5E09, "Standby_y condition activated by timer") +SENSE_CODE(0x5E0A, "Standby_y condition activated by command") +SENSE_CODE(0x5E41, "Power state change to active") +SENSE_CODE(0x5E42, "Power state change to idle") +SENSE_CODE(0x5E43, "Power state change to standby") +SENSE_CODE(0x5E45, "Power state change to sleep") +SENSE_CODE(0x5E47, "Power state change to device control") + +SENSE_CODE(0x6000, "Lamp failure") + +SENSE_CODE(0x6100, "Video acquisition error") +SENSE_CODE(0x6101, "Unable to acquire video") +SENSE_CODE(0x6102, "Out of focus") + +SENSE_CODE(0x6200, "Scan head positioning error") + +SENSE_CODE(0x6300, "End of user area encountered on this track") +SENSE_CODE(0x6301, "Packet does not fit in available space") + +SENSE_CODE(0x6400, "Illegal mode for this track") +SENSE_CODE(0x6401, "Invalid packet size") + +SENSE_CODE(0x6500, "Voltage fault") + +SENSE_CODE(0x6600, "Automatic document feeder cover up") +SENSE_CODE(0x6601, "Automatic document feeder lift up") +SENSE_CODE(0x6602, "Document jam in automatic document feeder") +SENSE_CODE(0x6603, "Document miss feed automatic in document feeder") + +SENSE_CODE(0x6700, "Configuration failure") +SENSE_CODE(0x6701, "Configuration of incapable logical units failed") +SENSE_CODE(0x6702, "Add 
logical unit failed") +SENSE_CODE(0x6703, "Modification of logical unit failed") +SENSE_CODE(0x6704, "Exchange of logical unit failed") +SENSE_CODE(0x6705, "Remove of logical unit failed") +SENSE_CODE(0x6706, "Attachment of logical unit failed") +SENSE_CODE(0x6707, "Creation of logical unit failed") +SENSE_CODE(0x6708, "Assign failure occurred") +SENSE_CODE(0x6709, "Multiply assigned logical unit") +SENSE_CODE(0x670A, "Set target port groups command failed") +SENSE_CODE(0x670B, "ATA device feature not enabled") + +SENSE_CODE(0x6800, "Logical unit not configured") +SENSE_CODE(0x6801, "Subsidiary logical unit not configured") + +SENSE_CODE(0x6900, "Data loss on logical unit") +SENSE_CODE(0x6901, "Multiple logical unit failures") +SENSE_CODE(0x6902, "Parity/data mismatch") + +SENSE_CODE(0x6A00, "Informational, refer to log") + +SENSE_CODE(0x6B00, "State change has occurred") +SENSE_CODE(0x6B01, "Redundancy level got better") +SENSE_CODE(0x6B02, "Redundancy level got worse") + +SENSE_CODE(0x6C00, "Rebuild failure occurred") + +SENSE_CODE(0x6D00, "Recalculate failure occurred") + +SENSE_CODE(0x6E00, "Command to logical unit failed") + +SENSE_CODE(0x6F00, "Copy protection key exchange failure - authentication failure") +SENSE_CODE(0x6F01, "Copy protection key exchange failure - key not present") +SENSE_CODE(0x6F02, "Copy protection key exchange failure - key not established") +SENSE_CODE(0x6F03, "Read of scrambled sector without authentication") +SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region") +SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error") +SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording") +SENSE_CODE(0x6F07, "Conflict in binding nonce recording") +/* + * SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn") + */ +SENSE_CODE(0x7100, "Decompression exception long algorithm id") + +SENSE_CODE(0x7200, "Session fixation error") +SENSE_CODE(0x7201, "Session fixation error writing lead-in") +SENSE_CODE(0x7202, "Session fixation error writing lead-out") +SENSE_CODE(0x7203, "Session fixation error - incomplete track in session") +SENSE_CODE(0x7204, "Empty or partially written reserved track") +SENSE_CODE(0x7205, "No more track reservations allowed") +SENSE_CODE(0x7206, "RMZ extension is not allowed") +SENSE_CODE(0x7207, "No more test zone extensions are allowed") + +SENSE_CODE(0x7300, "Cd control error") +SENSE_CODE(0x7301, "Power calibration area almost full") +SENSE_CODE(0x7302, "Power calibration area is full") +SENSE_CODE(0x7303, "Power calibration area error") +SENSE_CODE(0x7304, "Program memory area update failure") +SENSE_CODE(0x7305, "Program memory area is full") +SENSE_CODE(0x7306, "RMA/PMA is almost full") +SENSE_CODE(0x7310, "Current power calibration area almost full") +SENSE_CODE(0x7311, "Current power calibration area is full") +SENSE_CODE(0x7317, "RDZ is full") + +SENSE_CODE(0x7400, "Security error") +SENSE_CODE(0x7401, "Unable to decrypt data") +SENSE_CODE(0x7402, "Unencrypted data encountered while decrypting") +SENSE_CODE(0x7403, "Incorrect data encryption key") +SENSE_CODE(0x7404, "Cryptographic integrity validation failed") +SENSE_CODE(0x7405, "Error decrypting data") +SENSE_CODE(0x7406, "Unknown signature verification key") +SENSE_CODE(0x7407, "Encryption parameters not useable") +SENSE_CODE(0x7408, "Digital signature validation failure") +SENSE_CODE(0x7409, "Encryption mode mismatch on read") +SENSE_CODE(0x740A, "Encrypted block not raw read enabled") +SENSE_CODE(0x740B, 
"Incorrect Encryption parameters") +SENSE_CODE(0x740C, "Unable to decrypt parameter list") +SENSE_CODE(0x740D, "Encryption algorithm disabled") +SENSE_CODE(0x7410, "SA creation parameter value invalid") +SENSE_CODE(0x7411, "SA creation parameter value rejected") +SENSE_CODE(0x7412, "Invalid SA usage") +SENSE_CODE(0x7421, "Data Encryption configuration prevented") +SENSE_CODE(0x7430, "SA creation parameter not supported") +SENSE_CODE(0x7440, "Authentication failed") +SENSE_CODE(0x7461, "External data encryption key manager access error") +SENSE_CODE(0x7462, "External data encryption key manager error") +SENSE_CODE(0x7463, "External data encryption key not found") +SENSE_CODE(0x7464, "External data encryption request not authorized") +SENSE_CODE(0x746E, "External data encryption control timeout") +SENSE_CODE(0x746F, "External data encryption control error") +SENSE_CODE(0x7471, "Logical unit access not authorized") +SENSE_CODE(0x7479, "Security conflict in translated device") diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h index d7f5ba6ba84c..8ed778d4dbb9 100644 --- a/drivers/scsi/snic/snic.h +++ b/drivers/scsi/snic/snic.h @@ -95,6 +95,8 @@ #define SNIC_DEV_RST_NOTSUP BIT(25) #define SNIC_SCSI_CLEANUP BIT(26) #define SNIC_HOST_RESET_ISSUED BIT(27) +#define SNIC_HOST_RESET_CMD_TERM \ + (SNIC_DEV_RST_NOTSUP | SNIC_SCSI_CLEANUP | SNIC_HOST_RESET_ISSUED) #define SNIC_ABTS_TIMEOUT 30000 /* msec */ #define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */ @@ -216,9 +218,10 @@ enum snic_msix_intr_index { SNIC_MSIX_INTR_MAX, }; +#define SNIC_INTRHDLR_NAMSZ (2 * IFNAMSIZ) struct snic_msix_entry { int requested; - char devname[IFNAMSIZ]; + char devname[SNIC_INTRHDLR_NAMSZ]; irqreturn_t (*isr)(int, void *); void *devid; }; diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c index ab0e06b0b4ff..449b03f3bbd3 100644 --- a/drivers/scsi/snic/snic_ctl.c +++ b/drivers/scsi/snic/snic_ctl.c @@ -39,17 +39,15 @@ snic_handle_link(struct work_struct *work) { struct snic *snic = container_of(work, struct snic, link_work); - if (snic->config.xpt_type != SNIC_DAS) { - SNIC_HOST_INFO(snic->shost, "Link Event Received.\n"); - SNIC_ASSERT_NOT_IMPL(1); - + if (snic->config.xpt_type == SNIC_DAS) return; - } snic->link_status = svnic_dev_link_status(snic->vdev); snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev); SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n", ((snic->link_status) ? 
"Up" : "Down")); + + SNIC_ASSERT_NOT_IMPL(1); } diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c index 1686f0196251..d30280326bde 100644 --- a/drivers/scsi/snic/snic_debugfs.c +++ b/drivers/scsi/snic/snic_debugfs.c @@ -264,12 +264,14 @@ snic_stats_show(struct seq_file *sfp, void *data) "Aborts Fail : %lld\n" "Aborts Driver Timeout : %lld\n" "Abort FW Timeout : %lld\n" - "Abort IO NOT Found : %lld\n", + "Abort IO NOT Found : %lld\n" + "Abort Queuing Failed : %lld\n", (u64) atomic64_read(&stats->abts.num), (u64) atomic64_read(&stats->abts.fail), (u64) atomic64_read(&stats->abts.drv_tmo), (u64) atomic64_read(&stats->abts.fw_tmo), - (u64) atomic64_read(&stats->abts.io_not_found)); + (u64) atomic64_read(&stats->abts.io_not_found), + (u64) atomic64_read(&stats->abts.q_fail)); /* Dump Reset Stats */ seq_printf(sfp, @@ -316,7 +318,9 @@ snic_stats_show(struct seq_file *sfp, void *data) seq_printf(sfp, "Last ISR Time : %llu (%8lu.%8lu)\n" "Last Ack Time : %llu (%8lu.%8lu)\n" - "ISRs : %llu\n" + "Ack ISRs : %llu\n" + "IO Cmpl ISRs : %llu\n" + "Err Notify ISRs : %llu\n" "Max CQ Entries : %lld\n" "Data Count Mismatch : %lld\n" "IOs w/ Timeout Status : %lld\n" @@ -324,12 +328,17 @@ snic_stats_show(struct seq_file *sfp, void *data) "IOs w/ SGL Invalid Stat : %lld\n" "WQ Desc Alloc Fail : %lld\n" "Queue Full : %lld\n" + "Queue Ramp Up : %lld\n" + "Queue Ramp Down : %lld\n" + "Queue Last Queue Depth : %lld\n" "Target Not Ready : %lld\n", (u64) stats->misc.last_isr_time, last_isr_tms.tv_sec, last_isr_tms.tv_nsec, (u64)stats->misc.last_ack_time, last_ack_tms.tv_sec, last_ack_tms.tv_nsec, - (u64) atomic64_read(&stats->misc.isr_cnt), + (u64) atomic64_read(&stats->misc.ack_isr_cnt), + (u64) atomic64_read(&stats->misc.cmpl_isr_cnt), + (u64) atomic64_read(&stats->misc.errnotify_isr_cnt), (u64) atomic64_read(&stats->misc.max_cq_ents), (u64) atomic64_read(&stats->misc.data_cnt_mismat), (u64) atomic64_read(&stats->misc.io_tmo), @@ -337,6 +346,9 @@ snic_stats_show(struct seq_file *sfp, void *data) (u64) atomic64_read(&stats->misc.sgl_inval), (u64) atomic64_read(&stats->misc.wq_alloc_fail), (u64) atomic64_read(&stats->misc.qfull), + (u64) atomic64_read(&stats->misc.qsz_rampup), + (u64) atomic64_read(&stats->misc.qsz_rampdown), + (u64) atomic64_read(&stats->misc.last_qsz), (u64) atomic64_read(&stats->misc.tgt_not_rdy)); return 0; diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 5f6321759ad9..b0fefd67cac3 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -171,7 +171,7 @@ snic_scsi_scan_tgt(struct work_struct *work) tgt->channel, tgt->scsi_tgt_id, SCAN_WILD_CARD, - 1); + SCSI_SCAN_RESCAN); spin_lock_irqsave(shost->host_lock, flags); tgt->flags &= ~SNIC_TGT_SCAN_PENDING; @@ -480,10 +480,21 @@ int snic_disc_start(struct snic *snic) { struct snic_disc *disc = &snic->disc; + unsigned long flags; int ret = 0; SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n"); + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic->in_remove) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + SNIC_ERR("snic driver removal in progress ...\n"); + ret = 0; + + return ret; + } + spin_unlock_irqrestore(&snic->snic_lock, flags); + mutex_lock(&disc->mutex); if (disc->state == SNIC_DISC_PENDING) { disc->req_cnt++; @@ -533,6 +544,8 @@ snic_tgt_del_all(struct snic *snic) struct list_head *cur, *nxt; unsigned long flags; + scsi_flush_work(snic->shost); + mutex_lock(&snic->disc.mutex); spin_lock_irqsave(snic->shost->host_lock, flags); @@ -545,7 +558,7 @@ 
snic_tgt_del_all(struct snic *snic) tgt = NULL; } spin_unlock_irqrestore(snic->shost->host_lock, flags); - - scsi_flush_work(snic->shost); mutex_unlock(&snic->disc.mutex); + + flush_workqueue(snic_glob->event_q); } /* end of snic_tgt_del_all */ diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h index 2cfaf2dc915f..c5f9e1917a8e 100644 --- a/drivers/scsi/snic/snic_fwint.h +++ b/drivers/scsi/snic/snic_fwint.h @@ -414,7 +414,7 @@ enum snic_ev_type { /* Payload 88 bytes = 128 - 24 - 16 */ #define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \ sizeof(struct snic_io_hdr) - \ - (2 * sizeof(u64)))) + (2 * sizeof(u64)) - sizeof(ulong))) /* * snic_host_req: host -> firmware request @@ -448,6 +448,8 @@ struct snic_host_req { /* hba reset */ struct snic_hba_reset reset; } u; + + ulong req_pa; }; /* end of snic_host_req structure */ diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c index 993db7de4e4b..8e69548395b9 100644 --- a/drivers/scsi/snic/snic_io.c +++ b/drivers/scsi/snic/snic_io.c @@ -48,7 +48,7 @@ snic_wq_cmpl_frame_send(struct vnic_wq *wq, SNIC_TRC(snic->shost->host_no, 0, 0, ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0, 0); - pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); + buf->os_buf = NULL; } @@ -137,13 +137,36 @@ snic_select_wq(struct snic *snic) return 0; } +static int +snic_wqdesc_avail(struct snic *snic, int q_num, int req_type) +{ + int nr_wqdesc = snic->config.wq_enet_desc_count; + + if (q_num > 0) { + /* + * Multi Queue case, additional care is required. + * Per WQ active requests need to be maintained. + */ + SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n"); + SNIC_BUG_ON(q_num > 0); + + return -1; + } + + nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs); + + return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1); +} + int snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) { dma_addr_t pa = 0; unsigned long flags; struct snic_fw_stats *fwstats = &snic->s_stats.fw; + struct snic_host_req *req = (struct snic_host_req *) os_buf; long act_reqs; + long desc_avail = 0; int q_num = 0; snic_print_desc(__func__, os_buf, len); @@ -156,11 +179,15 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) return -ENOMEM; } + req->req_pa = (ulong)pa; + q_num = snic_select_wq(snic); spin_lock_irqsave(&snic->wq_lock[q_num], flags); - if (!svnic_wq_desc_avail(snic->wq)) { + desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type); + if (desc_avail <= 0) { pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE); + req->req_pa = 0; spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no); @@ -169,10 +196,13 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) } snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); + /* + * Update stats + * note: when multi queue enabled, fw actv_reqs should be per queue. 
+ */ + act_reqs = atomic64_inc_return(&fwstats->actv_reqs); spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); - /* Update stats */ - act_reqs = atomic64_inc_return(&fwstats->actv_reqs); if (act_reqs > atomic64_read(&fwstats->max_actv_reqs)) atomic64_set(&fwstats->max_actv_reqs, act_reqs); @@ -318,11 +348,31 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi) "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n", rqi, rqi->req, rqi->abort_req, rqi->dr_req); - if (rqi->abort_req) + if (rqi->abort_req) { + if (rqi->abort_req->req_pa) + pci_unmap_single(snic->pdev, + rqi->abort_req->req_pa, + sizeof(struct snic_host_req), + PCI_DMA_TODEVICE); + mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); + } + + if (rqi->dr_req) { + if (rqi->dr_req->req_pa) + pci_unmap_single(snic->pdev, + rqi->dr_req->req_pa, + sizeof(struct snic_host_req), + PCI_DMA_TODEVICE); - if (rqi->dr_req) mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); + } + + if (rqi->req->req_pa) + pci_unmap_single(snic->pdev, + rqi->req->req_pa, + rqi->req_len, + PCI_DMA_TODEVICE); mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); } diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c index a85fae25ec8c..f552003128c6 100644 --- a/drivers/scsi/snic/snic_isr.c +++ b/drivers/scsi/snic/snic_isr.c @@ -38,7 +38,7 @@ snic_isr_msix_wq(int irq, void *data) unsigned long wq_work_done = 0; snic->s_stats.misc.last_isr_time = jiffies; - atomic64_inc(&snic->s_stats.misc.isr_cnt); + atomic64_inc(&snic->s_stats.misc.ack_isr_cnt); wq_work_done = snic_wq_cmpl_handler(snic, -1); svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ], @@ -56,7 +56,7 @@ snic_isr_msix_io_cmpl(int irq, void *data) unsigned long iocmpl_work_done = 0; snic->s_stats.misc.last_isr_time = jiffies; - atomic64_inc(&snic->s_stats.misc.isr_cnt); + atomic64_inc(&snic->s_stats.misc.cmpl_isr_cnt); iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1); svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL], @@ -73,7 +73,7 @@ snic_isr_msix_err_notify(int irq, void *data) struct snic *snic = data; snic->s_stats.misc.last_isr_time = jiffies; - atomic64_inc(&snic->s_stats.misc.isr_cnt); + atomic64_inc(&snic->s_stats.misc.errnotify_isr_cnt); svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]); snic_log_q_error(snic); diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c index 2b3c25371d76..396b32dca074 100644 --- a/drivers/scsi/snic/snic_main.c +++ b/drivers/scsi/snic/snic_main.c @@ -98,11 +98,18 @@ snic_slave_configure(struct scsi_device *sdev) static int snic_change_queue_depth(struct scsi_device *sdev, int qdepth) { + struct snic *snic = shost_priv(sdev->host); int qsz = 0; qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); + if (qsz < sdev->queue_depth) + atomic64_inc(&snic->s_stats.misc.qsz_rampdown); + else if (qsz > sdev->queue_depth) + atomic64_inc(&snic->s_stats.misc.qsz_rampup); + + atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth); + scsi_change_queue_depth(sdev, qsz); - SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth); return sdev->queue_depth; } @@ -624,19 +631,6 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_tmreq_pool; } - /* - * Initialization done with PCI system, hardware, firmware. - * Add shost to SCSI - */ - ret = snic_add_host(shost, pdev); - if (ret) { - SNIC_HOST_ERR(shost, - "Adding scsi host Failed ... exiting. 
%d\n", - ret); - - goto err_notify_unset; - } - spin_lock_irqsave(&snic_glob->snic_list_lock, flags); list_add_tail(&snic->list, &snic_glob->snic_list); spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); @@ -669,8 +663,6 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < snic->intr_count; i++) svnic_intr_unmask(&snic->intr[i]); - snic_set_state(snic, SNIC_ONLINE); - /* Get snic params */ ret = snic_get_conf(snic); if (ret) { @@ -681,6 +673,21 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_get_conf; } + /* + * Initialization done with PCI system, hardware, firmware. + * Add shost to SCSI + */ + ret = snic_add_host(shost, pdev); + if (ret) { + SNIC_HOST_ERR(shost, + "Adding scsi host Failed ... exiting. %d\n", + ret); + + goto err_get_conf; + } + + snic_set_state(snic, SNIC_ONLINE); + ret = snic_disc_start(snic); if (ret) { SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n", @@ -705,6 +712,8 @@ err_req_intr: svnic_dev_disable(snic->vdev); err_vdev_enable: + svnic_dev_notify_unset(snic->vdev); + for (i = 0; i < snic->wq_count; i++) { int rc = 0; @@ -718,9 +727,6 @@ err_vdev_enable: } snic_del_host(snic->shost); -err_notify_unset: - svnic_dev_notify_unset(snic->vdev); - err_free_tmreq_pool: mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]); diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c index 2c7b4c321cbe..abada16b375b 100644 --- a/drivers/scsi/snic/snic_scsi.c +++ b/drivers/scsi/snic/snic_scsi.c @@ -221,11 +221,15 @@ snic_queue_icmnd_req(struct snic *snic, pa, /* sense buffer pa */ SCSI_SENSE_BUFFERSIZE); + atomic64_inc(&snic->s_stats.io.active); ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); - if (ret) + if (ret) { + atomic64_dec(&snic->s_stats.io.active); SNIC_HOST_ERR(snic->shost, "QIcmnd: Queuing Icmnd Failed. 
ret = %d\n", ret); + } else + snic_stats_update_active_ios(&snic->s_stats); return ret; } /* end of snic_queue_icmnd_req */ @@ -361,8 +365,7 @@ snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) if (ret) { SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret); ret = SCSI_MLQUEUE_HOST_BUSY; - } else - snic_stats_update_active_ios(&snic->s_stats); + } atomic_dec(&snic->ios_inflight); @@ -598,6 +601,12 @@ snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc), CMD_FLAGS(sc), rqi); + if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) { + spin_unlock_irqrestore(io_lock, flags); + + return; + } + SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx); WARN_ON_ONCE(req); if (!rqi) { @@ -779,6 +788,11 @@ snic_process_itmf_cmpl(struct snic *snic, io_lock = snic_io_lock_hash(snic, sc); spin_lock_irqsave(io_lock, flags); + if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) { + spin_unlock_irqrestore(io_lock, flags); + + return ret; + } rqi = (struct snic_req_info *) CMD_SP(sc); WARN_ON_ONCE(!rqi); @@ -1001,10 +1015,11 @@ snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) unsigned long flags, gflags; int ret = 0; + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); SNIC_HOST_INFO(snic->shost, - "reset_cmpl:HBA Reset Completion received.\n"); + "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n", + cmnd_id, ctx, snic_io_status_to_str(hdr_stat)); - snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); SNIC_SCSI_DBG(snic->shost, "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", typ, hdr_stat, cmnd_id, hid, ctx); @@ -1012,6 +1027,9 @@ snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) /* spl case, host reset issued through ioctl */ if (cmnd_id == SCSI_NO_TAG) { rqi = (struct snic_req_info *) ctx; + SNIC_HOST_INFO(snic->shost, + "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n", + cmnd_id, ctx, snic_io_status_to_str(hdr_stat)); sc = rqi->sc; goto ioctl_hba_rst; @@ -1038,6 +1056,10 @@ ioctl_hba_rst: return ret; } + SNIC_HOST_INFO(snic->shost, + "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n", + sc, rqi, cmnd_id, CMD_FLAGS(sc)); + io_lock = snic_io_lock_hash(snic, sc); spin_lock_irqsave(io_lock, flags); @@ -1454,11 +1476,19 @@ snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc) case SNIC_STAT_IO_SUCCESS: case SNIC_STAT_IO_NOT_FOUND: ret = SUCCESS; + /* + * If abort path doesn't call scsi_done(), + * the # IO timeouts == 2, will cause the LUN offline. + * Call scsi_done to complete the IO. 
+ */ + sc->result = (DID_ERROR << 16); + sc->scsi_done(sc); break; default: /* Firmware completed abort with error */ ret = FAILED; + rqi = NULL; break; } @@ -1554,6 +1584,7 @@ snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc) /* Now Queue the abort command to firmware */ ret = snic_queue_abort_req(snic, rqi, sc, tmf); if (ret) { + atomic64_inc(&snic->s_stats.abts.q_fail); SNIC_HOST_ERR(snic->shost, "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n", tag, ret, CMD_FLAGS(sc)); @@ -1830,6 +1861,9 @@ snic_dr_clean_single_req(struct snic *snic, snic_release_req_buf(snic, rqi, sc); + sc->result = (DID_ERROR << 16); + sc->scsi_done(sc); + ret = 0; return ret; @@ -2384,6 +2418,13 @@ snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc) "Completing Pending TM Req sc %p, state %s flags 0x%llx\n", sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc)); + /* + * CASE : FW didn't post itmf completion due to PCIe Errors. + * Marking the abort status as Success to call scsi completion + * in snic_abort_finish() + */ + CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS; + rqi = (struct snic_req_info *) CMD_SP(sc); if (!rqi) return; @@ -2459,8 +2500,9 @@ snic_scsi_cleanup(struct snic *snic, int ex_tag) cleanup: sc->result = DID_TRANSPORT_DISRUPTED << 16; SNIC_HOST_INFO(snic->shost, - "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n", - sc, rqi, (jiffies - st_time)); + "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n", + sc, sc->request->tag, CMD_FLAGS(sc), rqi, + jiffies_to_msecs(jiffies - st_time)); /* Update IO stats */ snic_stats_update_io_cmpl(&snic->s_stats); diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h index 11e614849a82..fd1066b1cad5 100644 --- a/drivers/scsi/snic/snic_stats.h +++ b/drivers/scsi/snic/snic_stats.h @@ -42,6 +42,7 @@ struct snic_abort_stats { atomic64_t drv_tmo; /* Abort Driver Timeouts */ atomic64_t fw_tmo; /* Abort Firmware Timeouts */ atomic64_t io_not_found;/* Abort IO Not Found */ + atomic64_t q_fail; /* Abort Queuing Failed */ }; struct snic_reset_stats { @@ -69,7 +70,9 @@ struct snic_fw_stats { struct snic_misc_stats { u64 last_isr_time; u64 last_ack_time; - atomic64_t isr_cnt; + atomic64_t ack_isr_cnt; + atomic64_t cmpl_isr_cnt; + atomic64_t errnotify_isr_cnt; atomic64_t max_cq_ents; /* Max CQ Entries */ atomic64_t data_cnt_mismat; /* Data Count Mismatch */ atomic64_t io_tmo; @@ -81,6 +84,9 @@ struct snic_misc_stats { atomic64_t no_icmnd_itmf_cmpls; atomic64_t io_under_run; atomic64_t qfull; + atomic64_t qsz_rampup; + atomic64_t qsz_rampdown; + atomic64_t last_qsz; atomic64_t tgt_not_rdy; }; @@ -101,9 +107,9 @@ static inline void snic_stats_update_active_ios(struct snic_stats *s_stats) { struct snic_io_stats *io = &s_stats->io; - u32 nr_active_ios; + int nr_active_ios; - nr_active_ios = atomic64_inc_return(&io->active); + nr_active_ios = atomic64_read(&io->active); if (atomic64_read(&io->max_active) < nr_active_ios) atomic64_set(&io->max_active, nr_active_ios); diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c index e0b5549bc9fb..dad5fc66effb 100644 --- a/drivers/scsi/snic/vnic_dev.c +++ b/drivers/scsi/snic/vnic_dev.c @@ -263,12 +263,20 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct devcmd2_controller *dc2c = vdev->devcmd2; - struct devcmd2_result *result = dc2c->result + dc2c->next_result; + struct devcmd2_result *result = NULL; unsigned int i; int delay; int err; u32 
posted; + u32 fetch_idx; u32 new_posted; + u8 color; + + fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index); + if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: return error */ + return -ENODEV; + } posted = ioread32(&dc2c->wq_ctrl->posted_index); @@ -278,6 +286,13 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, } new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + if (new_posted == fetch_idx) { + pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n", + pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted); + + return -EBUSY; + } + dc2c->cmd_ring[posted].cmd = cmd; dc2c->cmd_ring[posted].flags = 0; @@ -299,14 +314,22 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) return 0; + result = dc2c->result + dc2c->next_result; + color = dc2c->color; + + /* + * Increment next_result, after posting the devcmd, irrespective of + * devcmd result, and it should be done only once. + */ + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 0 : 1; + } + for (delay = 0; delay < wait; delay++) { udelay(100); - if (result->color == dc2c->color) { - dc2c->next_result++; - if (dc2c->next_result == dc2c->result_size) { - dc2c->next_result = 0; - dc2c->color = dc2c->color ? 0 : 1; - } + if (result->color == color) { if (result->error) { err = (int) result->error; if (err != ERR_ECMDUNKNOWN || @@ -317,13 +340,6 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { - /* - * Adding the rmb() prevents the compiler - * and/or CPU from reordering the reads which - * would potentially result in reading stale - * values. - */ - rmb(); for (i = 0; i < VNIC_DEVCMD_NARGS; i++) vdev->args[i] = result->results[i]; } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index dbf1882cfbac..7af5226aa55b 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -1974,9 +1974,12 @@ static long read_tape(struct scsi_tape *STp, long count, transfer = (int)cmdstatp->uremainder64; else transfer = 0; - if (STp->block_size == 0 && - cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) - transfer = bytes; + if (cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) { + if (STp->block_size == 0) + transfer = bytes; + /* Some drives set ILI with MEDIUM ERROR */ + cmdstatp->flags &= ~SENSE_ILI; + } if (cmdstatp->flags & SENSE_ILI) { /* ILI */ if (STp->block_size == 0 && diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index b9de487bbd31..3c4c07038948 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -36,14 +36,10 @@ #include <scsi/scsi_host.h> #include "sun3_scsi.h" -/* Definitions for the core NCR5380 driver. */ - -#define REAL_DMA -/* #define SUPPORT_TAGS */ /* minimum number of bytes to do dma on */ #define DMA_MIN_SIZE 129 -/* #define MAX_TAGS 32 */ +/* Definitions for the core NCR5380 driver. 
*/ #define NCR5380_implementation_fields /* none */ @@ -55,14 +51,12 @@ #define NCR5380_abort sun3scsi_abort #define NCR5380_info sun3scsi_info -#define NCR5380_dma_read_setup(instance, data, count) \ - sun3scsi_dma_setup(instance, data, count, 0) -#define NCR5380_dma_write_setup(instance, data, count) \ - sun3scsi_dma_setup(instance, data, count, 1) +#define NCR5380_dma_recv_setup(instance, data, count) (count) +#define NCR5380_dma_send_setup(instance, data, count) (count) #define NCR5380_dma_residual(instance) \ sun3scsi_dma_residual(instance) #define NCR5380_dma_xfer_len(instance, cmd, phase) \ - sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO)) + sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd) #define NCR5380_acquire_dma_irq(instance) (1) #define NCR5380_release_dma_irq(instance) @@ -78,10 +72,6 @@ static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); -#ifdef SUPPORT_TAGS -static int setup_use_tagged_queuing = -1; -module_param(setup_use_tagged_queuing, int, 0); -#endif static int setup_hostid = -1; module_param(setup_hostid, int, 0); @@ -263,14 +253,13 @@ static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) return last_residual; } -static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, - struct scsi_cmnd *cmd, - int write_flag) +static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len, + struct scsi_cmnd *cmd) { - if (cmd->request->cmd_type == REQ_TYPE_FS) - return wanted; - else + if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS) return 0; + + return wanted_len; } static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) @@ -408,7 +397,7 @@ static int sun3scsi_dma_finish(int write_flag) } -#include "atari_NCR5380.c" +#include "NCR5380.c" #ifdef SUN3_SCSI_VME #define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" @@ -516,10 +505,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) instance->io_port = (unsigned long)ioaddr; instance->irq = irq->start; -#ifdef SUPPORT_TAGS - host_flags |= setup_use_tagged_queuing > 0 ? 
FLAG_TAGGED_QUEUING : 0; -#endif - error = NCR5380_init(instance, host_flags); if (error) goto fail_init; @@ -527,15 +512,9 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) error = request_irq(instance->irq, scsi_sun3_intr, 0, "NCR5380", instance); if (error) { -#ifdef REAL_DMA pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n", instance->host_no, instance->irq); goto fail_irq; -#else - pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n", - instance->host_no, instance->irq); - instance->irq = NO_IRQ; -#endif } dregs->csr = 0; @@ -565,8 +544,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) return 0; fail_host: - if (instance->irq != NO_IRQ) - free_irq(instance->irq, instance); + free_irq(instance->irq, instance); fail_irq: NCR5380_exit(instance); fail_init: @@ -583,8 +561,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev) struct Scsi_Host *instance = platform_get_drvdata(pdev); scsi_remove_host(instance); - if (instance->irq != NO_IRQ) - free_irq(instance->irq, instance); + free_irq(instance->irq, instance); NCR5380_exit(instance); scsi_host_put(instance); if (udc_regs) diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index 4615fda60dbd..8a8608ac62e6 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -1,5 +1,3 @@ -#define PSEUDO_DMA - /* * Trantor T128/T128F/T228 driver * Note : architecturally, the T100 and T130 are different and won't @@ -76,7 +74,6 @@ #include <scsi/scsi_host.h> #include "t128.h" -#define AUTOPROBE_IRQ #include "NCR5380.h" static struct override { @@ -210,7 +207,7 @@ found: instance->base = base; ((struct NCR5380_hostdata *)instance->hostdata)->base = p; - if (NCR5380_init(instance, 0)) + if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP)) goto out_unregister; NCR5380_maybe_reset_bus(instance); @@ -294,7 +291,7 @@ static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, } /* - * Function : int NCR5380_pread (struct Scsi_Host *instance, + * Function : int t128_pread (struct Scsi_Host *instance, * unsigned char *dst, int len) * * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to @@ -306,8 +303,8 @@ static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, * timeout. */ -static inline int -NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) +static inline int t128_pread(struct Scsi_Host *instance, + unsigned char *dst, int len) { struct NCR5380_hostdata *hostdata = shost_priv(instance); void __iomem *reg, *base = hostdata->base; @@ -340,7 +337,7 @@ NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) } /* - * Function : int NCR5380_pwrite (struct Scsi_Host *instance, + * Function : int t128_pwrite (struct Scsi_Host *instance, * unsigned char *src, int len) * * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from @@ -352,8 +349,8 @@ NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) * timeout. 
*/ -static inline int -NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) +static inline int t128_pwrite(struct Scsi_Host *instance, + unsigned char *src, int len) { struct NCR5380_hostdata *hostdata = shost_priv(instance); void __iomem *reg, *base = hostdata->base; @@ -394,8 +391,6 @@ static struct scsi_host_template driver_template = { .detect = t128_detect, .release = t128_release, .proc_name = "t128", - .show_info = t128_show_info, - .write_info = t128_write_info, .info = t128_info, .queuecommand = t128_queue_command, .eh_abort_handler = t128_abort, diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index dd16d85497e1..c95bcd839109 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h @@ -77,14 +77,17 @@ #define NCR5380_write(reg, value) writeb((value),(T128_address(reg))) #define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize) +#define NCR5380_dma_recv_setup t128_pread +#define NCR5380_dma_send_setup t128_pwrite +#define NCR5380_dma_residual(instance) (0) #define NCR5380_intr t128_intr #define NCR5380_queue_command t128_queue_command #define NCR5380_abort t128_abort #define NCR5380_bus_reset t128_bus_reset #define NCR5380_info t128_info -#define NCR5380_show_info t128_show_info -#define NCR5380_write_info t128_write_info + +#define NCR5380_io_delay(x) udelay(x) /* 15 14 12 10 7 5 3 1101 0100 1010 1000 */ diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 90901861bfc0..ae85861051eb 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -563,7 +563,7 @@ static const struct scsi_host_template usb_stor_host_template = { .target_alloc = target_alloc, /* lots of sg segments can be handled */ - .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, + .sg_tablesize = SG_MAX_SEGMENTS, /* limit the total size of a transfer to 120 KB */ .max_sectors = 240, diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 556ec1ea2574..cb3c8fe6acd7 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -286,6 +286,31 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) /* + * The maximum number of SG segments that we will put inside a + * scatterlist (unless chaining is used). Should ideally fit inside a + * single page, to avoid a higher order allocation. We could define this + * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The + * minimum value is 32 + */ +#define SG_CHUNK_SIZE 128 + +/* + * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit + * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. + */ +#ifdef CONFIG_ARCH_HAS_SG_CHAIN +#define SG_MAX_SEGMENTS 2048 +#else +#define SG_MAX_SEGMENTS SG_CHUNK_SIZE +#endif + +#ifdef CONFIG_SG_POOL +void sg_free_table_chained(struct sg_table *table, bool first_chunk); +int sg_alloc_table_chained(struct sg_table *table, int nents, + struct scatterlist *first_chunk); +#endif + +/* * sg page iterator * * Iterates over sg entries page-by-page. On each successful iteration, diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index e0a3398b1547..8ec7c30e35af 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -18,25 +18,6 @@ enum scsi_timeouts { }; /* - * The maximum number of SG segments that we will put inside a - * scatterlist (unless chaining is used). Should ideally fit inside a - * single page, to avoid a higher order allocation. 
We could define this - * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The - * minimum value is 32 - */ -#define SCSI_MAX_SG_SEGMENTS 128 - -/* - * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit - * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. - */ -#ifdef CONFIG_ARCH_HAS_SG_CHAIN -#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048 -#else -#define SCSI_MAX_SG_CHAIN_SEGMENTS SCSI_MAX_SG_SEGMENTS -#endif - -/* * DIX-capable adapters effectively support infinite chaining for the * protection information scatterlist */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 74d79bde7075..a6c346df290d 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -50,6 +50,12 @@ enum scsi_device_state { SDEV_CREATED_BLOCK, /* same as above but for created devices */ }; +enum scsi_scan_mode { + SCSI_SCAN_INITIAL = 0, + SCSI_SCAN_RESCAN, + SCSI_SCAN_MANUAL, +}; + enum scsi_device_event { SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */ SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */ @@ -242,6 +248,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...); enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING, + STARGET_REMOVE, STARGET_DEL, }; @@ -391,7 +398,8 @@ extern void scsi_device_resume(struct scsi_device *sdev); extern void scsi_target_quiesce(struct scsi_target *); extern void scsi_target_resume(struct scsi_target *); extern void scsi_scan_target(struct device *parent, unsigned int channel, - unsigned int id, u64 lun, int rescan); + unsigned int id, u64 lun, + enum scsi_scan_mode rescan); extern void scsi_target_reap(struct scsi_target *); extern void scsi_target_block(struct device *); extern void scsi_target_unblock(struct device *, enum scsi_device_state); @@ -534,9 +542,9 @@ static inline int scsi_device_supports_vpd(struct scsi_device *sdev) /* * Although VPD inquiries can go to SCSI-2 type devices, * some USB ones crash on receiving them, and the pages - * we currently ask for are for SPC-3 and beyond + * we currently ask for are mandatory for SPC-2 and beyond */ - if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages) + if (sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages) return 1; return 0; } diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index fcfa3d7f5e7e..76e9d278c334 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -37,7 +37,7 @@ struct blk_queue_tags; * used in one scatter-gather request. 
*/ #define SG_NONE 0 -#define SG_ALL SCSI_MAX_SG_SEGMENTS +#define SG_ALL SG_CHUNK_SIZE #define MODE_UNKNOWN 0x00 #define MODE_INITIATOR 0x01 diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index c2ae21cbaa2c..d1defd1ebd95 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -115,6 +115,8 @@ #define VERIFY_16 0x8f #define SYNCHRONIZE_CACHE_16 0x91 #define WRITE_SAME_16 0x93 +#define ZBC_OUT 0x94 +#define ZBC_IN 0x95 #define SERVICE_ACTION_BIDIRECTIONAL 0x9d #define SERVICE_ACTION_IN_16 0x9e #define SERVICE_ACTION_OUT_16 0x9f @@ -143,6 +145,13 @@ #define MO_SET_PRIORITY 0x0e #define MO_SET_TIMESTAMP 0x0f #define MO_MANAGEMENT_PROTOCOL_OUT 0x10 +/* values for ZBC_IN */ +#define ZI_REPORT_ZONES 0x00 +/* values for ZBC_OUT */ +#define ZO_CLOSE_ZONE 0x01 +#define ZO_FINISH_ZONE 0x02 +#define ZO_OPEN_ZONE 0x03 +#define ZO_RESET_WRITE_POINTER 0x04 /* values for variable length command */ #define XDREAD_32 0x03 #define XDWRITE_32 0x04 diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index 079bd10a01b4..9a9b3e2550af 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -94,11 +94,9 @@ scsi_opcode_name(WRITE_16), \ scsi_opcode_name(VERIFY_16), \ scsi_opcode_name(WRITE_SAME_16), \ + scsi_opcode_name(ZBC_OUT), \ + scsi_opcode_name(ZBC_IN), \ scsi_opcode_name(SERVICE_ACTION_IN_16), \ - scsi_opcode_name(SAI_READ_CAPACITY_16), \ - scsi_opcode_name(SAI_GET_LBA_STATUS), \ - scsi_opcode_name(MI_REPORT_TARGET_PGS), \ - scsi_opcode_name(MO_SET_TARGET_PGS), \ scsi_opcode_name(READ_32), \ scsi_opcode_name(WRITE_32), \ scsi_opcode_name(WRITE_SAME_32), \ diff --git a/lib/Kconfig b/lib/Kconfig index 3cca1222578e..61d55bd0ed89 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -523,6 +523,13 @@ config SG_SPLIT a scatterlist. This should be selected by a driver or an API which whishes to split a scatterlist amongst multiple DMA channels. +config SG_POOL + def_bool n + help + Provides a helper to allocate chained scatterlists. This should be + selected by a driver or an API which whishes to allocate chained + scatterlist. 
+ # # sg chaining option # diff --git a/lib/Makefile b/lib/Makefile index a65e9a861535..931396ada5eb 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -178,6 +178,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o obj-$(CONFIG_SG_SPLIT) += sg_split.o +obj-$(CONFIG_SG_POOL) += sg_pool.o obj-$(CONFIG_STMP_DEVICE) += stmp_device.o obj-$(CONFIG_IRQ_POLL) += irq_poll.o diff --git a/lib/sg_pool.c b/lib/sg_pool.c new file mode 100644 index 000000000000..6dd30615a201 --- /dev/null +++ b/lib/sg_pool.c @@ -0,0 +1,172 @@ +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/mempool.h> +#include <linux/slab.h> + +#define SG_MEMPOOL_NR ARRAY_SIZE(sg_pools) +#define SG_MEMPOOL_SIZE 2 + +struct sg_pool { + size_t size; + char *name; + struct kmem_cache *slab; + mempool_t *pool; +}; + +#define SP(x) { .size = x, "sgpool-" __stringify(x) } +#if (SG_CHUNK_SIZE < 32) +#error SG_CHUNK_SIZE is too small (must be 32 or greater) +#endif +static struct sg_pool sg_pools[] = { + SP(8), + SP(16), +#if (SG_CHUNK_SIZE > 32) + SP(32), +#if (SG_CHUNK_SIZE > 64) + SP(64), +#if (SG_CHUNK_SIZE > 128) + SP(128), +#if (SG_CHUNK_SIZE > 256) +#error SG_CHUNK_SIZE is too large (256 MAX) +#endif +#endif +#endif +#endif + SP(SG_CHUNK_SIZE) +}; +#undef SP + +static inline unsigned int sg_pool_index(unsigned short nents) +{ + unsigned int index; + + BUG_ON(nents > SG_CHUNK_SIZE); + + if (nents <= 8) + index = 0; + else + index = get_count_order(nents) - 3; + + return index; +} + +static void sg_pool_free(struct scatterlist *sgl, unsigned int nents) +{ + struct sg_pool *sgp; + + sgp = sg_pools + sg_pool_index(nents); + mempool_free(sgl, sgp->pool); +} + +static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask) +{ + struct sg_pool *sgp; + + sgp = sg_pools + sg_pool_index(nents); + return mempool_alloc(sgp->pool, gfp_mask); +} + +/** + * sg_free_table_chained - Free a previously mapped sg table + * @table: The sg table header to use + * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained? + * + * Description: + * Free an sg table previously allocated and setup with + * sg_alloc_table_chained(). + * + **/ +void sg_free_table_chained(struct sg_table *table, bool first_chunk) +{ + if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE) + return; + __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free); +} +EXPORT_SYMBOL_GPL(sg_free_table_chained); + +/** + * sg_alloc_table_chained - Allocate and chain SGLs in an sg table + * @table: The sg table header to use + * @nents: Number of entries in sg list + * @first_chunk: first SGL + * + * Description: + * Allocate and chain SGLs in an sg table. If @nents@ is larger than + * SG_CHUNK_SIZE a chained sg table will be setup. 
+ * + **/ +int sg_alloc_table_chained(struct sg_table *table, int nents, + struct scatterlist *first_chunk) +{ + int ret; + + BUG_ON(!nents); + + if (first_chunk) { + if (nents <= SG_CHUNK_SIZE) { + table->nents = table->orig_nents = nents; + sg_init_table(table->sgl, nents); + return 0; + } + } + + ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE, + first_chunk, GFP_ATOMIC, sg_pool_alloc); + if (unlikely(ret)) + sg_free_table_chained(table, (bool)first_chunk); + return ret; +} +EXPORT_SYMBOL_GPL(sg_alloc_table_chained); + +static __init int sg_pool_init(void) +{ + int i; + + for (i = 0; i < SG_MEMPOOL_NR; i++) { + struct sg_pool *sgp = sg_pools + i; + int size = sgp->size * sizeof(struct scatterlist); + + sgp->slab = kmem_cache_create(sgp->name, size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!sgp->slab) { + printk(KERN_ERR "SG_POOL: can't init sg slab %s\n", + sgp->name); + goto cleanup_sdb; + } + + sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, + sgp->slab); + if (!sgp->pool) { + printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n", + sgp->name); + goto cleanup_sdb; + } + } + + return 0; + +cleanup_sdb: + for (i = 0; i < SG_MEMPOOL_NR; i++) { + struct sg_pool *sgp = sg_pools + i; + if (sgp->pool) + mempool_destroy(sgp->pool); + if (sgp->slab) + kmem_cache_destroy(sgp->slab); + } + + return -ENOMEM; +} + +static __exit void sg_pool_exit(void) +{ + int i; + + for (i = 0; i < SG_MEMPOOL_NR; i++) { + struct sg_pool *sgp = sg_pools + i; + mempool_destroy(sgp->pool); + kmem_cache_destroy(sgp->slab); + } +} + +module_init(sg_pool_init); +module_exit(sg_pool_exit); |
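
The new lib/sg_pool.c code above exposes two entry points, sg_alloc_table_chained() and sg_free_table_chained(), which the diff wires up behind CONFIG_SG_POOL and uses to replace the old SCSI_MAX_SG_SEGMENTS / SCSI_MAX_SG_CHAIN_SEGMENTS limits with SG_CHUNK_SIZE and SG_MAX_SEGMENTS. The following is a minimal usage sketch, not taken from the commit: it assumes CONFIG_SG_POOL=y, and the names my_cmd, my_map_data and my_unmap_data are illustrative only.

/*
 * Usage sketch for the chained-SG helpers (illustrative, not part of
 * the patch).  A driver embeds a small inline "first chunk" in its
 * per-command data and only falls back to the sg_pool mempools when a
 * request needs more than SG_CHUNK_SIZE entries.
 */
#include <linux/scatterlist.h>

struct my_cmd {
	struct sg_table table;
	/* Inline first chunk, used directly whenever nents fits in it. */
	struct scatterlist inline_sg[SG_CHUNK_SIZE];
};

static int my_map_data(struct my_cmd *cmd, int nents)
{
	int ret;

	/*
	 * For nents <= SG_CHUNK_SIZE this only initialises the inline
	 * array; larger requests get additional chunks from the sg_pool
	 * mempools, chained together up to SG_MAX_SEGMENTS.
	 */
	ret = sg_alloc_table_chained(&cmd->table, nents, cmd->inline_sg);
	if (ret)
		return ret;

	/* ... populate cmd->table.sgl and map it for DMA here ... */

	return 0;
}

static void my_unmap_data(struct my_cmd *cmd)
{
	/* "true" because a non-NULL first_chunk was passed at allocation. */
	sg_free_table_chained(&cmd->table, true);
}

If no inline array is available, passing NULL as first_chunk makes sg_alloc_table_chained() draw every chunk from the mempools; the matching teardown is then sg_free_table_chained(&cmd->table, false) so the first chunk is freed back to the pool as well.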