author     Linus Torvalds <torvalds@linux-foundation.org>  2024-07-15 23:20:22 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-07-15 23:20:22 +0200
commit     3e7819886281e077e82006fe4804b0d6b0f5643b (patch)
tree       40766af623d8a1dde0edaee8b6abc496efbcc615 /drivers/scsi/scsi_debug.c
parent     Merge tag 'for-6.11/io_uring-20240714' of git://git.kernel.dk/linux (diff)
parent     floppy: add missing MODULE_DESCRIPTION() macro (diff)
Merge tag 'for-6.11/block-20240710' of git://git.kernel.dk/linux
Pull block updates from Jens Axboe:
- NVMe updates via Keith:
     - Device initialization memory leak fixes (Keith)
     - More constants defined (Weiwen)
     - Target debugfs support (Hannes)
     - PCIe subsystem reset enhancements (Keith)
     - Queue-depth multipath policy (Redhat and PureStorage)
     - Implement get_unique_id (Christoph)
     - Authentication error fixes (Gaosheng)
- MD updates via Song:
     - sync_action fix and refactoring (Yu Kuai)
     - Various small fixes (Christoph Hellwig, Li Nan, Ofir Gal, Yu Kuai,
       Benjamin Marzinski, Christophe JAILLET, and Yang Li)
- Fix loop detach/open race (Gulam)
- Fix lower control limit for blk-throttle (Yu)
- Add module descriptions to various drivers (Jeff)
- Add support for atomic writes for block devices, and statx reporting
  for same. Includes SCSI and NVMe (John, Prasad, Alan). A userspace
  usage sketch follows this list.
- Add IO priority information to block trace points (Dongliang)
- Various zone improvements and tweaks (Damien)
- mq-deadline tag reservation improvements (Bart)
- Ignore direct reclaim swap writes in writeback throttling (Baokun)
- Block integrity improvements and fixes (Anuj)
- Add basic support for Rust-based block drivers. Has a dummy null_blk
  variant for now (Andreas)
- Series converting driver settings to queue limits, and cleanups and
fixes related to that (Christoph)
- Cleanup for poking too deeply into the bvec internals, in preparation
for DMA mapping API changes (Christoph)
- Various minor tweaks and fixes (Jiapeng, John, Kanchan, Mikulas,
Ming, Zhu, Damien, Christophe, Chaitanya)
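
On the atomic-writes item above: from userspace, a block device's untorn-write
limits surface through statx(2), and individual I/Os opt in by passing
RWF_ATOMIC to pwritev2(2). Below is a minimal sketch, assuming 6.11-era
kernel and libc headers that define STATX_WRITE_ATOMIC, STATX_ATTR_WRITE_ATOMIC,
the stx_atomic_write_* fields, and RWF_ATOMIC; the device path is a placeholder.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/fs.h>		/* RWF_ATOMIC (6.11-era uapi headers) */

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/sdX";	/* placeholder */
	struct statx stx;
	struct iovec iov;
	void *buf;
	size_t len;
	int fd;

	/* Ask for the atomic write limits alongside the basic stats */
	if (statx(AT_FDCWD, path, 0,
		  STATX_BASIC_STATS | STATX_WRITE_ATOMIC, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (!(stx.stx_attributes & STATX_ATTR_WRITE_ATOMIC)) {
		fprintf(stderr, "%s: no untorn write support\n", path);
		return 1;
	}
	printf("atomic unit min %u, max %u, max segments %u\n",
	       stx.stx_atomic_write_unit_min, stx.stx_atomic_write_unit_max,
	       stx.stx_atomic_write_segments_max);

	fd = open(path, O_WRONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* One minimum-unit write: length and offset must respect the
	 * advertised power-of-two unit, so size the buffer to unit_min. */
	len = stx.stx_atomic_write_unit_min;
	if (posix_memalign(&buf, len, len) != 0) {
		close(fd);
		return 1;
	}
	memset(buf, 0x5a, len);

	iov.iov_base = buf;
	iov.iov_len = len;
	/* RWF_ATOMIC: this write lands untorn or fails as a whole */
	if (pwritev2(fd, &iov, 1, 0, RWF_ATOMIC) < 0)
		perror("pwritev2(RWF_ATOMIC)");

	free(buf);
	close(fd);
	return 0;
}

The buffer is sized and aligned to stx_atomic_write_unit_min because both the
I/O length and the file offset must conform to the advertised unit limits.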
* tag 'for-6.11/block-20240710' of git://git.kernel.dk/linux: (206 commits)
floppy: add missing MODULE_DESCRIPTION() macro
loop: add missing MODULE_DESCRIPTION() macro
ublk_drv: add missing MODULE_DESCRIPTION() macro
xen/blkback: add missing MODULE_DESCRIPTION() macro
block/rnbd: Constify struct kobj_type
block: take offset into account in blk_bvec_map_sg again
block: fix get_max_segment_size() warning
loop: Don't bother validating blocksize
virtio_blk: Don't bother validating blocksize
null_blk: Don't bother validating blocksize
block: Validate logical block size in blk_validate_limits()
virtio_blk: Fix default logical block size fallback
nvmet-auth: fix nvmet_auth hash error handling
nvme: implement ->get_unique_id
block: pass a phys_addr_t to get_max_segment_size
block: add a bvec_phys helper
blk-lib: check for kill signal in ioctl BLKZEROOUT
block: limit the Write Zeroes to manually writing zeroes fallback
block: refacto blkdev_issue_zeroout
block: move read-only and supported checks into (__)blkdev_issue_zeroout
...
Diffstat (limited to 'drivers/scsi/scsi_debug.c')
-rw-r--r--  drivers/scsi/scsi_debug.c  588
1 file changed, 454 insertions(+), 134 deletions(-)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 91f022fb8d0c..a9d8a9c62663 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -69,6 +69,8 @@ static const char *sdebug_version_date = "20210520";
 
 /* Additional Sense Code (ASC) */
 #define NO_ADDITIONAL_SENSE 0x0
+#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
+#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
 #define LOGICAL_UNIT_NOT_READY 0x4
 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
 #define UNRECOVERED_READ_ERR 0x11
@@ -103,6 +105,7 @@ static const char *sdebug_version_date = "20210520";
 #define READ_BOUNDARY_ASCQ 0x7
 #define ATTEMPT_ACCESS_GAP 0x9
 #define INSUFF_ZONE_ASCQ 0xe
+/* see drivers/scsi/sense_codes.h */
 
 /* Additional Sense Code Qualifier (ASCQ) */
 #define ACK_NAK_TO 0x3
@@ -152,6 +155,12 @@ static const char *sdebug_version_date = "20210520";
 #define DEF_VIRTUAL_GB 0
 #define DEF_VPD_USE_HOSTNO 1
 #define DEF_WRITESAME_LENGTH 0xFFFF
+#define DEF_ATOMIC_WR 0
+#define DEF_ATOMIC_WR_MAX_LENGTH 8192
+#define DEF_ATOMIC_WR_ALIGN 2
+#define DEF_ATOMIC_WR_GRAN 2
+#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
+#define DEF_ATOMIC_WR_MAX_BNDRY 128
 #define DEF_STRICT 0
 #define DEF_STATISTICS false
 #define DEF_SUBMIT_QUEUES 1
@@ -374,7 +383,9 @@ struct sdebug_host_info {
 
 /* There is an xarray of pointers to this struct's objects, one per host */
 struct sdeb_store_info {
-	rwlock_t macc_lck;	/* for atomic media access on this store */
+	rwlock_t macc_data_lck;	/* for media data access on this store */
+	rwlock_t macc_meta_lck;	/* for atomic media meta access on this store */
+	rwlock_t macc_sector_lck;	/* per-sector media data access on this store */
 	u8 *storep;		/* user data storage (ram) */
 	struct t10_pi_tuple *dif_storep; /* protection info */
 	void *map_storep;	/* provisioning map */
@@ -398,12 +409,20 @@ struct sdebug_defer {
 	enum sdeb_defer_type defer_t;
 };
 
+struct sdebug_device_access_info {
+	bool atomic_write;
+	u64 lba;
+	u32 num;
+	struct scsi_cmnd *self;
+};
+
 struct sdebug_queued_cmd {
 	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
 	 * instance indicates this slot is in use.
 	 */
 	struct sdebug_defer sd_dp;
 	struct scsi_cmnd *scmd;
+	struct sdebug_device_access_info *i;
 };
 
 struct sdebug_scsi_cmd {
@@ -463,7 +482,8 @@ enum sdeb_opcode_index {
 	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
 	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
 	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
-	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
+	SDEB_I_ATOMIC_WRITE_16 = 32,
+	SDEB_I_LAST_ELEM_P1 = 33,	/* keep this last (previous + 1) */
 };
 
@@ -497,7 +517,8 @@ static const unsigned char opcode_ind_arr[256] = {
 	0, 0, 0, SDEB_I_VERIFY,
 	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
 	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
-	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
+	0, 0, 0, 0,
+	SDEB_I_ATOMIC_WRITE_16, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
 	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
 	SDEB_I_MAINT_OUT, 0, 0, 0,
@@ -547,6 +568,7 @@ static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -788,6 +810,11 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
 	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
 		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
 		 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
+/* 31 */
+	{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
+	    resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
+		{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
 /* sentinel */
 	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
 	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -835,6 +862,13 @@ static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
+static unsigned int sdebug_atomic_wr = DEF_ATOMIC_WR;
+static unsigned int sdebug_atomic_wr_max_length = DEF_ATOMIC_WR_MAX_LENGTH;
+static unsigned int sdebug_atomic_wr_align = DEF_ATOMIC_WR_ALIGN;
+static unsigned int sdebug_atomic_wr_gran = DEF_ATOMIC_WR_GRAN;
+static unsigned int sdebug_atomic_wr_max_length_bndry =
+			DEF_ATOMIC_WR_MAX_LENGTH_BNDRY;
+static unsigned int sdebug_atomic_wr_max_bndry = DEF_ATOMIC_WR_MAX_BNDRY;
 static int sdebug_uuid_ctl = DEF_UUID_CTL;
 static bool sdebug_random = DEF_RANDOM;
 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
@@ -1192,6 +1226,11 @@ static inline bool scsi_debug_lbp(void)
 		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 }
 
+static inline bool scsi_debug_atomic_write(void)
+{
+	return sdebug_fake_rw == 0 && sdebug_atomic_wr;
+}
+
 static void *lba2fake_store(struct sdeb_store_info *sip,
 			    unsigned long long lba)
 {
@@ -1819,6 +1858,14 @@ static int inquiry_vpd_b0(unsigned char *arr)
 	/* Maximum WRITE SAME Length */
 	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
 
+	if (sdebug_atomic_wr) {
+		put_unaligned_be32(sdebug_atomic_wr_max_length, &arr[40]);
+		put_unaligned_be32(sdebug_atomic_wr_align, &arr[44]);
+		put_unaligned_be32(sdebug_atomic_wr_gran, &arr[48]);
+		put_unaligned_be32(sdebug_atomic_wr_max_length_bndry, &arr[52]);
+		put_unaligned_be32(sdebug_atomic_wr_max_bndry, &arr[56]);
+	}
+
 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
 }
 
@@ -3381,16 +3428,238 @@ static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
 	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
 }
 
+static inline void
+sdeb_read_lock(rwlock_t *lock)
+{
+	if (sdebug_no_rwlock)
+		__acquire(lock);
+	else
+		read_lock(lock);
+}
+
+static inline void
+sdeb_read_unlock(rwlock_t *lock)
+{
+	if (sdebug_no_rwlock)
+		__release(lock);
+	else
+		read_unlock(lock);
+}
+
+static inline void
+sdeb_write_lock(rwlock_t *lock)
+{
+	if (sdebug_no_rwlock)
+		__acquire(lock);
+	else
+		write_lock(lock);
+}
+
+static inline void
+sdeb_write_unlock(rwlock_t *lock)
+{
+	if (sdebug_no_rwlock)
+		__release(lock);
+	else
+		write_unlock(lock);
+}
+
+static inline void
+sdeb_data_read_lock(struct sdeb_store_info *sip)
+{
+	BUG_ON(!sip);
+
+	sdeb_read_lock(&sip->macc_data_lck);
+}
+
+static inline void
+sdeb_data_read_unlock(struct sdeb_store_info *sip)
+{
+	BUG_ON(!sip);
+
+	sdeb_read_unlock(&sip->macc_data_lck);
+}
+
+static inline void
+sdeb_data_write_lock(struct sdeb_store_info *sip)
+{
+	BUG_ON(!sip);
+
+	sdeb_write_lock(&sip->macc_data_lck);
+}
+
+static inline void
+sdeb_data_write_unlock(struct sdeb_store_info *sip)
+{
+	BUG_ON(!sip);
+
+	sdeb_write_unlock(&sip->macc_data_lck);
+}
+
+static inline void
+sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
+{
+	BUG_ON(!sip);
+
+	sdeb_read_lock(&sip->macc_sector_lck);
+}
+
+static inline void
+sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
+{
+	BUG_ON(!sip);
+
+	sdeb_read_unlock(&sip->macc_sector_lck);
+}
+
+static inline void
+sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
+{
+	BUG_ON(!sip);
+
+	sdeb_write_lock(&sip->macc_sector_lck);
+}
+
+static inline void
+sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
+{
+	BUG_ON(!sip);
+
+	sdeb_write_unlock(&sip->macc_sector_lck);
+}
+
+/*
+ * Atomic locking:
+ * We simplify the atomic model to allow only 1x atomic write and many non-
+ * atomic reads or writes for all LBAs.
+
+ * A RW lock has a similar bahaviour:
+ * Only 1x writer and many readers.
+
+ * So use a RW lock for per-device read and write locking:
+ * An atomic access grabs the lock as a writer and non-atomic grabs the lock
+ * as a reader.
+ */
+
+static inline void
+sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
+{
+	if (atomic)
+		sdeb_data_write_lock(sip);
+	else
+		sdeb_data_read_lock(sip);
+}
+
+static inline void
+sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
+{
+	if (atomic)
+		sdeb_data_write_unlock(sip);
+	else
+		sdeb_data_read_unlock(sip);
+}
+
+/* Allow many reads but only 1x write per sector */
+static inline void
+sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
+{
+	if (do_write)
+		sdeb_data_sector_write_lock(sip);
+	else
+		sdeb_data_sector_read_lock(sip);
+}
+
+static inline void
+sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
+{
+	if (do_write)
+		sdeb_data_sector_write_unlock(sip);
+	else
+		sdeb_data_sector_read_unlock(sip);
+}
+
+static inline void
+sdeb_meta_read_lock(struct sdeb_store_info *sip)
+{
+	if (sdebug_no_rwlock) {
+		if (sip)
+			__acquire(&sip->macc_meta_lck);
+		else
+			__acquire(&sdeb_fake_rw_lck);
+	} else {
+		if (sip)
+			read_lock(&sip->macc_meta_lck);
+		else
+			read_lock(&sdeb_fake_rw_lck);
+	}
+}
+
+static inline void
+sdeb_meta_read_unlock(struct sdeb_store_info *sip)
+{
+	if (sdebug_no_rwlock) {
+		if (sip)
+			__release(&sip->macc_meta_lck);
+		else
+			__release(&sdeb_fake_rw_lck);
+	} else {
+		if (sip)
+			read_unlock(&sip->macc_meta_lck);
+		else
+			read_unlock(&sdeb_fake_rw_lck);
+	}
+}
+
+static inline void
+sdeb_meta_write_lock(struct sdeb_store_info *sip)
+{
+	if (sdebug_no_rwlock) {
+		if (sip)
+			__acquire(&sip->macc_meta_lck);
+		else
+			__acquire(&sdeb_fake_rw_lck);
+	} else {
+		if (sip)
+			write_lock(&sip->macc_meta_lck);
+		else
+			write_lock(&sdeb_fake_rw_lck);
+	}
+}
+
+static inline void
+sdeb_meta_write_unlock(struct sdeb_store_info *sip)
+{
+	if (sdebug_no_rwlock) {
+		if (sip)
+			__release(&sip->macc_meta_lck);
+		else
+			__release(&sdeb_fake_rw_lck);
+	} else {
+		if (sip)
+			write_unlock(&sip->macc_meta_lck);
+		else
+			write_unlock(&sdeb_fake_rw_lck);
+	}
+}
+
 /* Returns number of bytes copied or -1 if error. */
 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
-			    u32 sg_skip, u64 lba, u32 num, bool do_write,
-			    u8 group_number)
+			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
+			    bool do_write, bool atomic)
 {
 	int ret;
-	u64 block, rest = 0;
+	u64 block;
 	enum dma_data_direction dir;
 	struct scsi_data_buffer *sdb = &scp->sdb;
 	u8 *fsp;
+	int i;
+
+	/*
+	 * Even though reads are inherently atomic (in this driver), we expect
+	 * the atomic flag only for writes.
+	 */
+	if (!do_write && atomic)
+		return -1;
 
 	if (do_write) {
 		dir = DMA_TO_DEVICE;
@@ -3410,21 +3679,26 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
 		fsp = sip->storep;
 
 	block = do_div(lba, sdebug_store_sectors);
-	if (block + num > sdebug_store_sectors)
-		rest = block + num - sdebug_store_sectors;
 
-	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+	/* Only allow 1x atomic write or multiple non-atomic writes at any given time */
+	sdeb_data_lock(sip, atomic);
+	for (i = 0; i < num; i++) {
+		/* We shouldn't need to lock for atomic writes, but do it anyway */
+		sdeb_data_sector_lock(sip, do_write);
+		ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
 		   fsp + (block * sdebug_sector_size),
-		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
-	if (ret != (num - rest) * sdebug_sector_size)
-		return ret;
-
-	if (rest) {
-		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
-			    fsp, rest * sdebug_sector_size,
-			    sg_skip + ((num - rest) * sdebug_sector_size),
-			    do_write);
+		   sdebug_sector_size, sg_skip, do_write);
+		sdeb_data_sector_unlock(sip, do_write);
+		if (ret != sdebug_sector_size) {
+			ret += (i * sdebug_sector_size);
+			break;
+		}
+		sg_skip += sdebug_sector_size;
+		if (++block >= sdebug_store_sectors)
+			block = 0;
 	}
+	ret = num * sdebug_sector_size;
+	sdeb_data_unlock(sip, atomic);
 
 	return ret;
 }
@@ -3600,70 +3874,6 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
 	return ret;
 }
 
-static inline void
-sdeb_read_lock(struct sdeb_store_info *sip)
-{
-	if (sdebug_no_rwlock) {
-		if (sip)
-			__acquire(&sip->macc_lck);
-		else
-			__acquire(&sdeb_fake_rw_lck);
-	} else {
-		if (sip)
-			read_lock(&sip->macc_lck);
-		else
-			read_lock(&sdeb_fake_rw_lck);
-	}
-}
-
-static inline void
-sdeb_read_unlock(struct sdeb_store_info *sip)
-{
-	if (sdebug_no_rwlock) {
-		if (sip)
-			__release(&sip->macc_lck);
-		else
-			__release(&sdeb_fake_rw_lck);
-	} else {
-		if (sip)
-			read_unlock(&sip->macc_lck);
-		else
-			read_unlock(&sdeb_fake_rw_lck);
-	}
-}
-
-static inline void
-sdeb_write_lock(struct sdeb_store_info *sip)
-{
-	if (sdebug_no_rwlock) {
-		if (sip)
-			__acquire(&sip->macc_lck);
-		else
-			__acquire(&sdeb_fake_rw_lck);
-	} else {
-		if (sip)
-			write_lock(&sip->macc_lck);
-		else
-			write_lock(&sdeb_fake_rw_lck);
-	}
-}
-
-static inline void
-sdeb_write_unlock(struct sdeb_store_info *sip)
-{
-	if (sdebug_no_rwlock) {
-		if (sip)
-			__release(&sip->macc_lck);
-		else
-			__release(&sdeb_fake_rw_lck);
-	} else {
-		if (sip)
-			write_unlock(&sip->macc_lck);
-		else
-			write_unlock(&sdeb_fake_rw_lck);
-	}
-}
-
 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 {
 	bool check_prot;
@@ -3673,6 +3883,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	u64 lba;
 	struct sdeb_store_info *sip = devip2sip(devip, true);
 	u8 *cmd = scp->cmnd;
+	bool meta_data_locked = false;
 
 	switch (cmd[0]) {
 	case READ_16:
@@ -3731,6 +3942,10 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		atomic_set(&sdeb_inject_pending, 0);
 	}
 
+	/*
+	 * When checking device access params, for reads we only check data
+	 * versus what is set at init time, so no need to lock.
+	 */
 	ret = check_device_access_params(scp, lba, num, false);
 	if (ret)
 		return ret;
@@ -3750,29 +3965,33 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		return check_condition_result;
 	}
 
-	sdeb_read_lock(sip);
+	if (sdebug_dev_is_zoned(devip) ||
+	    (sdebug_dix && scsi_prot_sg_count(scp))) {
+		sdeb_meta_read_lock(sip);
+		meta_data_locked = true;
+	}
 
 	/* DIX + T10 DIF */
 	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
 		switch (prot_verify_read(scp, lba, num, ei_lba)) {
 		case 1: /* Guard tag error */
 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
-				sdeb_read_unlock(sip);
+				sdeb_meta_read_unlock(sip);
 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
 				return check_condition_result;
 			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
-				sdeb_read_unlock(sip);
+				sdeb_meta_read_unlock(sip);
 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
 				return illegal_condition_result;
 			}
 			break;
 		case 3: /* Reference tag error */
 			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
-				sdeb_read_unlock(sip);
+				sdeb_meta_read_unlock(sip);
 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
 				return check_condition_result;
 			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
-				sdeb_read_unlock(sip);
+				sdeb_meta_read_unlock(sip);
 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
 				return illegal_condition_result;
 			}
@@ -3780,8 +3999,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		}
 	}
 
-	ret = do_device_access(sip, scp, 0, lba, num, false, 0);
-	sdeb_read_unlock(sip);
+	ret = do_device_access(sip, scp, 0, lba, num, 0, false, false);
+	if (meta_data_locked)
+		sdeb_meta_read_unlock(sip);
 	if (unlikely(ret == -1))
 		return DID_ERROR << 16;
 
@@ -3971,6 +4191,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	u64 lba;
 	struct sdeb_store_info *sip = devip2sip(devip, true);
 	u8 *cmd = scp->cmnd;
+	bool meta_data_locked = false;
 
 	switch (cmd[0]) {
 	case WRITE_16:
@@ -4029,10 +4250,17 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 				    "to DIF device\n");
 	}
 
-	sdeb_write_lock(sip);
+	if (sdebug_dev_is_zoned(devip) ||
+	    (sdebug_dix && scsi_prot_sg_count(scp)) ||
+	    scsi_debug_lbp()) {
+		sdeb_meta_write_lock(sip);
+		meta_data_locked = true;
+	}
+
 	ret = check_device_access_params(scp, lba, num, true);
 	if (ret) {
-		sdeb_write_unlock(sip);
+		if (meta_data_locked)
+			sdeb_meta_write_unlock(sip);
 		return ret;
 	}
 
@@ -4041,22 +4269,22 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		switch (prot_verify_write(scp, lba, num, ei_lba)) {
 		case 1: /* Guard tag error */
 			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
-				sdeb_write_unlock(sip);
+				sdeb_meta_write_unlock(sip);
 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
 				return illegal_condition_result;
 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
-				sdeb_write_unlock(sip);
+				sdeb_meta_write_unlock(sip);
 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
 				return check_condition_result;
 			}
 			break;
 		case 3: /* Reference tag error */
 			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
-				sdeb_write_unlock(sip);
+				sdeb_meta_write_unlock(sip);
 				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
 				return illegal_condition_result;
 			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
-				sdeb_write_unlock(sip);
+				sdeb_meta_write_unlock(sip);
 				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
 				return check_condition_result;
 			}
@@ -4064,13 +4292,16 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		}
 	}
 
-	ret = do_device_access(sip, scp, 0, lba, num, true, group);
+	ret = do_device_access(sip, scp, 0, lba, num, group, true, false);
 	if (unlikely(scsi_debug_lbp()))
 		map_region(sip, lba, num);
+	/* If ZBC zone then bump its write pointer */
 	if (sdebug_dev_is_zoned(devip))
 		zbc_inc_wp(devip, lba, num);
-	sdeb_write_unlock(sip);
+	if (meta_data_locked)
+		sdeb_meta_write_unlock(sip);
+
 	if (unlikely(-1 == ret))
 		return DID_ERROR << 16;
 	else if (unlikely(sdebug_verbose &&
@@ -4180,7 +4411,8 @@ static int resp_write_scat(struct scsi_cmnd *scp,
 			goto err_out;
 	}
 
-	sdeb_write_lock(sip);
+	/* Just keep it simple and always lock for now */
+	sdeb_meta_write_lock(sip);
 	sg_off = lbdof_blen;
 	/* Spec says Buffer xfer Length field in number of LBs in dout */
 	cum_lb = 0;
@@ -4223,7 +4455,11 @@ static int resp_write_scat(struct scsi_cmnd *scp,
 			}
 		}
 
-		ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
+		/*
+		 * Write ranges atomically to keep as close to pre-atomic
+		 * writes behaviour as possible.
+		 */
+		ret = do_device_access(sip, scp, sg_off, lba, num, group, true, true);
 		/* If ZBC zone then bump its write pointer */
 		if (sdebug_dev_is_zoned(devip))
 			zbc_inc_wp(devip, lba, num);
@@ -4262,7 +4498,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
 	}
 	ret = 0;
 err_out_unlock:
-	sdeb_write_unlock(sip);
+	sdeb_meta_write_unlock(sip);
 err_out:
 	kfree(lrdp);
 	return ret;
@@ -4281,14 +4517,16 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
 						scp->device->hostdata, true);
 	u8 *fs1p;
 	u8 *fsp;
+	bool meta_data_locked = false;
 
-	sdeb_write_lock(sip);
+	if (sdebug_dev_is_zoned(devip) || scsi_debug_lbp()) {
+		sdeb_meta_write_lock(sip);
+		meta_data_locked = true;
+	}
 
 	ret = check_device_access_params(scp, lba, num, true);
-	if (ret) {
-		sdeb_write_unlock(sip);
-		return ret;
-	}
+	if (ret)
+		goto out;
 
 	if (unmap && scsi_debug_lbp()) {
 		unmap_region(sip, lba, num);
@@ -4299,6 +4537,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
 	fsp = sip->storep;
 	fs1p = fsp + (block * lb_size);
+	sdeb_data_write_lock(sip);
 	if (ndob) {
 		memset(fs1p, 0, lb_size);
 		ret = 0;
@@ -4306,8 +4545,8 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
 		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
 
 	if (-1 == ret) {
-		sdeb_write_unlock(sip);
-		return DID_ERROR << 16;
+		ret = DID_ERROR << 16;
+		goto out;
 	} else if (sdebug_verbose && !ndob && (ret < lb_size))
 		sdev_printk(KERN_INFO, scp->device,
 			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
@@ -4324,10 +4563,12 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
 	/* If ZBC zone then bump its write pointer */
 	if (sdebug_dev_is_zoned(devip))
 		zbc_inc_wp(devip, lba, num);
+	sdeb_data_write_unlock(sip);
+	ret = 0;
 out:
-	sdeb_write_unlock(sip);
-
-	return 0;
+	if (meta_data_locked)
+		sdeb_meta_write_unlock(sip);
+	return ret;
 }
 
 static int resp_write_same_10(struct scsi_cmnd *scp,
@@ -4470,25 +4711,30 @@ static int resp_comp_write(struct scsi_cmnd *scp,
 		return check_condition_result;
 	}
 
-	sdeb_write_lock(sip);
-
 	ret = do_dout_fetch(scp, dnum, arr);
 	if (ret == -1) {
 		retval = DID_ERROR << 16;
-		goto cleanup;
+		goto cleanup_free;
 	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
 			    "indicated=%u, IO sent=%d bytes\n", my_name,
 			    dnum * lb_size, ret);
+
+	sdeb_data_write_lock(sip);
+	sdeb_meta_write_lock(sip);
 	if (!comp_write_worker(sip, lba, num, arr, false)) {
 		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
 		retval = check_condition_result;
-		goto cleanup;
+		goto cleanup_unlock;
 	}
+
+	/* Cover sip->map_storep (which map_region()) sets with data lock */
 	if (scsi_debug_lbp())
 		map_region(sip, lba, num);
-cleanup:
-	sdeb_write_unlock(sip);
+cleanup_unlock:
+	sdeb_meta_write_unlock(sip);
+	sdeb_data_write_unlock(sip);
+cleanup_free:
 	kfree(arr);
 	return retval;
 }
@@ -4532,7 +4778,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 
 	desc = (void *)&buf[8];
 
-	sdeb_write_lock(sip);
+	sdeb_meta_write_lock(sip);
 	for (i = 0 ; i < descriptors ; i++) {
 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
@@ -4548,7 +4794,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	ret = 0;
 
 out:
-	sdeb_write_unlock(sip);
+	sdeb_meta_write_unlock(sip);
 	kfree(buf);
 
 	return ret;
@@ -4706,12 +4952,13 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
 		rest = block + nblks - sdebug_store_sectors;
 
 	/* Try to bring the PRE-FETCH range into CPU's cache */
-	sdeb_read_lock(sip);
+	sdeb_data_read_lock(sip);
 	prefetch_range(fsp + (sdebug_sector_size * block),
 		       (nblks - rest) * sdebug_sector_size);
 	if (rest)
 		prefetch_range(fsp, rest * sdebug_sector_size);
-	sdeb_read_unlock(sip);
+
+	sdeb_data_read_unlock(sip);
 fini:
 	if (cmd[1] & 0x2)
 		res = SDEG_RES_IMMED_MASK;
@@ -4870,7 +5117,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		return check_condition_result;
 	}
 	/* Not changing store, so only need read access */
-	sdeb_read_lock(sip);
+	sdeb_data_read_lock(sip);
 
 	ret = do_dout_fetch(scp, a_num, arr);
 	if (ret == -1) {
@@ -4892,7 +5139,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		goto cleanup;
 	}
 cleanup:
-	sdeb_read_unlock(sip);
+	sdeb_data_read_unlock(sip);
 	kfree(arr);
 	return ret;
 }
@@ -4938,7 +5185,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
 		return check_condition_result;
 	}
 
-	sdeb_read_lock(sip);
+	sdeb_meta_read_lock(sip);
 
 	desc = arr + 64;
 	for (lba = zs_lba; lba < sdebug_capacity;
@@ -5036,11 +5283,70 @@ static int resp_report_zones(struct scsi_cmnd *scp,
 	ret = fill_from_dev_buffer(scp, arr,
 			min_t(u32, alloc_len, rep_len));
 fini:
-	sdeb_read_unlock(sip);
+	sdeb_meta_read_unlock(sip);
 	kfree(arr);
 	return ret;
 }
 
+static int resp_atomic_write(struct scsi_cmnd *scp,
+			     struct sdebug_dev_info *devip)
+{
+	struct sdeb_store_info *sip;
+	u8 *cmd = scp->cmnd;
+	u16 boundary, len;
+	u64 lba, lba_tmp;
+	int ret;
+
+	if (!scsi_debug_atomic_write()) {
+		mk_sense_invalid_opcode(scp);
+		return check_condition_result;
+	}
+
+	sip = devip2sip(devip, true);
+
+	lba = get_unaligned_be64(cmd + 2);
+	boundary = get_unaligned_be16(cmd + 10);
+	len = get_unaligned_be16(cmd + 12);
+
+	lba_tmp = lba;
+	if (sdebug_atomic_wr_align &&
+	    do_div(lba_tmp, sdebug_atomic_wr_align)) {
+		/* Does not meet alignment requirement */
+		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+		return check_condition_result;
+	}
+
+	if (sdebug_atomic_wr_gran && len % sdebug_atomic_wr_gran) {
+		/* Does not meet alignment requirement */
+		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+		return check_condition_result;
+	}
+
+	if (boundary > 0) {
+		if (boundary > sdebug_atomic_wr_max_bndry) {
+			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+			return check_condition_result;
+		}
+
+		if (len > sdebug_atomic_wr_max_length_bndry) {
+			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+			return check_condition_result;
+		}
+	} else {
+		if (len > sdebug_atomic_wr_max_length) {
+			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 12, -1);
+			return check_condition_result;
+		}
+	}
+
+	ret = do_device_access(sip, scp, 0, lba, len, 0, true, true);
+	if (unlikely(ret == -1))
+		return DID_ERROR << 16;
+	if (unlikely(ret != len * sdebug_sector_size))
+		return DID_ERROR << 16;
+	return 0;
+}
+
 /* Logic transplanted from tcmu-runner, file_zbc.c */
 static void zbc_open_all(struct sdebug_dev_info *devip)
 {
@@ -5067,8 +5373,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		mk_sense_invalid_opcode(scp);
 		return check_condition_result;
 	}
-
-	sdeb_write_lock(sip);
+	sdeb_meta_write_lock(sip);
 
 	if (all) {
 		/* Check if all closed zones can be open */
@@ -5117,7 +5422,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	zbc_open_zone(devip, zsp, true);
 fini:
-	sdeb_write_unlock(sip);
+	sdeb_meta_write_unlock(sip);
 	return res;
 }
 
@@ -5144,7 +5449,7 @@ static int resp_close_zone(struct scsi_cmnd *scp,
 		return check_condition_result;
 	}
 
-	sdeb_write_lock(sip);
+	sdeb_meta_write_lock(sip);
 
 	if (all) {
 		zbc_close_all(devip);
@@ -5173,7 +5478,7 @@ static int resp_close_zone(struct scsi_cmnd *scp,
 	zbc_close_zone(devip, zsp);
 fini:
-	sdeb_write_unlock(sip);
+	sdeb_meta_write_unlock(sip);
 	return res;
 }
 
@@ -5216,7 +5521,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp,
 		return check_condition_result;
 	}
 
-	sdeb_write_lock(sip);
+	sdeb_meta_write_lock(sip);
 
 	if (all) {
 		zbc_finish_all(devip);
@@ -5245,7 +5550,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp,
 	zbc_finish_zone(devip, zsp, true);
 fini:
-	sdeb_write_unlock(sip);
+	sdeb_meta_write_unlock(sip);
 	return res;
 }
 
@@ -5296,7 +5601,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		return check_condition_result;
 	}
 
-	sdeb_write_lock(sip);
+	sdeb_meta_write_lock(sip);
 
 	if (all) {
 		zbc_rwp_all(devip);
@@ -5324,7 +5629,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	zbc_rwp_zone(devip, zsp);
 fini:
-	sdeb_write_unlock(sip);
+	sdeb_meta_write_unlock(sip);
 	return res;
 }
 
@@ -6288,6 +6593,7 @@ module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
+module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
@@ -6322,6 +6628,11 @@ module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
+module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
+module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
+module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
+module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
+module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
@@ -6365,6 +6676,7 @@ MODULE_PARM_DESC(lbprz,
 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(atomic_write, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)");
 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
@@ -6396,6 +6708,11 @@ MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"
 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
+MODULE_PARM_DESC(atomic_wr_max_length, "max # of blocks can be atomically written in one cmd (def=8192)");
+MODULE_PARM_DESC(atomic_wr_align, "minimum alignment of atomic write in blocks (def=2)");
+MODULE_PARM_DESC(atomic_wr_gran, "minimum granularity of atomic write in blocks (def=2)");
+MODULE_PARM_DESC(atomic_wr_max_length_bndry, "max # of blocks can be atomically written in one cmd with boundary set (def=8192)");
+MODULE_PARM_DESC(atomic_wr_max_bndry, "max # boundaries per atomic write (def=128)");
 MODULE_PARM_DESC(uuid_ctl,
 		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
@@ -7567,6 +7884,7 @@ static int __init scsi_debug_init(void)
 			return -EINVAL;
 		}
 	}
+	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
 
 	if (want_store) {
 		idx = sdebug_add_store();
@@ -7774,7 +8092,9 @@ static int sdebug_add_store(void)
 			map_region(sip, 0, 2);
 	}
 
-	rwlock_init(&sip->macc_lck);
+	rwlock_init(&sip->macc_data_lck);
+	rwlock_init(&sip->macc_meta_lck);
+	rwlock_init(&sip->macc_sector_lck);
 	return (int)n_idx;
 err:
 	sdebug_erase_store((int)n_idx, sip);