author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 22:34:31 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 22:34:31 +0100
commit     60f7c503d971a731ee3c4f884a9f2e80d476730d (patch)
tree       30e23c822306b0e407f6218135feb5b2e847665d /drivers/target/target_core_user.c
parent     Merge tag 'for-5.11/drivers-2020-12-14' of git://git.kernel.dk/linux-block (diff)
parent     scsi: mpt3sas: Update driver version to 36.100.00.00 (diff)
download   linux-60f7c503d971a731ee3c4f884a9f2e80d476730d.tar.xz
           linux-60f7c503d971a731ee3c4f884a9f2e80d476730d.zip
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This consists of the usual driver updates (ufs, qla2xxx, smartpqi,
  target, zfcp, fnic, mpt3sas, ibmvfc) plus a load of cleanups, a major
  power management rework and a load of assorted minor updates.

  There are a few core updates (formatting fixes being the big one) but
  nothing major this cycle"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (279 commits)
  scsi: mpt3sas: Update driver version to 36.100.00.00
  scsi: mpt3sas: Handle trigger page after firmware update
  scsi: mpt3sas: Add persistent MPI trigger page
  scsi: mpt3sas: Add persistent SCSI sense trigger page
  scsi: mpt3sas: Add persistent Event trigger page
  scsi: mpt3sas: Add persistent Master trigger page
  scsi: mpt3sas: Add persistent trigger pages support
  scsi: mpt3sas: Sync time periodically between driver and firmware
  scsi: qla2xxx: Update version to 10.02.00.104-k
  scsi: qla2xxx: Fix device loss on 4G and older HBAs
  scsi: qla2xxx: If fcport is undergoing deletion complete I/O with retry
  scsi: qla2xxx: Fix the call trace for flush workqueue
  scsi: qla2xxx: Fix flash update in 28XX adapters on big endian machines
  scsi: qla2xxx: Handle aborts correctly for port undergoing deletion
  scsi: qla2xxx: Fix N2N and NVMe connect retry failure
  scsi: qla2xxx: Fix FW initialization error on big endian machines
  scsi: qla2xxx: Fix crash during driver load on big endian machines
  scsi: qla2xxx: Fix compilation issue in PPC systems
  scsi: qla2xxx: Don't check for fw_started while posting NVMe command
  scsi: qla2xxx: Tear down session if FW say it is down
  ...
Diffstat (limited to 'drivers/target/target_core_user.c')
-rw-r--r--  drivers/target/target_core_user.c | 164
1 file changed, 75 insertions(+), 89 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 590e6d072228..6b171fff007b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -586,14 +586,15 @@ static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
}
static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- struct iovec **iov, int prev_dbi, int *remain)
+ struct iovec **iov, int prev_dbi, int len)
{
/* Get the next dbi */
int dbi = tcmu_cmd_get_dbi(cmd);
+
/* Do not add more than DATA_BLOCK_SIZE to iov */
- int len = min_t(int, DATA_BLOCK_SIZE, *remain);
+ if (len > DATA_BLOCK_SIZE)
+ len = DATA_BLOCK_SIZE;
- *remain -= len;
/*
* The following code will gather and map the blocks to the same iovec
* when the blocks are all next to each other.
@@ -618,8 +619,8 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
int dbi = -2;
/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
- while (data_length > 0)
- dbi = new_block_to_iov(udev, cmd, iov, dbi, &data_length);
+ for (; data_length > 0; data_length -= DATA_BLOCK_SIZE)
+ dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
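[Note] The two hunks above change the contract between tcmu_setup_iovs() and new_block_to_iov(): instead of handing the callee a pointer to the remaining length and letting it do min_t() and decrement *remain, the caller now passes the remaining length by value and steps it down by DATA_BLOCK_SIZE itself, while the callee only clamps len to one block. The following is a minimal user-space sketch of that chunking contract, not driver code: DATA_BLOCK_SIZE is given an illustrative value of 4096 here, and block_len() is a made-up stand-in for the clamping done inside new_block_to_iov().

#include <stdio.h>

#define DATA_BLOCK_SIZE 4096            /* illustrative value only */

/* stand-in for the clamping in new_block_to_iov(): at most one block */
static int block_len(int remaining)
{
        if (remaining > DATA_BLOCK_SIZE)
                remaining = DATA_BLOCK_SIZE;
        return remaining;
}

int main(void)
{
        int data_length = 10000;        /* example transfer size */

        /* mirrors the new loop shape in tcmu_setup_iovs() */
        for (; data_length > 0; data_length -= DATA_BLOCK_SIZE)
                printf("block of %d bytes\n", block_len(data_length));

        return 0;
}

For a 10000-byte transfer this prints 4096, 4096, 1808, i.e. the same per-block lengths the old *remain bookkeeping produced.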
@@ -688,67 +689,83 @@ static inline size_t head_to_end(size_t head, size_t size)
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
-static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
- struct iovec **iov)
+#define TCMU_SG_TO_DATA_AREA 1
+#define TCMU_DATA_AREA_TO_SG 2
+
+static inline void tcmu_copy_data(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, uint32_t direction,
+ struct scatterlist *sg, unsigned int sg_nents,
+ struct iovec **iov, size_t data_len)
{
- struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
/* start value of dbi + 1 must not be a valid dbi */
- int i, dbi = -2;
- int block_remaining = 0;
- int data_len = se_cmd->data_length;
- void *from, *to = NULL;
- size_t copy_bytes, offset;
- struct scatterlist *sg;
- struct page *page = NULL;
-
- for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
- int sg_remaining = sg->length;
- from = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0) {
- if (block_remaining == 0) {
- if (to) {
- flush_dcache_page(page);
- kunmap_atomic(to);
- }
-
- /* get next dbi and add to IOVs */
- dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
- &data_len);
- page = tcmu_get_block_page(udev, dbi);
- to = kmap_atomic(page);
- block_remaining = DATA_BLOCK_SIZE;
- }
+ int dbi = -2;
+ size_t block_remaining, cp_len;
+ struct sg_mapping_iter sg_iter;
+ unsigned int sg_flags;
+ struct page *page;
+ void *data_page_start, *data_addr;
- copy_bytes = min_t(size_t, sg_remaining,
- block_remaining);
- offset = DATA_BLOCK_SIZE - block_remaining;
- memcpy(to + offset, from + sg->length - sg_remaining,
- copy_bytes);
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
+ else
+ sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+ sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);
- sg_remaining -= copy_bytes;
- block_remaining -= copy_bytes;
+ while (data_len) {
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
+ data_len);
+ else
+ dbi = tcmu_cmd_get_dbi(tcmu_cmd);
+ page = tcmu_get_block_page(udev, dbi);
+ if (direction == TCMU_DATA_AREA_TO_SG)
+ flush_dcache_page(page);
+ data_page_start = kmap_atomic(page);
+ block_remaining = DATA_BLOCK_SIZE;
+
+ while (block_remaining && data_len) {
+ if (!sg_miter_next(&sg_iter)) {
+ /* set length to 0 to abort outer loop */
+ data_len = 0;
+ pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n");
+ break;
+ }
+ cp_len = min3(sg_iter.length, block_remaining, data_len);
+
+ data_addr = data_page_start +
+ DATA_BLOCK_SIZE - block_remaining;
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ memcpy(data_addr, sg_iter.addr, cp_len);
+ else
+ memcpy(sg_iter.addr, data_addr, cp_len);
+
+ data_len -= cp_len;
+ block_remaining -= cp_len;
+ sg_iter.consumed = cp_len;
}
- kunmap_atomic(from - sg->offset);
- }
+ sg_miter_stop(&sg_iter);
- if (to) {
- flush_dcache_page(page);
- kunmap_atomic(to);
+ kunmap_atomic(data_page_start);
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ flush_dcache_page(page);
}
}
-static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+ struct iovec **iov)
+{
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+
+ tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, iov, se_cmd->data_length);
+}
+
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
bool bidi, uint32_t read_len)
{
- struct se_cmd *se_cmd = cmd->se_cmd;
- int i, dbi;
- int block_remaining = 0;
- void *from = NULL, *to;
- size_t copy_bytes, offset;
- struct scatterlist *sg, *data_sg;
- struct page *page;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ struct scatterlist *data_sg;
unsigned int data_nents;
- uint32_t count = 0;
if (!bidi) {
data_sg = se_cmd->t_data_sg;
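[Note] The hunk above folds the bodies of scatter_data_area() and gather_data_area() into one helper, tcmu_copy_data(), which walks the command's scatterlist with the kernel's sg_mapping_iter API (sg_miter_start()/sg_miter_next()/sg_miter_stop()) instead of open-coded kmap_atomic() bookkeeping, and copies min3(sg segment remaining, block remaining, total remaining) bytes per step. The user-space sketch below mirrors only that copy loop under simplifying assumptions: plain buffers stand in for scatterlist entries and data-area pages, BLOCK_SIZE is an arbitrary illustrative value, and struct seg and min3sz() are invented for the example.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 8                            /* illustrative, not the driver's value */

struct seg { const char *buf; size_t len; };    /* stand-in for one scatterlist entry */

static size_t min3sz(size_t a, size_t b, size_t c)
{
        size_t m = a < b ? a : b;

        return m < c ? m : c;
}

int main(void)
{
        struct seg segs[] = { { "Hello, ", 7 }, { "tcmu", 4 }, { " world!", 7 } };
        char blocks[4][BLOCK_SIZE];             /* stand-in for data-area blocks */
        size_t data_len = 18, seg_idx = 0, seg_off = 0;
        unsigned int blk = 0;

        while (data_len) {
                size_t block_remaining = BLOCK_SIZE;

                while (block_remaining && data_len) {
                        size_t seg_remaining = segs[seg_idx].len - seg_off;
                        size_t cp_len = min3sz(seg_remaining, block_remaining, data_len);

                        /* fill the current block from the current segment */
                        memcpy(blocks[blk] + (BLOCK_SIZE - block_remaining),
                               segs[seg_idx].buf + seg_off, cp_len);

                        seg_off += cp_len;
                        if (seg_off == segs[seg_idx].len) {     /* advance to next "sg entry" */
                                seg_idx++;
                                seg_off = 0;
                        }
                        block_remaining -= cp_len;
                        data_len -= cp_len;
                }
                printf("block %u: %.*s\n", blk,
                       (int)(BLOCK_SIZE - block_remaining), blocks[blk]);
                blk++;
        }
        return 0;
}

Run as is, the sketch prints "Hello, t", "cmu worl" and "d!", showing a segment straddling a block boundary the same way an sg entry can in the driver; in the real helper the segment walking is done by sg_miter_next() and sg_iter.consumed rather than by hand.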
@@ -759,46 +776,15 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
* buffer blocks, and before gathering the Data-In buffer
* the Data-Out buffer blocks should be skipped.
*/
- count = cmd->dbi_cnt - cmd->dbi_bidi_cnt;
+ tcmu_cmd_set_dbi_cur(tcmu_cmd,
+ tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);
data_sg = se_cmd->t_bidi_data_sg;
data_nents = se_cmd->t_bidi_data_nents;
}
- tcmu_cmd_set_dbi_cur(cmd, count);
-
- for_each_sg(data_sg, sg, data_nents, i) {
- int sg_remaining = sg->length;
- to = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0 && read_len > 0) {
- if (block_remaining == 0) {
- if (from)
- kunmap_atomic(from);
-
- block_remaining = DATA_BLOCK_SIZE;
- dbi = tcmu_cmd_get_dbi(cmd);
- page = tcmu_get_block_page(udev, dbi);
- from = kmap_atomic(page);
- flush_dcache_page(page);
- }
- copy_bytes = min_t(size_t, sg_remaining,
- block_remaining);
- if (read_len < copy_bytes)
- copy_bytes = read_len;
- offset = DATA_BLOCK_SIZE - block_remaining;
- memcpy(to + sg->length - sg_remaining, from + offset,
- copy_bytes);
-
- sg_remaining -= copy_bytes;
- block_remaining -= copy_bytes;
- read_len -= copy_bytes;
- }
- kunmap_atomic(to - sg->offset);
- if (read_len == 0)
- break;
- }
- if (from)
- kunmap_atomic(from);
+ tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
+ data_nents, NULL, read_len);
}
static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
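[Note] For the bidi case, the last hunk replaces the open-coded gather loop with a call to tcmu_copy_data() and, before that, moves the data-block cursor past the Data-Out blocks via tcmu_cmd_set_dbi_cur(). The arithmetic is simply dbi_cnt - dbi_bidi_cnt, assuming (as the hunk's subtraction suggests) that dbi_cnt counts all blocks of the command and dbi_bidi_cnt the bidi (Data-In) portion. The toy program below only illustrates that index computation with made-up counts.

#include <stdio.h>

int main(void)
{
        unsigned int dbi_cnt = 8;       /* example: 8 data blocks in total */
        unsigned int dbi_bidi_cnt = 3;  /* example: the last 3 hold Data-In */

        /* gather_data_area() starts reading at the first Data-In block */
        unsigned int first_bidi_dbi = dbi_cnt - dbi_bidi_cnt;

        printf("Data-In blocks are dbi %u..%u\n", first_bidi_dbi, dbi_cnt - 1);
        return 0;
}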