author:    Ming Lei <ming.lei@redhat.com>    2019-06-18 03:37:46 +0200
committer: Martin K. Petersen <martin.petersen@oracle.com>    2019-06-20 21:21:32 +0200
commit:    c71ae886d1321e74f524c7c023933cf87768915d
tree:      cfca5d519bb2a2e731d6c084259302a29bf9b0be /drivers/scsi/ipr.c
parent:    scsi: mvumi: use sg helper to iterate over scatterlist
scsi: ipr: use sg helper to iterate over scatterlist
Unlike the legacy I/O path, scsi-mq preallocates a large array to hold the
scatterlist for each request. This static allocation can consume substantial
amounts of memory on modern controllers which support a large number of
concurrently outstanding requests.

To facilitate a switch to a smaller static allocation combined with a dynamic
allocation for requests that need it, we need to make sure all SCSI drivers
handle chained scatterlists correctly. Convert remaining drivers that directly
dereference the scatterlist array to using the iterator functions.

[mkp: clarified commit message]

Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
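For reference, a minimal sketch (not part of this patch) of the conversion pattern the commit message describes: direct array indexing only works for a flat scatterlist, while for_each_sg()/sg_next() also follow chain links. struct my_desc and the fill_descriptors_*() helpers are hypothetical stand-ins for a driver's DMA descriptor table.

#include <linux/scatterlist.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct my_desc {			/* hypothetical driver descriptor */
	__be32 data_len;
	__be64 address;
};

/* Before: indexing assumes a flat array and breaks on chained scatterlists. */
static void fill_descriptors_indexed(struct my_desc *desc,
				     struct scatterlist *sgl, int nents)
{
	int i;

	for (i = 0; i < nents; i++) {
		desc[i].data_len = cpu_to_be32(sg_dma_len(&sgl[i]));
		desc[i].address  = cpu_to_be64(sg_dma_address(&sgl[i]));
	}
}

/* After: for_each_sg() advances with sg_next(), so chained entries are
 * visited transparently.
 */
static void fill_descriptors_iter(struct my_desc *desc,
				  struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		desc[i].data_len = cpu_to_be32(sg_dma_len(sg));
		desc[i].address  = cpu_to_be64(sg_dma_address(sg));
	}
}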
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--  drivers/scsi/ipr.c | 29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 6d053e220153..bf17540affbc 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3915,22 +3915,23 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
u8 *buffer, u32 len)
{
int bsize_elem, i, result = 0;
- struct scatterlist *scatterlist;
+ struct scatterlist *sg;
void *kaddr;
/* Determine the actual number of bytes per element */
bsize_elem = PAGE_SIZE * (1 << sglist->order);
- scatterlist = sglist->scatterlist;
+ sg = sglist->scatterlist;
- for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
- struct page *page = sg_page(&scatterlist[i]);
+ for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
+ buffer += bsize_elem) {
+ struct page *page = sg_page(sg);
kaddr = kmap(page);
memcpy(kaddr, buffer, bsize_elem);
kunmap(page);
- scatterlist[i].length = bsize_elem;
+ sg->length = bsize_elem;
if (result != 0) {
ipr_trace;
@@ -3939,13 +3940,13 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
}
if (len % bsize_elem) {
- struct page *page = sg_page(&scatterlist[i]);
+ struct page *page = sg_page(sg);
kaddr = kmap(page);
memcpy(kaddr, buffer, len % bsize_elem);
kunmap(page);
- scatterlist[i].length = len % bsize_elem;
+ sg->length = len % bsize_elem;
}
sglist->buffer_len = len;
@@ -3966,6 +3967,7 @@ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
struct scatterlist *scatterlist = sglist->scatterlist;
+ struct scatterlist *sg;
int i;
ipr_cmd->dma_use_sg = sglist->num_dma_sg;
@@ -3974,10 +3976,10 @@ static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
ioarcb->ioadl_len =
cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
- for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+ for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
- ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
- ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+ ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
+ ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
}
ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
@@ -3997,6 +3999,7 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
struct scatterlist *scatterlist = sglist->scatterlist;
+ struct scatterlist *sg;
int i;
ipr_cmd->dma_use_sg = sglist->num_dma_sg;
@@ -4006,11 +4009,11 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
ioarcb->ioadl_len =
cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+ for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
ioadl[i].flags_and_data_len =
- cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
+ cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
ioadl[i].address =
- cpu_to_be32(sg_dma_address(&scatterlist[i]));
+ cpu_to_be32(sg_dma_address(sg));
}
ioadl[i-1].flags_and_data_len |=