author	Bernard Metzler <bmt@zurich.ibm.com>	2019-08-22 17:07:41 +0200
committer	Doug Ledford <dledford@redhat.com>	2019-08-22 17:21:06 +0200
commit	fab4f97e1fe33cf08e58c09cf9eee334857d9fe7 (patch)
tree	52105f0deb2e0894921793eb24b81518334c4c8a /drivers/infiniband
parent	RDMA/bnxt_re: Fix stack-out-of-bounds in bnxt_qplib_rcfw_send_message (diff)
RDMA/siw: Fix SGL mapping issues
All user level and most in-kernel applications submit WQEs where the
SG list entries are all of a single type. iSER in particular, however,
will send us WQEs with mixed SG types: sge[0] = kernel buffer,
sge[1] = PBL region. Check and set is_kva on each SG entry individually
instead of assuming the first SGE type carries through to the last.

This fixes iSER over siw.

Fixes: b9be6f18cf9e ("rdma/siw: transmit path")
Reported-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
Tested-by: Krishnamraju Eraparaju <krishna2@chelsio.com>
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Link: https://lore.kernel.org/r/20190822150741.21871-1-bmt@zurich.ibm.com
Signed-off-by: Doug Ledford <dledford@redhat.com>
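For illustration, here is a minimal standalone C sketch of the per-SGE
decision the patch makes. It is not the kernel code: struct sge_demo,
sge_is_kva(), and pbl_marker are hypothetical stand-ins for siw's
wqe->mem[] objects, where a NULL mem_obj marks a plain kernel-VA buffer.

  #include <stdbool.h>
  #include <stdio.h>

  struct sge_demo {
  	void *mem_obj;	/* NULL => plain kernel VA; else a pinned/PBL region */
  };

  static bool sge_is_kva(const struct sge_demo *sge)
  {
  	return sge->mem_obj == NULL;
  }

  int main(void)
  {
  	int pbl_marker;	/* illustrative stand-in for a PBL-backed object */
  	/* A mixed list like iSER's: sge[0] kernel buffer, sge[1] PBL region. */
  	struct sge_demo sg[2] = { { NULL }, { &pbl_marker } };

  	/* The fix: evaluate each entry, never assume sge[0]'s type repeats. */
  	for (int i = 0; i < 2; i++)
  		printf("sge[%d]: %s\n", i,
  		       sge_is_kva(&sg[i]) ? "kernel VA" : "PBL/umem");
  	return 0;
  }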
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/sw/siw/siw_qp_tx.c	37
1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 43020d2040fc..42c63622c7bd 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
 
 #define MAX_TRAILER (MPA_CRC_SIZE + 4)
 
-static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps)
+static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
 {
-	if (hdr_len) {
-		++pages;
-		--num_maps;
-	}
-	while (num_maps-- > 0) {
-		kunmap(*pages);
-		pages++;
+	while (kmap_mask) {
+		if (kmap_mask & BIT(0))
+			kunmap(*pp);
+		pp++;
+		kmap_mask >>= 1;
 	}
 }
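The rewritten siw_unmap_pages() drops the hdr_len/num_maps arithmetic:
bit 'seg' of kmap_mask records whether iov slot 'seg' was kmap()'ed,
and cleanup simply walks the mask. Below is a standalone sketch of the
same idiom, compilable outside the kernel; BIT() is defined locally
and unmap_slot() is a hypothetical stub standing in for kunmap(*pp).

  #include <stdio.h>

  #define BIT(n) (1UL << (n))

  static void unmap_slot(int idx)
  {
  	printf("kunmap slot %d\n", idx);	/* stands in for kunmap(*pp) */
  }

  static void unmap_pages(unsigned long kmap_mask)
  {
  	int idx = 0;

  	/* Shift the mask down; only slots whose bit was set get unmapped. */
  	while (kmap_mask) {
  		if (kmap_mask & BIT(0))
  			unmap_slot(idx);
  		idx++;
  		kmap_mask >>= 1;
  	}
  }

  int main(void)
  {
  	unsigned long kmap_mask = 0;

  	kmap_mask |= BIT(1);	/* suppose slots 1 and 3 were kmap()'ed ... */
  	kmap_mask |= BIT(3);	/* ... while 0 and 2 were not (kva SGEs) */

  	unmap_pages(kmap_mask);	/* unmaps exactly slots 1 and 3 */
  	return 0;
  }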
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 	unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
 		     sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
 		     pbl_idx = c_tx->pbl_idx;
+	unsigned long kmap_mask = 0L;
 
 	if (c_tx->state == SIW_SEND_HDR) {
 		if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 		if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
 			mem = wqe->mem[sge_idx];
-			if (!mem->mem_obj)
-				is_kva = 1;
+			is_kva = mem->mem_obj == NULL ? 1 : 0;
 		} else {
 			is_kva = 1;
 		}
@@ -500,12 +498,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 					p = siw_get_upage(mem->umem,
 							  sge->laddr + sge_off);
 					if (unlikely(!p)) {
-						if (hdr_len)
-							seg--;
-						if (!c_tx->use_sendpage && seg) {
-							siw_unmap_pages(page_array,
-									hdr_len, seg);
-						}
+						siw_unmap_pages(page_array, kmap_mask);
 						wqe->processed -= c_tx->bytes_unsent;
 						rv = -EFAULT;
 						goto done_crc;
@@ -515,6 +508,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 				if (!c_tx->use_sendpage) {
 					iov[seg].iov_base = kmap(p) + fp_off;
 					iov[seg].iov_len = plen;
+
+					/* Remember for later kunmap() */
+					kmap_mask |= BIT(seg);
+
 					if (do_crc)
 						crypto_shash_update(
 							c_tx->mpa_crc_hd,
@@ -543,10 +540,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 			if (++seg > (int)MAX_ARRAY) {
 				siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
-				if (!is_kva && !c_tx->use_sendpage) {
-					siw_unmap_pages(page_array, hdr_len,
-							seg - 1);
-				}
+				siw_unmap_pages(page_array, kmap_mask);
 				wqe->processed -= c_tx->bytes_unsent;
 				rv = -EMSGSIZE;
 				goto done_crc;
@@ -597,8 +591,7 @@ sge_done:
 	} else {
 		rv = kernel_sendmsg(s, &msg, iov, seg + 1,
 				    hdr_len + data_len + trl_len);
-		if (!is_kva)
-			siw_unmap_pages(page_array, hdr_len, seg);
+		siw_unmap_pages(page_array, kmap_mask);
 	}
 	if (rv < (int)hdr_len) {
 		/* Not even complete hdr pushed or negative rv */
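Design note: with the mask-based bookkeeping, every exit path can call
siw_unmap_pages(page_array, kmap_mask) unconditionally. A slot whose bit
was never set is skipped, and a zero mask ends the loop immediately, so
the old !is_kva / !c_tx->use_sendpage guards and the hdr_len/seg
adjustments around each unmap site are no longer needed.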